content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from colorsys import hls_to_rgb
from rpi_ws281x import Color
| [
6738,
7577,
893,
1330,
289,
7278,
62,
1462,
62,
81,
22296,
198,
6738,
374,
14415,
62,
18504,
30368,
87,
1330,
5315,
198
] | 2.772727 | 22 |
from typing import Optional, Union, Tuple, Set, List, Dict
from loguru import logger
from nonebot.adapters import Bot, Event
from nonebot.handler import Handler
from nonebot.permission import Permission
from nonebot.typing import T_Handler
from nonutils.command import Command
from nonutils.stringres import Rstr
class Service:
"""服务类,用于命令组管理。
服务(Service) 是一套命令管理工具,功能方面类似于 Nonebot 中的 command_group,而在管理方面类似于 (sub)plugin
每一个 Service 都可以添加多个命令,在会话中通过以下方式激活:
<command_start><sv_name|sv_aliases> <cmd|aliases> <args>
如 /test_service test_cmd arg1 arg2,即可触发 test_service 的 test_cmd
其中 sv_name 与 cmd 都可设置相应的 aliases
可以通过 service.on_command 声明命令,用法同 nonebot.on_command
设置 on_command 的 cmd 为 None,即声明 当服务被调用了未声明的命令 / 只输入服务名 时的处理方式
Inspired by https://github.com/Kyomotoi/ATRI/blob/HEAD/ATRI/service.py
"""
# def __eq__(self, other: Union['Service', str]):
# assert isinstance(other, (Service, str)), TypeError(
# f"'==' Not supported between instances of 'Service' and '{type(other)}'")
# if isinstance(other, str):
# return self.sv_name == other
# return self.sv_name == other.sv_name
# A dict for preserving all maintained services
# {sv_name: sv_obj}
services: Dict[str, Service] = {}
| [
6738,
19720,
1330,
32233,
11,
4479,
11,
309,
29291,
11,
5345,
11,
7343,
11,
360,
713,
198,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
4844,
13645,
13,
324,
12126,
1330,
18579,
11,
8558,
198,
6738,
4844,
13645,
13,
30281,
1330,
32... | 1.846705 | 698 |
import pdb
import ipywidgets as widgets
from IPython.display import display
import subprocess
from pathlib import Path
class ControlPanel():
"""
Ipywidgets panel allowing high-level control of the HITs creation,confirmation
validation and deletion along with monitoring of number of HITs per worker
"""
def __init__(self,turk,watcher=None):
"""
Args:
turk (mt2gf.Turker): turker instance which will be used through the control panel
watcher (mt2gf.Watcher): watcher instance which will be used throught the control panel. If
set to None, no monitoring buttons will be displayed
"""
self.turk = turk
self.watcher = watcher
self.display_panel()
def list_hits(self,b):
"""
Button: List the published HITs. Completed designates the number of completed
forms for the given HIT. Once Percent_completed reaches 100, the HIT status becomes "Assignable"
"""
df = self.turk.list_hits()
display(df,self.output)
def create_hits(self,b):
"""
Generate and publish the HITs tasks as described by turk.gform_map.
"""
self.turk.create_forms_hits()
def approve_correct_dry(self,b):
"""
Call Turker.approve_correct_hits: this
"""
self.turk.save_worker_infos()
self.turk.approve_correct_hits(dry_run=True)
def approve_correct(self,b):
"""
"""
self.turk.save_worker_infos()
self.turk.approve_correct_hits(dry_run=False)
def approve_all(self,b):
"""
"""
self.turk.save_worker_infos()
self.turk.approve_all_hits()
def list_assignments(self,b):
"""
"""
display(self.turk.list_all_assignments())
def stop_all_hits(self,b):
"""
"""
self.turk.stop_all_hits()
def delete_all_hits(self,b):
"""
"""
self.turk.delete_all_hits()
def start_monitor(self,b):
"""
"""
self.watcher.start_monitor()
def stop_monitor(self,b):
"""
"""
self.watcher.stop_monitor()
def tagged_workers(self,b):
"""
"""
print("Tagged workers:")
display(self.watcher.get_tagged_workers())
def results_hitid_formidx(self,sender):
"""
"""
df = self.turk.get_results(self.b_resform.value)
display(df)
def untag_all_workers(self,b):
"""
"""
self.watcher.untag_all_workers()
def display_panel(self):
"""
Displays the most important features from the Turker through an Ipywidgets
control panel in the jupyer notebook
Args:
turk (mt2gf.Turker): the Turker instance that will handle the actions
gform_map_id (str): google drive id of the google forms mapping file
"""
self.output = widgets.Output()
approve_color = 'lightgreen'
stop_color = 'orange'
# 1. List hits button
b_listhits = widgets.Button(description='list hits')
b_listhits.on_click(self.list_hits)
# 2. Create hits button
# create the hits linked to the turker
b_createhits = widgets.Button(description='create hits')
b_createhits.on_click(self.create_hits)
# 3. Approve correct dry run
# same output than if approve_correct was ran with no consequence
b_appcorrdry = widgets.Button(description='approve correct (dry)')
b_appcorrdry.on_click(self.approve_correct_dry)
b_appcorrdry.style.button_color = approve_color
# 4. Approve correct HITs
appcorr = widgets.Button(description='approve_correct')
appcorr.on_click(self.approve_correct)
appcorr.style.button_color = approve_color
# 5. Approve all HITs
b_appall = widgets.Button(description='approve all')
b_appall.on_click(self.approve_all)
b_appall.style.button_color = approve_color
# 6. List all Assignments
b_allass = widgets.Button(description='list assignments')
b_allass.on_click(self.list_assignments)
# 7. Stop all hits
bstop = widgets.Button(description='stop all hits')
bstop.on_click(self.stop_all_hits)
bstop.style.button_color = stop_color
# 8. Delete all hits
bdelete = widgets.Button(description='delete all hits',button_style='danger')
bdelete.on_click(self.delete_all_hits)
# 9. Start a watcher thread
bwatcher = widgets.Button(description='start monitor',button_style='info')
bwatcher.on_click(self.start_monitor)
# 10. Stop a monitor thread if one exists
bstopwatcher = widgets.Button(description='stop monitor',button_style='primary')
bstopwatcher.on_click(self.stop_monitor)
# 11. List tagged workers a monitor thread if one exists
btagwork = widgets.Button(description='list tagged workers',button_style='primary')
btagwork.on_click(self.tagged_workers)
# 12. Untag all workers
buntagwork = widgets.Button(description='untag all workers',button_style='primary')
buntagwork.on_click(self.untag_all_workers)
# 12. Display downloaded result for given HITid/form idx
self.b_resform = widgets.Text( placeholder='Results HITid/formidx')
self.b_resform.on_submit(self.results_hitid_formidx)
# display the buttons
display(widgets.HBox((b_listhits, b_createhits)))
display(widgets.HBox((b_allass,b_appall)))
display(widgets.HBox((b_appcorrdry, appcorr)))
display(widgets.HBox((bstop, bdelete)))
if self.watcher is not None:
display(widgets.HBox((bwatcher, bstopwatcher)))
display(widgets.HBox((btagwork,buntagwork)))
display(self.b_resform) | [
11748,
279,
9945,
198,
198,
11748,
20966,
88,
28029,
11407,
355,
40803,
198,
6738,
6101,
7535,
13,
13812,
1330,
3359,
198,
11748,
850,
14681,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
198,
4871,
6779,
26639,
33529,
198,
220,
220,
22... | 2.302181 | 2,568 |
from django.contrib import admin
from . models import Doctor, Contact,Prescription_pictures,Bloodds
# Register your models here.
admin.site.register(Doctor)
admin.site.register(Contact)
admin.site.register(Prescription_pictures)
admin.site.register(Bloodds)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
4981,
1330,
9356,
11,
14039,
11,
25460,
6820,
62,
18847,
942,
11,
21659,
9310,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
15654,
13,
30238,
7,
37564,
8,
198,
28... | 3.48 | 75 |
import math
import itertools
import collections
from sqlalchemy.orm import joinedload
from pycldf import Sources
from clldutils.misc import nfilter
from clldutils.color import qualitative_colors
from clldutils.text import strip_brackets
from clld.cliutil import Data, bibtex2source
from clld.db.meta import DBSession
from clld.db.models import common
from clld.lib import bibtex
import colorcet
import pulotu
from pulotu import models
def prime_cache(args):
"""If data needs to be denormalized for lookup, do that here.
This procedure should be separate from the db initialization, because
it will have to be run periodically whenever data has been updated.
"""
for l in DBSession.query(common.Language).options(
joinedload(common.Language.valuesets).joinedload(common.ValueSet.references)):
spks = set(itertools.chain(*[[ref.source_pk for ref in vs.references] for vs in l.valuesets]))
for spk in spks:
DBSession.add(common.LanguageSource(language_pk=l.pk, source_pk=spk))
| [
11748,
10688,
198,
11748,
340,
861,
10141,
198,
11748,
17268,
198,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
5399,
2220,
198,
6738,
12972,
66,
335,
69,
1330,
26406,
198,
6738,
269,
297,
67,
26791,
13,
44374,
1330,
299,
24455,
198,
... | 2.929775 | 356 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
##
from model import data_processing
import util
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from model.estimator import FBProphetEstimator
x_train = pd.DataFrame({"ds": data_processing.X_TRAIN})
y_train = pd.DataFrame({"y": data_processing.Y_TRAIN})
x_test = pd.DataFrame({"ds": data_processing.X_TEST})
y_test = data_processing.Y_TEST
x_y_train = pd.concat([x_train, y_train], axis=1)
changepoint_prior_scale = [0.4, 0.45, 0.5, 0.55, 0.6]
param_tune = {"param1": changepoint_prior_scale}
fb_proph = GridSearchCV(
FBProphetEstimator(), param_tune, cv=util.CV, scoring=util.MERIC_SCORING
)
fb_proph.fit(x_y_train, y_train)
fb_proph.best_params_
util.plot_actual_predicted(x_train, y_train, fb_proph.predict(x_train))
fb_y_pred = fb_proph.predict(x_test)
test_fb_err = metrics.mean_absolute_error(fb_y_pred, y_test)
| [
2235,
198,
6738,
2746,
1330,
1366,
62,
36948,
198,
11748,
7736,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
24846,
18243,
33538,
198,
6738,
2746,
13,
395... | 2.417582 | 364 |
import os
from src.context import Context
from src.errors import RTError
from src.ast.tokens import TokType
from src.ast.nodes import *
from src.buildchain import BuildCache
from src.buildchain import Visitor
from src.valtypes import Value
from src.valtypes.array import Array
from src.valtypes.dict import Dict
from src.valtypes.string import String
from src.valtypes.number import Number
from src.valtypes.func import Func
from src.utils import printError
| [
11748,
28686,
198,
6738,
12351,
13,
22866,
1330,
30532,
198,
6738,
12351,
13,
48277,
1330,
11923,
12331,
198,
6738,
12351,
13,
459,
13,
83,
482,
641,
1330,
9453,
6030,
198,
6738,
12351,
13,
459,
13,
77,
4147,
1330,
1635,
198,
6738,
12... | 3.634921 | 126 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
from typing import Optional, Tuple, List
from oneflow.framework.tensor import Tensor
import oneflow as flow
| [
37811,
198,
15269,
12131,
383,
1881,
37535,
46665,
13,
1439,
2489,
10395,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.882979 | 188 |
import click
from halo import Halo
from .svm import Shop, DummyShop
from .exceptions import Failure
shop = w if (w := Shop.load(silent=True)) else DummyShop()
@click.group()
@click.command()
@click.command()
@click.option('--reason', help="The changes made in this commit", default="Made changes")
@click.command()
@click.option('--file', help='The file to be backed up to', default='shop')
@click.command()
@click.option('--file', help='The file to be restored from', default='shop')
@click.option('--no-catch', help='raise all errors normally', default=False, is_flag=True)
@click.command()
@click.option('-n', help='how many revisions back', default=1)
@click.command()
@click.option('-n', help='how many revisions forward', default=1)
@click.command()
@click.option('--tree', help='Whether the display should be a tree', is_flag=True)
@click.command()
cli.add_command(init)
cli.add_command(save)
cli.add_command(backup)
cli.add_command(restore)
cli.add_command(revert)
cli.add_command(advance)
cli.add_command(inspect)
cli.add_command(debug)
if __name__ == '__main__':
cli()
| [
11748,
3904,
198,
6738,
289,
7335,
1330,
18597,
198,
6738,
764,
82,
14761,
1330,
13705,
11,
360,
13513,
29917,
198,
6738,
764,
1069,
11755,
1330,
25743,
198,
198,
24643,
796,
266,
611,
357,
86,
19039,
13705,
13,
2220,
7,
18217,
298,
2... | 2.951482 | 371 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# vim: ts=4 sw=4 et ai:
from distutils.core import setup
long_description = """\
Copyright, Michael P. Soulier, 2010.
About Release 0.7.0:
====================
Various bugfixes and refactoring for improved logging.
Now requiring python 2.7+ and tightening syntax in
preparation for supporting python 3.
About Release 0.6.2:
====================
Maintenance release to fix a couple of reported issues.
About Release 0.6.1:
====================
Maintenance release to fix several reported problems, including a rollover
at 2^16 blocks, and some contributed work on dynamic file objects.
About Release 0.6.0:
====================
Maintenance update to fix several reported issues, including proper
retransmits on timeouts, and further expansion of unit tests.
About Release 0.5.1:
====================
Maintenance update to fix a bug in the server, overhaul the documentation for
the website, fix a typo in the unit tests, fix a failure to set default
blocksize, and a divide by zero error in speed calculations for very short
transfers.
Also, this release adds support for input/output in client as stdin/stdout
About Release 0.5.0:
====================
Complete rewrite of the state machine.
Now fully implements downloading and uploading.
About Release 0.4.6:
====================
Feature release to add the tsize option.
Thanks to Kuba Kończyk for the patch.
About Release 0.4.5:
====================
Bugfix release for compatability issues on Win32, among other small issues.
About Release 0.4.4:
====================
Bugfix release for poor tolerance of unsupported options in the server.
About Release 0.4.3:
====================
Bugfix release for an issue with the server's detection of the end of the file
during a download.
About Release 0.4.2:
====================
Bugfix release for some small installation issues with earlier Python
releases.
About Release 0.4.1:
====================
Bugfix release to fix the installation path, with some restructuring into a
tftpy package from the single module used previously.
About Release 0.4:
==================
This release adds a TftpServer class with a sample implementation in bin.
The server uses a single thread with multiple handlers and a select() loop to
handle multiple clients simultaneously.
Only downloads are supported at this time.
About Release 0.3:
==================
This release fixes a major RFC 1350 compliance problem with the remote TID.
About Release 0.2:
==================
This release adds variable block sizes, and general option support,
implementing RFCs 2347 and 2348. This is accessible in the TftpClient class
via the options dict, or in the sample client via the --blocksize option.
About Release 0.1:
==================
This is an initial release in the spirit of "release early, release often".
Currently the sample client works, supporting RFC 1350. The server is not yet
implemented, and RFC 2347 and 2348 support (variable block sizes) is underway,
planned for 0.2.
About Tftpy:
============
Purpose:
--------
Tftpy is a TFTP library for the Python programming language. It includes
client and server classes, with sample implementations. Hooks are included for
easy inclusion in a UI for populating progress indicators. It supports RFCs
1350, 2347, 2348 and the tsize option from RFC 2349.
Dependencies:
-------------
Python 2.7+, hopefully. Let me know if it fails to work.
Trifles:
--------
Home page: http://tftpy.sf.net/
Project page: http://sourceforge.net/projects/tftpy/
License is the MIT License
See COPYING in this distribution.
Limitations:
------------
- Only 'octet' mode is supported.
- The only options supported are blksize and tsize.
Author:
=======
Michael P. Soulier <msoulier@digitaltorque.ca>
"""
setup(name='tftpy',
version='0.7.0',
description='Python TFTP library',
long_description=long_description,
long_description_content_type='text/plain',
author='Michael P. Soulier',
author_email='msoulier@digitaltorque.ca',
url='http://github.com/msoulier/tftpy',
packages=['tftpy'],
scripts=['bin/tftpy_client.py','bin/tftpy_server.py'],
project_urls={
'Documentation': 'http://tftpy.sourceforge.net/sphinx/index.html',
'Source': 'https://github.com/msoulier/tftpy/',
'Tracker': 'https://github.com/msoulier/tftpy/issues',
},
python_requires='>=2.7',
classifiers=[
'Programming Language :: Python :: 2.7',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet',
]
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
43907,
25,
40379,
28,
19,
1509,
28,
19,
2123,
257,
72,
25,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
1... | 3.361227 | 1,434 |
import gdxpds as gd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
### This file reads optimal load scheduling from Julia ouput and generates aggregated
### consumption of alll categories, as in as in section 7.2.3 - step 2, following project report.
### Result is saved in csv and then converted to inc from xls2gdx ibrary in excel VBA.
# Sceanario and iteration
scenario='Expected' ### choose between Expected, RapidScenario and SlowScenario
adoption=0.25 ### choose
iter='5it' ### choose #it
# EV (categ,RRR,YYY,EH)
evnumbers=pd.read_csv('JuliatoBALMOREL/evsharesfin_'+scenario+'.csv',header=[0,1,2],index_col=0)
evnumbers.iloc[0:12]=evnumbers.iloc[0:12]*0.8 #houses only 80% at home
evnumbers.iloc[12:24]=evnumbers.iloc[12:24]*0.1 #apartments only 10% at home
categories=evnumbers.index
coefficients=pd.read_csv('JuliatoBALMOREL/evcoefficients.csv',skiprows=1,names=categories)/100 #[8*30]
years=['2020','2025','2030','2040']
lista=list(evnumbers.index.array)
lista.append('TOT')
EV=pd.DataFrame(index=range(8736),columns=range(1000))
EV.columns=pd.MultiIndex.from_product([lista,['DK1','DK2'],years,['BEV','BEVTOT','TOT','PHEV','PHEVTOT']])
TOTTOT=pd.DataFrame()
for DK in ['DK1','DK2']:
for year in years:
chargingBEV = pd.read_csv('JuliatoBALMOREL/Juliaout/FlexEV/FlexEV_' + DK + '_y' + year+ '.csv', header=None)
chargingPHEV = pd.read_csv('JuliatoBALMOREL/Juliaout/FlexEV/FlexPHEV_' + DK + '_y' + year + '.csv', header=None)
for category in evnumbers.index:
weights=[evnumbers.loc[category][(DK,'BEV',year)],evnumbers.loc[category][(DK,'PHEV',year)]]
EV[category,DK,year,'BEV']=sum(chargingBEV.iloc[:, i] * coefficients[category][i] for i in np.arange(20))
EV[category,DK,year,'PHEV']=sum(chargingPHEV.iloc[:, i] * coefficients[category][i+20] for i in np.arange(10))
EV[category, DK, year, 'TOT']=(EV[category,DK,year,'BEV'] * weights[0] +EV[category,DK,year,'PHEV'] * weights[1])/sum(weights)
EV[category, DK, year, 'BEVTOT']=EV[category,DK,year,'BEV'] * evnumbers.loc[category][(DK,'BEV',year)]
EV[category, DK, year, 'PHEVTOT']=EV[category,DK,year,'PHEV'] * evnumbers.loc[category][(DK,'PHEV',year)]
EV['TOT', DK, year, 'BEVTOTT'] = EV[EV.columns[(EV.columns.get_level_values(1)==DK)&(EV.columns.get_level_values(2)==year) & (EV.columns.get_level_values(3)=='BEVTOT')]].sum(axis=1)
EV['TOT', DK, year, 'PHEVTOTT'] = EV[EV.columns[(EV.columns.get_level_values(1)==DK)&(EV.columns.get_level_values(2)==year) & (EV.columns.get_level_values(3)=='PHEVTOT')]].sum(axis=1)
TOTTOT[DK,year,'EV']=EV['TOT', DK, year, 'BEVTOTT']+EV['TOT', DK, year, 'PHEVTOTT']
# heavy
# EV.to_csv('JuliatoBALMOREL/EV'+scenario+str(adoption)+iter+'.csv')
# HP (categ,RRR,YYY,EH)
ehnumbers=pd.read_csv('JuliatoBALMOREL/ShareHP_categoriesfin_'+scenario+'.csv',header=[0,1,2],index_col=0)
EH=pd.DataFrame(index=range(8736),columns=range(800))
EH.columns=pd.MultiIndex.from_product([lista,['DK1','DK2'],years,['EH','AWTOT','AATOT','AASHTOT']]) # ('AA','AW','AASH')
for DK in ['DK1','DK2']:
for year in years:
AWprofile = pd.read_csv('JuliatoBALMOREL/Juliaout/FlexEH/FlexEHAW_' + DK + '_y' + year + '.csv', names=categories)
AAprofile = pd.read_csv('JuliatoBALMOREL/Juliaout/FlexEH/FlexEHAA_' + DK + '_y' + year + '.csv', names=categories)
AASHprofile = pd.read_csv('JuliatoBALMOREL/Juliaout/FlexEH/FlexEHSumHouse_' + DK + '_y' + year + '.csv',names=categories)
for category in categories:
weights=[ehnumbers.loc[category][DK, 'AW', year],ehnumbers.loc[category][DK, 'AA', year],ehnumbers.loc[category][DK, 'AASH', year]]
#EH[category, DK, year, 'AA'] = (AWprofile[category] * weights[0] + AAprofile[category] * weights[1] +AASHprofile[category] * weights[2]) / sum(weights) # change with share per each category
#EH[category, DK, year, 'AW'] = (AWprofile[category] * weights[0] + AAprofile[category] * weights[1] +AASHprofile[category] * weights[2]) / sum(weights) # change with share per each category
#EH[category, DK, year, 'AASH'] = (AWprofile[category] * weights[0] + AAprofile[category] * weights[1] +AASHprofile[category] * weights[2]) / sum(weights) # change with share per each category
EH[category, DK, year, 'EH'] = (AWprofile[category] * weights[0] + AAprofile[category] * weights[1] +AASHprofile[category] * weights[2]) / sum(weights) # change with share per each category
EH[category, DK, year, 'AWTOT'] = AWprofile[category] * ehnumbers.loc[category][DK, 'AW', year]
EH[category, DK, year, 'AATOT'] = AAprofile[category] * ehnumbers.loc[category][(DK, 'AA', year)]
EH[category, DK, year, 'AASHTOT'] = AASHprofile[category] * ehnumbers.loc[category][(DK, 'AASH', year)]
EH['TOT', DK, year, 'AWTOTT'] = EH[EH.columns[(EH.columns.get_level_values(1)==DK)&(EH.columns.get_level_values(2)==year) & (EH.columns.get_level_values(3)=='AWTOT')]].sum(axis=1)
EH['TOT', DK, year, 'AATOTT'] = EH[EH.columns[(EH.columns.get_level_values(1)==DK)&(EH.columns.get_level_values(2)==year) & (EH.columns.get_level_values(3)=='AATOT')]].sum(axis=1)
EH['TOT', DK, year, 'AASHTOTT'] = EH[EH.columns[(EH.columns.get_level_values(1)==DK)&(EH.columns.get_level_values(2)==year) & (EH.columns.get_level_values(3)=='AASHTOT')]].sum(axis=1)
TOTTOT[DK,year,'EH']=EH['TOT', DK, year, 'AWTOTT']+EH['TOT', DK, year, 'AATOTT']+EH['TOT', DK, year, 'AASHTOTT']
# heavy
# EH.to_csv('JuliatoBALMOREL/EH'+scenario+str(adoption)+iter+'.csv')
# Yearly normalization
RESEVHP=pd.DataFrame()
ssn=[0,1680,3360,5040,8736]
RESEVHPDK1=pd.DataFrame()
RESEVHPDK2=pd.DataFrame()
for i in years:
RESEVHPDK1[i]=pd.concat([TOTTOT['DK1',i,'EV'][ssn[p]:ssn[p+1]]-TOTTOT['DK1',i,'EV'][ssn[p]:ssn[p+1]].mean() for p in range(4)])+\
pd.concat([TOTTOT['DK1',i,'EH'][ssn[p]:ssn[p+1]]-TOTTOT['DK1',i,'EH'][ssn[p]:ssn[p+1]].mean() for p in range(4)])
RESEVHPDK2[i]=pd.concat([TOTTOT['DK2',i,'EV'][ssn[p]:ssn[p+1]]-TOTTOT['DK2',i,'EV'][ssn[p]:ssn[p+1]].mean() for p in range(4)])+\
pd.concat([TOTTOT['DK2',i,'EH'][ssn[p]:ssn[p+1]]-TOTTOT['DK2',i,'EH'][ssn[p]:ssn[p+1]].mean() for p in range(4)])
RESEVHP['DK1']=pd.concat([RESEVHPDK1[i] for i in years])
RESEVHP['DK2']=pd.concat([RESEVHPDK2[i] for i in years])
# Adoption and to MW
# adoption= 0.40 ### choose
RESEVHPOK=RESEVHP*adoption/1000
if scenario=='RapidScenario':
RESEVHPOK['DK1']=RESEVHPOK['DK1']+TSRapDK1
RESEVHPOK['DK2']=RESEVHPOK['DK2']+TSRapDK2
if scenario=='SlowScenario':
RESEVHPOK['DK1']=RESEVHPOK['DK1']+TSSlowDK1
RESEVHPOK['DK2']=RESEVHPOK['DK2']+TSSlowDK2
# Export
RESEVHPOK.to_csv('BALMOREL/RESEVHP files/xlsx/DE_RESEVHP'+scenario+str(adoption)+iter+'.csv') # normalized already but non adopted
RESEVHPOK.describe()
| [
11748,
308,
34350,
79,
9310,
355,
308,
67,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
21017,
770,
2393,
9743,
16586,
3440,
269... | 2.189997 | 3,179 |
import sys
from numba import extension_types
from numba import *
@jit
class MyExtension(object):
"""
>>> obj = MyExtension(10.0)
>>> obj.value
10.0
>>> obj._numba_attrs.value
10.0
>>> obj.setvalue(20.0)
>>> obj.getvalue()
20.0
>>> obj.value
20.0
>>> obj.getvalue.__name__
'getvalue'
>>> obj.getvalue.__doc__
'Return value'
>>> type(obj.getvalue.im_func)
<type 'cython_function_or_method'>
>>> obj._numba_attrs._fields_
[('value', <class 'ctypes.c_double'>)]
"""
@void(double)
@double()
def getvalue(self):
"Return value"
return self.value
@void(double)
def setvalue(self, value):
"Set value"
self.value = value
@object_()
@jit
class ObjectAttrExtension(object):
"""
>>> obj = ObjectAttrExtension(10.0, 'blah')
Traceback (most recent call last):
...
TypeError: a float is required
>>> obj = ObjectAttrExtension(10.0, 3.5)
>>> obj.value1
10.0
>>> obj = ObjectAttrExtension('hello', 9.3)
>>> obj.value1
'hello'
>>> obj.setvalue(20.0)
>>> obj.getvalue()
20.0
>>> obj.value1 = MyExtension(10.0)
>>> obj.value1
MyExtension10.0
>>> obj.getvalue()
MyExtension10.0
>>> obj.method()
MyExtension10.0
>>> obj.method2(15.0)
30.0
>>> obj._numba_attrs._fields_
[('value2', <class 'ctypes.c_double'>), ('value1', <class 'ctypes.py_object'>)]
"""
@object_()
def getvalue(self):
"Return value"
return self.value1
@void(double)
def setvalue(self, value):
"Set value"
self.value1 = value
@object_()
@object_(int32)
if __name__ == '__main__':
import doctest
doctest.testmod() | [
11748,
25064,
198,
198,
6738,
997,
7012,
1330,
7552,
62,
19199,
198,
6738,
997,
7012,
1330,
1635,
198,
198,
31,
45051,
198,
4871,
2011,
11627,
3004,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13163,
26181,
796,
201... | 2.17402 | 816 |
from setuptools import setup, find_packages
setup(
name="alesisvsysex",
version="0.0.1",
install_requires=["pytest", "python-rtmidi", "mido", "vext.pyqt5"],
packages=find_packages(),
entry_points={
'console_scripts': ['alesisvsysex=alesisvsysex.__main__:main'],
}
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
2040,
271,
85,
1837,
8044,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
15,
13,
16,
1600,
198,
220,
220,
220,
2721,
62,... | 2.34375 | 128 |
"""
Module with all Payment Expirience Web Profiles related entities.
"""
from enum import Enum
from typing import Type, List
from pypaypal.entities.base import (
T,
PayPalEntity,
ResponseType
)
class FlowConfig(PayPalEntity):
"""Webexp Flow configuration object representation.
"""
@property
@property
@classmethod
@classmethod
class InputField(PayPalEntity):
"""Input field obj representation
"""
@classmethod
@classmethod
class Presentation(PayPalEntity):
"""WebExp Page Presentation obj representation
"""
@classmethod
@classmethod
class WebExpProfile(PayPalEntity):
"""Web Expirience Profile object representation
"""
@classmethod
@classmethod | [
37811,
198,
220,
220,
220,
19937,
351,
477,
28784,
1475,
4063,
1240,
5313,
4415,
2915,
3519,
12066,
13,
198,
37811,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
5994,
11,
7343,
198,
198,
6738,
279,
4464,
323,
18596,
... | 2.829091 | 275 |
#!/usr/bin/python
class Problem9:
'''
Special Pythagorean triplet
Problem 9
31875000
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
'''
p = Problem9()
print(p.solution()) | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
4871,
20647,
24,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
6093,
48657,
363,
29456,
15055,
83,
198,
220,
220,
220,
20647,
860,
198,
220,
220,
220,
39320,
2425,
830,
628,
... | 2.472393 | 163 |
from main import *
| [
6738,
1388,
1330,
1635,
198
] | 3.8 | 5 |
import os
import cv2
import numpy as np
import argparse
import zipfile
import _pickle as pickle
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Visualization crop results')
parser.add_argument('--annotation', dest='annotation',
help='annotation file path',
default='',
type=str)
parser.add_argument('--image_dir', dest='image_dir',
help='image dir',
default='',
type=str)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
with open(args.annotation, 'rb') as f:
annos = pickle.load(f)
inds = np.random.permutation(len(annos['seqs']))
is_zip = False
if args.image_dir[-4:] == '.zip':
is_zip = True
zf = zipfile.ZipFile(args.image_dir, 'r')
for i in inds:
seq = annos['seqs'][i]
num_frames = len(seq['frames'])
for frame_id in range(num_frames):
if not is_zip:
frame_path = os.path.join(args.image_dir, seq['frames'][frame_id])
img = cv2.imread(frame_path, cv2.IMREAD_COLOR)
else:
buf = zf.read(name=seq['frames'][frame_id])
img = cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), cv2.IMREAD_COLOR)
bboxes = seq['bboxes'][frame_id]
for bbox_id, bbox in enumerate(bboxes):
ec = (0, 255, 0) if bbox_id == 0 else (0, 255, 255)
bbox = np.round(bbox).astype(np.int32)
img = cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=ec)
cv2.imshow('show', img)
cv2.waitKey(40)
cv2.waitKey(0)
| [
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
11748,
19974,
7753,
198,
11748,
4808,
27729,
293,
355,
2298,
293,
628,
198,
4299,
21136,
62,
22046,
33529,
198,
220,
220,
220,
3722... | 1.896091 | 972 |
#!/usr/bin/env python
"""Usage: psutil-procsmem [-h] [-s SORT]
Options:
-h --help # print this help
-s [SORT] --sort [SORT] # sort by column, either uss, pss, swap or pss
Description:
Show "real" (USS) memory usage about all (querable) processes.
"USS" (Unique Set Size) memory is probably the most representative metric
for determining how much memory is actually being used by a process as it
represents the amount of memory that would be freed if the process was
terminated right now.
This is similar to "smem" cmdline utility on Linux:
https://www.selenic.com/smem/
"""
from __future__ import print_function
import sys
from docopt import docopt
import psutil
from psutilcli import bytes2human
from psutilcli import color_cmdline
from psutilcli import exit
from psutilcli import warn
from psutilcli.compat import get_terminal_size
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
28350,
25,
26692,
22602,
12,
36942,
5796,
368,
25915,
71,
60,
25915,
82,
311,
9863,
60,
198,
198,
29046,
25,
198,
220,
532,
71,
1377,
16794,
220,
220,
220,
220,
220,
220,... | 3.155172 | 290 |
from __future__ import unicode_literals
import django_cache_url
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
42625,
14208,
62,
23870,
62,
6371,
628,
198
] | 3.190476 | 21 |
# graph_modifications.py
# -------------------------------------------------------------------------
# This script creates a simple plot with two lines, then modifies many
# features of the plot, including axis labels, labels and legend, line
# style, tick labels, and title.
# -------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# Generate data for plot.
num_points = 26
x_min, x_max = 0, 4
x_values = np.linspace(x_min, x_max, num_points)
y_values = x_values**2
# Create empty figure.
plt.figure()
# Plot data.
plt.plot(x_values, y_values, label="Population 1") # label when plot is created
plt.plot(x_values, x_values**3, label="Population 2") # label when plot is created
plt.legend()
# Gain control of current Axes object.
ax = plt.gca()
# Give plot a title.
ax.set_title("My First Plot", family='monospace', size=24, weight='bold')
# Label the axes.
ax.set_xlabel("Time [days]")
ax.set_ylabel("Population")
# Change tick labels and font
ax.set_xticklabels(ax.get_xticks(), family='monospace', fontsize=10)
ax.set_yticklabels(ax.get_yticks(), family='monospace', fontsize=10)
# Change the legend
lines = ax.get_lines() # returns a list of line objects
lines[0].set_label("Infected Population") # change labels using line objects
lines[1].set_label("Cured Population") # change labels using line objects
ax.legend() # display legend in plot
# Make the first line a thick, red, dashed line.
plt.setp(lines[0], linestyle='--', linewidth=3, color='r')
# Change the legend again.
ax.legend(("Healthy", "Recovered")) # change labels using Axes object
plt.show()
| [
2,
4823,
62,
4666,
6637,
13,
9078,
198,
2,
16529,
45537,
198,
2,
770,
4226,
8075,
257,
2829,
7110,
351,
734,
3951,
11,
788,
953,
6945,
867,
198,
2,
3033,
286,
262,
7110,
11,
1390,
16488,
14722,
11,
14722,
290,
8177,
11,
1627,
198,... | 3.137736 | 530 |
import json
import os
import shutil
import uuid
from distutils.dir_util import copy_tree
import requests
from kaos_cli.constants import DOCKER, MINIKUBE, AWS, BACKEND, INFRASTRUCTURE, GCP, LOCAL_CONFIG_DICT, \
CONTEXTS, ACTIVE, BACKEND_CACHE, DEFAULT, USER, REMOTE, KAOS_STATE_DIR
from kaos_cli.exceptions.exceptions import HostnameError
from kaos_cli.services.state_service import StateService
from kaos_cli.services.terraform_service import TerraformService
from kaos_cli.utils.environment import check_environment
from kaos_cli.utils.helpers import build_dir
from kaos_cli.utils.validators import EnvironmentState, is_cloud_provider
class BackendFacade:
"""
This class should handle all backend related configuration and settings.
"""
@property
@property
@property
@property
@property
@staticmethod
@staticmethod
def _remove_build_files(self, dir_build):
"""
Function to remove backend directory
"""
self.state_service.provider_delete(dir_build)
if not self.state_service.list_providers():
self.state_service.full_delete(dir_build)
@staticmethod
@staticmethod
def _parse_config(dir_build):
"""
Basic function to extract endpoint from deployed backend service
"""
config_json_path = os.path.join(dir_build, 'config.json')
with open(config_json_path) as f:
raw_config = json.load(f)
domain_value = raw_config["backend_domain"][0]
hostname = domain_value.get("hostname")
ip = domain_value.get("ip")
domain = hostname or ip
if not domain:
raise HostnameError("Hostname not present")
port = int(raw_config["backend_port"])
path = raw_config["backend_path"]
url = f"http://{domain}:{port}{path}"
kubeconfig = raw_config["kubeconfig"]
return url, kubeconfig
@staticmethod
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
334,
27112,
198,
6738,
1233,
26791,
13,
15908,
62,
22602,
1330,
4866,
62,
21048,
198,
198,
11748,
7007,
198,
6738,
479,
7495,
62,
44506,
13,
9979,
1187,
1330,
360,
1129... | 2.570674 | 757 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 09:58:50 2020
@author: duttar
Description: Inverting for ALT from the seasonal subsidence results
Using mixed soil conditions:
a) .9 porosity for top organic layer
b) exponential decay of porosity below organic layer
c) .44 porosity for pure mineral
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import h5py
from datetime import datetime as dt
import multiprocessing
from joblib import Parallel, delayed
from functools import partial
import scipy.io as sio
from scipy.integrate import quad
from scipy.optimize import leastsq
# work directory
proj_dir = os.path.expanduser('/data/not_backed_up/rdtta/Permafrost/Alaska/North_slope/DT102/Stack/timeseries')
# file in geo coordinates
geom_file = os.path.join(proj_dir, 'geo/geo_geometryRadar.h5')
ts_file = os.path.join(proj_dir, 'geo/geo_timeseries_ramp_demErr.h5')
maskfile = os.path.join(proj_dir, 'geo/geo_maskTempCoh.h5')
with h5py.File(maskfile, "r") as f:
a_group_key = list(f.keys())[0]
maskbool = list(f[a_group_key])
maskbool = np.array(maskbool)
mat_c1 = sio.loadmat('subsdata.mat')
longitude = mat_c1['lon']
latitude = mat_c1['lat']
subs_data = mat_c1['subs_data']
seasonal_subs = subs_data[:,1]
ifglen = np.shape(longitude)[0]
ifgwid = np.shape(longitude)[1]
numpixels = ifglen*ifgwid
# function of porosity for mixed soil
integrad_mixed = lambda y: .0028 + (2/23)*(.44 + .56*np.exp(-5.5*y))
# total water depth considering full saturation
subs_z = lambda x: quad(integrad_mixed, 0.0358, x)
def get_ALT(arg_i, ifglen, seasonal_subs):
'''
Estimate ALT from seasonal subsidence value
'''
if np.int(np.mod(arg_i, 50000)) == 0:
print('in loop number : ', arg_i)
ind_len = np.mod(arg_i, ifglen) - 1
if np.mod(arg_i, ifglen) == 0:
ind_len = ifglen - 1
ind_wid = np.int(np.floor(arg_i/ifglen)) - 1
# check if masked
if maskbool[ind_len, ind_wid] == False:
return np.nan
seasubs = seasonal_subs[arg_i]
# mask if seasonal subsidence is negative
if seasubs < 0 :
return np.nan
# check if subsidence is too small --> porosity 0.9
if seasubs <= 0.0028 :
return seasubs*23/(2*0.9)
elif seasubs >= 0.0353 :
return .7+ (seasubs - 0.0353)*23/(2*0.44)
else:
val_ALT = leastsq(lambda z: subs_z(z) - seasubs, .5)
return val_ALT[0]
num_cores = multiprocessing.cpu_count()
get_ALT_ = partial(get_ALT, ifglen = ifglen, seasonal_subs= seasonal_subs)
output = Parallel(n_jobs=num_cores)(delayed(get_ALT_)(i) for i in range(numpixels))
ALT_data = np.array(output)
var_name = 'ALTdata.mat'
sio.savemat(var_name, {'ALT_data':ALT_data, 'lon':longitude, 'lat':latitude})
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
5979,
1160,
7769,
25,
3365,
25,
1120,
12131,
198,
198,
31,
9800,
25,
288,
15318,
28... | 2.377797 | 1,162 |
"""Types for geojson_pydantic models"""
from typing import Tuple, Union
NumType = Union[float, int]
BBox = Union[
Tuple[NumType, NumType, NumType, NumType], # 2D bbox
Tuple[NumType, NumType, NumType, NumType, NumType, NumType], # 3D bbox
]
Position = Union[Tuple[NumType, NumType], Tuple[NumType, NumType, NumType]]
| [
37811,
31431,
329,
4903,
13210,
1559,
62,
79,
5173,
5109,
4981,
37811,
198,
198,
6738,
19720,
1330,
309,
29291,
11,
4479,
198,
198,
33111,
6030,
796,
4479,
58,
22468,
11,
493,
60,
198,
33,
14253,
796,
4479,
58,
198,
220,
220,
220,
3... | 2.666667 | 123 |
MAX_ITERATIONS = 15
TOLERANCE = 0.001
# https://en.wikipedia.org/wiki/B%C3%A9zier_curve
# P0(0, 0), P1(x1, y1), P2(x2, y2), P3(1, 1)
# B(t) = ((1-t)^3)*P0 + (3t(1-t)^2)*P1 + (3t^2(1-t))*P2 + t^3*P3
# = (3t(1-t)^2)*P1 + (3t^2(1-t))*P2 + t^3
# Returns f(t) or z(t) by given value x1, x2 or y1, y2
# B`(t) = (3(1-t)^2)*P1 + (6t(1-t))(P2-P1) + 3t^2(P3-P2)
# Remember that P3 is always (1, 1)
# Returns dx/dt or dy/dt by given value x1, x2 or y1, y2
| [
22921,
62,
2043,
1137,
18421,
796,
1315,
201,
198,
51,
3535,
1137,
19240,
796,
657,
13,
8298,
201,
198,
201,
198,
2,
3740,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
33,
4,
34,
18,
4,
32,
24,
89,
959,
62,
22019,
303,
201,
... | 1.567213 | 305 |
# -*- encoding: utf-8 -*-
from .base import BaseConfig
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
764,
8692,
1330,
7308,
16934,
628
] | 2.666667 | 21 |
"""All URLs for the WeGovNow extension."""
from django.conf.urls import include, url
from rest_framework.urlpatterns import format_suffix_patterns
from geokey_wegovnow import views
# ###########################
# ADMIN VIEWS
# ###########################
adminpatterns = [
url(r'^admin/profile/settings/$',
views.UWUMProfileSettingsView.as_view(),
name='uwum_profile_settings'),
]
# ###########################
# PUBLIC API
# ###########################
apipatterns = [
url(r'^api/wegovnow/'
r'navigation/$',
views.UWUMNavigationAPIView.as_view(),
name='api_uwum_navigation'),
]
apipatterns = format_suffix_patterns(apipatterns, allowed=['json', 'raw_html'])
# ###########################
# COMBINED URLS
# ###########################
urlpatterns = [
url(
r'^', include(adminpatterns)),
url(
r'^', include(apipatterns)),
]
| [
37811,
3237,
32336,
329,
262,
775,
23774,
3844,
7552,
526,
15931,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
198,
6738,
1334,
62,
30604,
13,
6371,
33279,
82,
1330,
5794,
62,
37333,
844,
62,
3327... | 2.578348 | 351 |
import os
import json
import random
import boto3
from time import sleep
from notification import SnsNotification
region = os.getenv('AWS_REGION','ap-northeast-1')
SNS_ARN = os.environ['sns_arn']
CHANNEL = os.environ['channel']
REPO_OMMIT = os.environ['repo_ommit']
alb = boto3.client('elbv2', region_name=region)
ecs = boto3.client('ecs', region_name=region)
icon = 'https://s3-us-west-2.amazonaws.com/assets.site.serverless.com/blog/step-functions.png'
notify = SnsNotification(region, SNS_ARN, CHANNEL, username='ecs deity', icon_url=icon)
#-----------CREATE-------------
# ex. port_range = "8000-8100"
# input: { "cluster": CLUSTER, "service": SERVICE, "lb_arn": lb_rn, "port": port }
#-----------DELETE-------------
# input: { "cluster": CLUSTER, "service": SERVICE }
# input: { "cluster": CLUSTER, "service": SERVICE }
# input: { "cluster": CLUSTER, "service": SERVICE }
# input: { "cluster": CLUSTER, "service": SERVICE }
# input: { "cluster": CLUSTER, "service": SERVICE, "lb_arn": LB_ARN }
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
275,
2069,
18,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
14483,
1330,
5489,
82,
3673,
2649,
198,
198,
36996,
796,
28686,
13,
1136,
24330,
10786,
12298,
50,
62,
31553,
2849... | 2.681698 | 377 |
import re
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.remote import webdriver, webelement
| [
11748,
302,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
32103,
21321,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,... | 3.5 | 66 |
from __future__ import annotations
import os
import subprocess
import sys
import time
from dataclasses import dataclass
from threading import Timer
from typing import TypeVar
import cptk.constants
import cptk.utils
from cptk.core.system import System
from cptk.local.problem import LocalProblem
from cptk.scrape import Test
T = TypeVar('T')
@dataclass
class Chef:
""" Bake, serve and test local problems. """
@property
def bake(self) -> None:
""" Bakes (generates) the executable of the current problem solution.
If the recipe configuration file of the current problem doesn't specify
a 'bake' option, returns None quietly. """
if not self._problem.recipe.bake:
return
location = self._problem.location
System.log(f'Baking has begun ({self._using_string})')
start = time.time()
for cmd in self._problem.recipe.bake:
System.details(cmd)
res = self._runner.exec(cmd, wd=location, redirect=False)
if res.code:
raise BakingError(res.code, cmd)
seconds = time.time() - start
System.log(f'Solution is baked! (took {seconds:.02f} seconds)')
def serve(self) -> None:
""" Bakes the local problem (if a baking recipe is provided), and serves
it while piping the standard input to the executable. """
self.bake()
cmd = self._problem.recipe.serve
location = self._problem.location
System.log(f'Serving solution ({self._using_string})')
System.details(cmd)
res = self._runner.exec(cmd, wd=location, redirect=False)
System.abort(res.code)
def _load_tests(self) -> dict[str, Test]:
""" Returns a list of tests where the keys are their names and the
values are the test details. """
# TODO: the ideal solution will provide a way for the user to fully
# configure the way that the test inputs end expectations are stored.
# For now, we force a standard that uses treats all .in files inside
# the folder as input files, and all .out files as the expected outputs
folder = os.path.join(
self._problem.location,
self._problem.recipe.test.folder,
)
if not os.path.isdir(folder):
return dict()
files = [
item for item in os.listdir(
folder,
) if os.path.isfile(os.path.join(folder, item))
]
inputs = {
filename[:-len(cptk.constants.INPUT_FILE_SUFFIX)]
for filename in files
if filename.endswith(cptk.constants.INPUT_FILE_SUFFIX)
}
outputs = {
filename[:-len(cptk.constants.OUTPUT_FILE_SUFFIX)]
for filename in files
if filename.endswith(cptk.constants.OUTPUT_FILE_SUFFIX)
}
res = dict()
for name in inputs.intersection(outputs):
inpp = os.path.join(folder, name + cptk.constants.INPUT_FILE_SUFFIX)
with open(inpp, 'r', encoding='utf8') as file:
inp = file.read()
outp = os.path.join(folder, name + cptk.constants.OUTPUT_FILE_SUFFIX)
with open(outp, 'r', encoding='utf8') as file:
out = file.read()
res[name] = Test(inp, out)
return res
def test(self) -> None:
""" Bakes (if a baking recipe is provided) and serves the local tests
that are linked to the problem. """
if self._problem.recipe.test is None:
raise NoTestConfigurationError()
self.bake()
cmd = self._problem.recipe.serve
location = self._problem.location
timeout = self._problem.recipe.test.timeout
tests = self._load_tests()
LogFunc = System.title if tests else System.warn
LogFunc(f'Found {len(tests)} tests')
passed = 0
start = time.time()
for name, test in sorted(tests.items()):
res = self._runner.exec(
cmd, wd=location, input=test.input,
redirect=True, timeout=timeout,
)
if res.timed_out:
System.error('Execution timed out', title=name)
elif res.code:
System.error(f'Nonzero exit code {res.code}', title=name)
elif test.expected is not None and res.outs != test.expected:
System.error('Output differs from expectation', title=name)
else:
System.success('Output matches expectations', title=name)
passed += 1
seconds = time.time() - start
failed = len(tests) - passed
System.title(
f'{passed} passed and {failed} failed'
f' in {seconds:.2f} seconds',
)
System.abort(1 if failed else 0)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4704,
278,
1330,
5045,
263,
198,
6738,
19720,
1330,
... | 2.278012 | 2,133 |
import os
import re
from uuid import uuid4
from cs50 import SQL
from flask import Flask, flash, render_template, redirect, request, session, json
from flask_session import Session
from tempfile import mkdtemp
from helpers import login_required
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from pyrebase import pyrebase
from functools import wraps
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_TYPE"] = "filesystem"
app.config["SESSION_PERMANENT"] = True
Session(app)
UPLOAD_FOLDER = "./videos_cache"
# Configure file upload folder
VIDEO_EXTENSIONS = [".mp4", ".quicktime"]
IMAGE_EXTENSIONS = [".jpg", ".jpeg", ".png"]
app.config["UPLOAD_FOLDER"]= UPLOAD_FOLDER
# Configure pyrebase
config = {
"apiKey": "AIzaSyC0c1Ni5dqBYe4fx-j_j9RBVrfAbFRRtJs",
"authDomain": "sitecursos-fb0f8.firebaseapp.com",
"databaseURL": "https://sitecursos-fb0f8-default-rtdb.firebaseio.com",
"projectId": "sitecursos-fb0f8",
"storageBucket": "sitecursos-fb0f8.appspot.com",
"messagingSenderId": "527634793144",
"appId": "1:527634793144:web:6d943bc0ea3e4b4f9daa4d",
"measurementId": "G-8QYPZ8BGE2"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
#db = SQL("mysql+pymysql://root:@localhost:3306/sitecursos")
db = SQL("mysql+pymysql://sql10403857:uTPYI6esSr@sql10.freemysqlhosting.net:3306/sql10403857")
@app.route("/")
@login_required
@app.route("/login", methods=["GET", "POST"])
@app.route("/logout")
@login_required
@app.route("/register", methods=["GET", "POST"])
@app.route("/register_course", methods=["GET", "POST"])
@login_required
@app.route("/search")
@login_required
@app.route("/course")
@login_required
@app.route("/buy", methods=["POST"])
@login_required
@app.route("/video")
@login_required | [
11748,
28686,
198,
11748,
302,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
6738,
50115,
1120,
1330,
16363,
198,
6738,
42903,
1330,
46947,
11,
7644,
11,
8543,
62,
28243,
11,
18941,
11,
2581,
11,
6246,
11,
33918,
198,
6738,
42903,
... | 2.585947 | 797 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from melange.common.pagination import AppUrl
from melange.common.pagination import AtomLink
from melange.common.pagination import PaginatedDataView
from melange.common.utils import find
from melange.tests import BaseTest
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
15069,
2813,
4946,
25896,
11419,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
... | 3.410646 | 263 |
from .some_function import some_function
from .SomeClass import SomeClass
from .SomeClass import SOME_CONSTANT
| [
198,
6738,
764,
11246,
62,
8818,
1330,
617,
62,
8818,
198,
6738,
764,
4366,
9487,
1330,
2773,
9487,
198,
6738,
764,
4366,
9487,
1330,
41670,
62,
10943,
2257,
8643,
198
] | 3.733333 | 30 |
import unittest
from project.figure.circle import Circle
| [
11748,
555,
715,
395,
198,
198,
6738,
1628,
13,
26875,
13,
45597,
1330,
16291,
628,
198
] | 3.75 | 16 |
#!/usr/bin/env python
# coding=utf-8
"""Module Description
Copyright (c) 2017 Jianfeng Li <lee_jianfeng@sjtu.edu.cn>
This code is free software; you can redistribute it and/or modify it
under the terms of the MIT License.
@sam: Sam File Class
@status: experimental
@version: 0.0.1
@author: Jianfeng Li
@contact: lee_jianfeng@sjtu.edu.cn
"""
from utils import *
from bam import BamFile
class SamFile(FundementalFile):
"""
Description:
Class Name: SAM File, need be point the file path and samplename(eg. A101A or A101C and A101T) to initial.
Method:
conver2bam:Use samtools to convert SAM format file to BAM format file.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
37811,
26796,
12489,
198,
15269,
357,
66,
8,
2177,
40922,
69,
1516,
7455,
1279,
7197,
62,
73,
666,
69,
1516,
31,
82,
73,
28047,
13,
15532,
13,
315... | 2.757322 | 239 |
import pytest
import numpy as np
import pandas as pd
from gym.spaces import Box
from tensortrade.features import FeatureTransformer
from tensortrade.features.stationarity import FractionalDifference
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
11550,
13,
2777,
2114,
1330,
8315,
198,
198,
6738,
11192,
419,
27585,
13,
40890,
1330,
27018,
8291,
16354,
198,
6738,
11192,
4... | 3.548387 | 62 |
"""Detection model trainer.
This file provides a generic training method to train a
DetectionModel.
"""
import datetime
import os
import tensorflow as tf
import time
import monopsr
from monopsr.builders import optimizer_builder, net_builder
from monopsr.core import checkpoint_utils
from monopsr.core import summary_utils
slim = tf.contrib.slim
def train(model, config):
"""Training function for detection models.
Args:
model: The detection model object
config: config object
"""
print('Training', config.config_name)
# Get configurations
model_config = model.model_config
train_config = config.train_config
# Create a variable tensor to hold the global step
global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
##############################
# Get training configurations
##############################
max_iterations = train_config.max_iterations
summary_interval = train_config.summary_interval
checkpoint_interval = \
train_config.checkpoint_interval
max_checkpoints = train_config.max_checkpoints_to_keep
paths_config = train_config.paths_config
logdir = paths_config.logdir
if not os.path.exists(logdir):
os.makedirs(logdir)
checkpoint_dir = paths_config.checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = checkpoint_dir + '/' + model_config.model_type
global_summaries = set([])
# The model should return a dictionary of predictions
print('Building model...')
output_dict, gt_dict, output_debug_dict = model.build()
print('Done building model.')
# summary_histograms = train_config.summary_histograms
# summary_img_images = train_config.summary_img_images
# summary_bev_images = train_config.summary_bev_images
##############################
# Setup loss
##############################
losses_dict, total_loss = model.loss(output_dict, gt_dict)
# Optimizer
training_optimizer = optimizer_builder.build(
train_config.optimizer, global_summaries, global_step_tensor)
# Create the train op
print('Creating train_op')
with tf.variable_scope('train_op'):
train_op = slim.learning.create_train_op(
total_loss,
training_optimizer,
clip_gradient_norm=1.0,
global_step=global_step_tensor)
print('Done creating train_op')
# Save checkpoints regularly.
saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)
# Add the result of the train_op to the summary
tf.summary.scalar('training_loss', train_op)
# Add maximum memory usage summary op
# This op can only be run on device with gpu, so it's skipped on Travis
if 'TRAVIS' not in os.environ:
tf.summary.scalar('bytes_in_use', tf.contrib.memory_stats.BytesInUse())
tf.summary.scalar('max_bytes', tf.contrib.memory_stats.MaxBytesInUse())
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
summary_merged = summary_utils.summaries_to_keep(
summaries,
global_summaries,
# histograms=summary_histograms,
# input_imgs=summary_img_images,
# input_bevs=summary_bev_images
)
allow_gpu_mem_growth = config.allow_gpu_mem_growth
if allow_gpu_mem_growth:
# GPU memory config
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = allow_gpu_mem_growth
sess = tf.Session(config=sess_config)
else:
sess = tf.Session()
# Create unique folder name using datetime for summary writer
datetime_str = str(datetime.datetime.now())
logdir = logdir + '/train'
train_writer = tf.summary.FileWriter(logdir + '/' + datetime_str, sess.graph)
# Create init op
init = tf.global_variables_initializer()
# Parse type and location of pretrained weights
net_config = net_builder.get_net_config(model_config)
pretrained_weights_type = getattr(net_config, 'pretrained_weights_type', None)
if pretrained_weights_type is not None:
pretrained_weights_dir = os.path.join(
monopsr.data_dir(), 'pretrained', net_config.pretrained_weights_name)
pretrained_weights_path = tf.train.get_checkpoint_state(
pretrained_weights_dir).model_checkpoint_path
else:
pretrained_weights_path = None
# Overwrite existing checkpoints or continue from last saved checkpoint
if train_config.overwrite_checkpoints:
# Initialize the variables
sess.run(init)
if pretrained_weights_type == 'slim':
# Scope is resnet_v2_50 or resnet_v2_101 or vgg_16
scope = net_config.pretrained_weights_name[:-11]
checkpoint_utils.restore_weights_by_scope(sess, pretrained_weights_path, scope)
elif pretrained_weights_type == 'obj_detection_api':
checkpoint_utils.restore_obj_detection_api_weights(
sess, model, pretrained_weights_path)
elif pretrained_weights_type == 'all':
saver.restore(sess, pretrained_weights_path)
else:
print('Pre-trained weights are not being used.')
else:
# Look for existing checkpoints
checkpoint_utils.load_checkpoints(checkpoint_dir, saver)
if len(saver.last_checkpoints) > 0:
checkpoint_to_restore = saver.last_checkpoints[-1]
saver.restore(sess, checkpoint_to_restore)
else:
# Initialize the variables
sess.run(init)
if pretrained_weights_type == 'slim':
# Scope is either resnet_v2_50 or resnet_v2_101
scope = net_config.pretrained_weights_name[:-11]
checkpoint_utils.restore_weights_by_scope(sess, pretrained_weights_path, scope)
elif pretrained_weights_type == 'obj_detection_api':
checkpoint_utils.restore_obj_detection_api_weights(sess, model,
pretrained_weights_path)
elif pretrained_weights_type == 'all':
saver.restore(sess, pretrained_weights_path)
else:
print('Pre-trained weights are not being used.')
# Read the global step if restored
global_step = tf.train.global_step(sess, global_step_tensor)
print('Starting from step {} / {}'.format(global_step, max_iterations))
# Main Training Loop
last_time = time.time()
for step in range(global_step, max_iterations + 1):
# Save checkpoint
if step % checkpoint_interval == 0:
global_step = tf.train.global_step(sess, global_step_tensor)
saver.save(sess, save_path=checkpoint_prefix, global_step=global_step)
print('{}: Step {} / {}: Checkpoint saved to {}-{:08d}'.format(
config.config_name, step, max_iterations,
checkpoint_prefix, global_step))
# Create feed_dict for inferencing
feed_dict, sample_dict = model.create_feed_dict()
# DEBUG
# output = sess.run(output_dict, feed_dict=feed_dict)
# output_debug = sess.run(output_debug_dict, feed_dict=feed_dict)
# loss_debug = sess.run(losses_dict, feed_dict=feed_dict)
# Write summaries and train op
if step % summary_interval == 0:
current_time = time.time()
time_elapsed = current_time - last_time
last_time = current_time
train_op_loss, summary_out = sess.run([train_op, summary_merged], feed_dict=feed_dict)
print('{}: Step {}: Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
config.config_name, step, train_op_loss, time_elapsed))
train_writer.add_summary(summary_out, step)
else:
# Run the train op only
sess.run(train_op, feed_dict)
# Close the summary writers
train_writer.close()
| [
37811,
11242,
3213,
2746,
21997,
13,
198,
198,
1212,
2393,
3769,
257,
14276,
3047,
2446,
284,
4512,
257,
198,
11242,
3213,
17633,
13,
198,
37811,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
... | 2.440574 | 3,273 |
# Net01.py
# Server
# command A 창에서 실행
from socket import *
svrIP = '59.29.224.54' # Host IP
svrPort = 64000
svrAddr = (svrIP,svrPort)
svrSocket = socket(AF_INET,SOCK_STREAM)
svrSocket.bind(svrAddr)
svrSocket.listen(0)
# Listening 상태 완료
# Connect 허락
cliSocket, cliAddr = svrSocket.accept()
# 메세지 전송(send)
cliSocket.send(b"Welcome Py Server")
# 메세지 수신(recv)
data = cliSocket.recv(1024)
print(data.decode('utf-8'))
| [
2,
3433,
486,
13,
9078,
198,
2,
9652,
198,
2,
3141,
317,
23821,
108,
121,
168,
245,
238,
168,
226,
250,
23821,
233,
97,
169,
244,
231,
220,
198,
6738,
17802,
1330,
1635,
198,
21370,
81,
4061,
796,
705,
3270,
13,
1959,
13,
24137,
... | 1.767932 | 237 |
from scapy.all import *
from someip_fuzzer.config import config
from someip_fuzzer.log import log_info
from someip_fuzzer.types import *
from queue import Queue
import threading
import time
load_contrib("automotive.someip")
| [
6738,
629,
12826,
13,
439,
1330,
1635,
198,
6738,
617,
541,
62,
69,
4715,
263,
13,
11250,
1330,
4566,
198,
6738,
617,
541,
62,
69,
4715,
263,
13,
6404,
1330,
2604,
62,
10951,
198,
6738,
617,
541,
62,
69,
4715,
263,
13,
19199,
1330... | 3.082192 | 73 |
#!/usr/bin/env python3
import pathlib
import typing as t
import mimetypes
mimetypes.add_type("text/gemini", ".gmi")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628,
198,
11748,
3108,
8019,
198,
11748,
19720,
355,
256,
198,
11748,
17007,
2963,
12272,
628,
198,
76,
320,
2963,
12272,
13,
2860,
62,
4906,
7203,
5239,
14,
24090,
5362,
1600,
27071,
7... | 2.673913 | 46 |
import re
from setuptools import setup, find_packages
if __name__ == '__main__':
# get requirements
with open('requirements.txt') as f:
requirements = f.read()
requirements = [
r for r in requirements.splitlines() if r != '']
# get readme
with open('README.rst') as f:
readme = f.read()
# get version number
with open('pgsheets/__init__.py') as f:
version = f.read()
version = re.search(
r'^__version__\s*=\s*[\'"]([\d\.]*)[\'"]\s*$',
version,
re.MULTILINE).groups(1)[0]
setup(name='pgsheets',
version=version,
packages=find_packages(exclude=['test', 'test.*']),
author="Henry Stokeley",
author_email="henrystokeley@gmail.com",
description=("Manipulate Google Sheets Using Pandas DataFrames"),
long_description=readme,
license="MIT",
url="https://github.com/henrystokeley/pgsheets",
install_requires=requirements,
test_suite='test',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Topic :: Office/Business :: Financial :: Spreadsheet',
],
keywords='pandas google sheets spreadsheets dataframe',
)
| [
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
1303,
651,
5359,
198,
220,
220,
220,
351,
1280,
10786,
8897,
18883... | 2.181955 | 665 |
from dataclasses import dataclass
from datetime import datetime
from app.core import (
DbAddTransactionIn,
DbAddTransactionOut,
DbAddUserIn,
DbAddUserOut,
DbAddWalletIn,
DbAddWalletOut,
DbFetchStatisticsIn,
DbFetchStatisticsOut,
DbGetUserWalletCountIn,
DbGetUserWalletCountOut,
DbGetWalletIn,
DbGetWalletOut,
DbUpdateCommissionStatsIn,
DbUpdateCommissionStatsOut,
DbUpdateWalletBalanceIn,
DbUpdateWalletBalanceOut,
DbUserTransactionsOutput,
DbWalletTransactionsOutput,
IBTCWalletRepository,
)
from app.utils.result_codes import ResultCode
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
598,
13,
7295,
1330,
357,
198,
220,
220,
220,
360,
65,
4550,
48720,
818,
11,
198,
220,
220,
220,
360,
65,
4550,
48720,
7975,
11... | 2.522222 | 270 |
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as inte
import astropy.constants as con
import astropy.units as u
from tqdm import tqdm
import os
import utils as utl
import irlf as irlf
# LF Parameters
zdo = np.array([0.0, 0.3, 0.45, 0.6, 0.8, 1.0, 1.2, 1.7, 2.0, 2.5, 3.0, 4.2, 5.0])
zup = np.array([0.3, 0.45, 0.6, 0.8, 1.0, 1.2, 1.7, 2.0, 2.5, 3.0, 4.2, 5.0, 6.0])
zcen = (zdo + zup)/2
alp, alp_err = 1.28*np.ones(len(zcen)), 0.295*np.ones(len(zcen))
logl, logl_err = np.array([10.02, 9.97, 10.01, 10.23, 10.23, 10.52, 10.61, 10.75, 11.13, 11.16, 10.86, 11.24, 11.33]),\
np.array([0.58, 0.545, 0.49, 0.515, 0.505, 0.515, 0.48, 0.495, 0.46, 0.49, 0.45, 0.60, 0.48])
logp, logp_err = np.array([-2.30, -1.98, -1.92, -1.95, -1.75, -2.00, -2.04, -2.35, -2.73, -2.76, -2.73, -3.29, -3.51]),\
np.array([0.415, 0.365, 0.325, 0.29, 0.335, 0.305, 0.28, 0.305, 0.28, 0.295, 0.295, 0.64, 0.645])
sig, sig_err = 0.65*np.ones(len(zcen)), 0.08*np.ones(len(zcen))
# Lower limit of integration
limit1 = 1e8*((con.L_sun.to(u.erg/u.s)).value)
# Defining Kappa and the range of luminosities over which we want to perform integration
kap_ir = 4.5*10**(-44)
lums_ir1 = np.logspace(10, 13, 10000)*(con.L_sun.to(u.erg/u.s).value)
# Location of the results file
p2 = os.getcwd() + '/Results/'
def lum_den22(lum, lst9, lst9err, phi9, phi9err, sig9, sig9err, alp9, alp9err, limit):
"""
Function to calculate luminosity density
----------------------------------------
Parameters:
-----------
lum : float, numpy.ndarray
luminosity range
lst9, phi9, sig9, alp9 : float
LF parameters
lst9err, phi9err, sig9err, alp9err : float
errors in LF parameters
limit : float
lower limit of the intensity
as a function of L*
default is 0.03 (from Madau&Dickinson)
-----------
return
-----------
numpy.ndarray :
an array of luminosity density
"""
# Values of Parameters
# For L*
lst7 = np.random.lognormal(lst9*np.log(10), lst9err*np.log(10), 10000)
lst2 = (lst7)*((con.L_sun.to(u.erg/u.s)).value)
#print('\nL*')
#print(np.mean(lst2))
#print(np.std(lst2))
phi7 = np.random.lognormal(phi9*np.log(10), phi9err*np.log(10), 10000)
phi2 = phi7
#print('\nphi*')
#print(np.mean(phi2))
#print(np.std(phi2))
# For alpha and sigma
alp2 = np.random.normal(alp9, alp9err, 10000)
sig2 = np.random.normal(sig9, sig9err, 10000)
# Values of luminosities
nor_lum = np.logspace(np.log10(limit*np.mean(lst2)), np.max(np.log10(lum)), 1000000)
# Integration array
rho2 = np.array([])
# Integration starts
for i in tqdm(range(10000)):
if lst2[i] < 0 :#alp2[i] != alp2[i] or lum2[i] != lum2[i] or lum2[i] == 0 or phi2[i] != phi2[i]:
continue
else:
#nor_lum = np.logspace(np.log10(limit*lst9), np.max(np.log10(lum)), 100000)
nor_sc1 = irlf.sandage(lums9=nor_lum, alp9=alp2[i], phi9=phi2[i], sig9=sig2[i], lst9=lst2[i])
nor_sc = nor_lum*nor_sc1#/phi2[j]
rho_nor = inte.simps(y=nor_sc, x=np.log10(nor_lum))
rho2 = np.hstack((rho2, rho_nor))
#print("\nlength: ")
#print(len(rho2))
#print(np.mean(rho2))
return rho2
def sfrd_w_err(lum, lst9, lst9err, phi9, phi9err, sig9, sig9err, alp9, alp9err, kappa, limit):
"""
Function to calculate star formation rate density
-------------------------------------------------
Parameters:
-----------
lum : float, numpy.ndarray
luminosity range
lst9, phi9, sig9, alp9 : float
LF parameters
lst9err, phi9err, sig9err, alp9err : float
errors in LF parameters
kappa : float
conversion factor b/w luminosity density and
star formation rate
limit : float
lower limit of the intensity
as a function of L*
default is 0.03 (from Madau&Dickinson)
-----------
return
-----------
float
mean star formation rate
float
error in star formation rate
"""
lum_den2 = lum_den22(lum, lst9, lst9err, phi9, phi9err, sig9, sig9err, alp9, alp9err, limit)
kpp1 = kappa
sfr2 = kpp1*lum_den2
return np.mean(sfr2), np.std(sfr2)
#"""
#sfrd_ir, sfrd_err_ir = sfrd_w_err(lum=lums_ir1, lst9=logl[0], lst9err=logl_err[0], \
#phi9=logp[0], phi9err=logp_err[0], sig9=sig[0], sig9err=sig_err[0], alp9=alp[0], \
#alp9err=alp_err[0], kappa=kap_ir, limit=limit1)
#print(sfrd_ir)
#print(sfrd_err_ir)
f33 = open(p2 + 'sfrd_wang_new.dat','w')
f33.write('#Name_of_the_paper\tZ_down\tZ_up\tSFRD\tSFRD_err\n')
err_s = np.loadtxt(p2 + 'wang.dat', usecols=4, unpack=True)
# Without errors
for i in range(len(zcen)):
lt11 = 0.00001/kap_ir
sam = np.logspace(np.log10(lt11), np.max(np.log10(lums_ir1)), 100000)
lf = irlf.sandage(lums9=sam, alp9=alp[i], phi9=10**logp[i], sig9=sig[i], lst9=(10**logl[i])*(con.L_sun.to(u.erg/u.s).value))
nor = sam*lf
rho = inte.simps(y=nor, x=np.log10(sam))
sfrd = rho*kap_ir
print('For redshift: ', zcen[i])
print('SFRD: ', sfrd)
print('log(SFRD): ', np.log10(sfrd))
f33.write('Wang_et_al_2019' + '\t' + str(zdo[i]) + '\t' + str(zup[i]) + '\t' + str(sfrd) + '\t' + str(err_s[i]) + '\n')
f33.close()
"""
# Performing the integration
f33 = open(p2 + 'sfrd_wang_new.dat','w')
f33.write('#Name_of_the_paper\tZ_down\tZ_up\tSFRD\tSFRD_err\n')
for j in range(len(zcen)):
sfrd_ir, sfrd_err_ir = sfrd_w_err(lum=lums_ir1, lst9=logl[j], lst9err=logl_err[j], \
phi9=logp[j], phi9err=logp_err[j], sig9=sig[j], sig9err=sig_err[j], alp9=alp[j], \
alp9err=alp_err[j], kappa=kap_ir, limit=0.03)
f33.write('Wang_et_al_2019' + '\t' + str(zdo[j]) + '\t' + str(zup[j]) + '\t' + str(sfrd_ir) + '\t' + str(sfrd_err_ir) + '\n')
f33.close()
""" | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
629,
541,
88,
13,
18908,
4873,
355,
493,
68,
198,
11748,
6468,
28338,
13,
9979,
1187,
355,
369,
198,
11748,
6468,
28338,
13,
4166... | 1.941118 | 3,023 |
import base64
WRITE_MODE = 'w'
READ_MODE = 'r'
READ_WRITE_MODE = 'r+'
READ_BINARY_MODE = 'rb'
WRITE_BINARY_MODE = 'wb'
KEY_MAP_FILE_NAME = 'key.map'
FILE_MAP_FILE_NAME = 'file.map'
POSITION_MAP_FILE_NAME = 'position.map'
STASH_FOLDER_NAME = 'stash'
CLOUD_MAP_FILE_NAME = 'cloud.map'
LOG_FILE_NAME = 'pyoram.log'
| [
11748,
2779,
2414,
198,
198,
18564,
12709,
62,
49058,
796,
705,
86,
6,
198,
15675,
62,
49058,
796,
705,
81,
6,
198,
15675,
62,
18564,
12709,
62,
49058,
796,
705,
81,
10,
6,
198,
15675,
62,
33,
1268,
13153,
62,
49058,
796,
705,
261... | 2.12 | 150 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_MEASURES_COMPONENT_ENDPOINT,
API_MEASURES_COMPONENT_TREE_ENDPOINT,
API_MEASURES_SEARCH_HISTORY_ENDPOINT,
)
from sonarqube.utils.common import GET, PAGES_GET
class SonarQubeMeasures(RestClient):
"""
SonarQube measures Operations
"""
special_attributes_map = {"from_date": "from", "to_date": "to"}
def __init__(self, **kwargs):
"""
:param kwargs:
"""
super(SonarQubeMeasures, self).__init__(**kwargs)
@GET(API_MEASURES_COMPONENT_ENDPOINT)
def get_component_with_specified_measures(
self,
component,
metricKeys,
branch=None,
pullRequest=None,
additionalFields=None,
):
"""
SINCE 5.4
Return component with specified measures.
:param component: Component key
:param branch: Branch key.
:param pullRequest: Pull request id.
:param additionalFields: Comma-separated list of additional fields that can be returned in the response.
Possible values are for: metrics,periods
:param metricKeys: Comma-separated list of metric keys. Possible values are for: ncloc,complexity,violations
:return:
"""
@PAGES_GET(API_MEASURES_COMPONENT_TREE_ENDPOINT, item="components")
def get_component_tree_with_specified_measures(
self,
component,
metricKeys,
branch=None,
pullRequest=None,
asc="true",
additionalFields=None,
metricPeriodSort=None,
metricSort=None,
metricSortFilter="all",
ps=None,
q=None,
s="name",
qualifiers=None,
strategy="all",
):
"""
SINCE 5.4
Navigate through components based on the chosen strategy with specified measures. The baseComponentId or
the component parameter must be provided.
:param component: Component key.
:param branch: Branch key.
:param pullRequest: Pull request id.
:param metricKeys: Comma-separated list of metric keys. Possible values are for: ncloc,complexity,violations
:param additionalFields: Comma-separated list of additional fields that can be returned in the response.
Possible values are for: metrics,periods
:param asc: Ascending sort, Possible values are for: true, false, yes, no. default value is true.
:param metricPeriodSort: Sort measures by leak period or not ?. The 's' parameter must contain
the 'metricPeriod' value
:param metricSort: Metric key to sort by. The 's' parameter must contain the 'metric' or 'metricPeriod' value.
It must be part of the 'metricKeys' parameter
:param metricSortFilter: Filter components. Sort must be on a metric. Possible values are:
* all: return all components
* withMeasuresOnly: filter out components that do not have a measure on the sorted metric
default value is all.
:param ps: Page size. Must be greater than 0 and less or equal than 500
:param q: Limit search to:
* component names that contain the supplied string
* component keys that are exactly the same as the supplied string
:param qualifiers:Comma-separated list of component qualifiers. Filter the results with
the specified qualifiers. Possible values are:
* BRC - Sub-projects
* DIR - Directories
* FIL - Files
* TRK - Projects
* UTS - Test Files
:param s: Comma-separated list of sort fields,Possible values are for: name, path, qualifier, metric, metricPeriod.
and default value is name
:param strategy: Strategy to search for base component descendants:
* children: return the children components of the base component. Grandchildren components are not returned
* all: return all the descendants components of the base component. Grandchildren are returned.
* leaves: return all the descendant components (files, in general) which don't have other children.
They are the leaves of the component tree.
default value is all.
:return:
"""
@PAGES_GET(API_MEASURES_SEARCH_HISTORY_ENDPOINT, item="measures")
def search_measures_history(
self,
component,
metrics,
branch=None,
pullRequest=None,
from_date=None,
to_date=None,
):
"""
SINCE 6.3
Search measures history of a component
:param component: Component key.
:param branch: Branch key.
:param pullRequest: Pull request id.
:param metrics: Comma-separated list of metric keys.Possible values are for: ncloc,coverage,new_violations
:param from_date: Filter measures created after the given date (inclusive).
Either a date (server timezone) or datetime can be provided
:param to_date: Filter measures created before the given date (inclusive).
Either a date (server timezone) or datetime can be provided
:return:
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
449,
498,
15483,
16380,
198,
6738,
3367,
283,
421,
1350,
13,
26791,
13,
2118,
62,
16366,
1330,
8324,
... | 2.548728 | 2,083 |
import HoodTracker
from CommonUtils import *
import PySide2.QtWidgets as QtWidgets
| [
11748,
17233,
35694,
198,
6738,
8070,
18274,
4487,
1330,
1635,
198,
11748,
9485,
24819,
17,
13,
48,
83,
54,
312,
11407,
355,
33734,
54,
312,
11407,
198
] | 3.074074 | 27 |
"""Abstract object containing information about plots."""
# This file is part of the 'tomate' project
# (http://github.com/Descanonge/tomate) and subject
# to the MIT License as defined in the file 'LICENSE',
# at the root of this project. © 2020 Clément HAËCK
from typing import Any, Dict, List, Union, TYPE_CHECKING
from matplotlib.axes import Axes
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tomate.coordinates.time import Time
from tomate.custom_types import Array, KeyLikeInt, KeyLikeValue
from tomate.keys.key import KeyValue
from tomate.keys.keyring import Keyring
from tomate.scope import Scope
if TYPE_CHECKING:
from tomate.db_types.plotting.data_plot import DataPlot
class PlotObjectABC():
"""Object containing information about plots.
And methods for acting on that plot.
Subclasses are to be made for different types of plot object,
such as lines, 2D images, contours, ...
:attr db: DataBase:
:attr scope: Scope: Scope of plotted data.
If data is to be fetched from database, ought to be a child of its
loaded scope, its parent keyring should have the correct dimension.
:attr target_scope: Union[str, Scope]: Scope to fetch data from. Can
be the name of a scope, in which case the corresponding scope from
the database is used.
:attr ax: matplotlib.axes.Axes:
:attr cax: matplotlib.axes.Axes: Colorbar axis.
:attr object: Any: Object returned by matplotlib.
:attr colorbar: matplotlib.colorbar.Colorbar: Colorbar object.
:attr data: Optional[Array]: If not None, data to use (instead of fetching
it from database).
:attr kwargs: Dict[Any]: Keyword arguments to use for creating plot.
:attr axes: List[str]: Dimensions and variables name, in order of axes
(x, y, [z], [color]).
:attr var_idx: Union[int, List[int]]: Positions of plotted variables
in `axes` attribute.
"""
DIM = 0 #: Dimension of the data to plot.
@property
def keyring(self) -> Keyring:
"""Keyring to use for fetching data."""
return self.scope.parent_keyring
def update_scope(self, **keys: KeyLikeInt):
"""Update some dimensions scope.
Only change specified dimensions.
Acts on the parent scope of `scope` attribute.
"""
keyring = self.keyring
for dim, key in keys.items():
keyring[dim] = key
self.reset_scope(keyring)
def update_scope_by_value(self, **keys: KeyLikeValue):
"""Update some dimensions scope by value.
Only change specified dimensions.
Acts on the parent scope of `scope` attribute.
"""
keys_ = {}
for dim, key in keys.items():
keys_[dim] = KeyValue(key).apply(self.scope.dims[dim])
self.update_scope(**keys_)
def reset_scope(self, keyring: Keyring = None, **keys: KeyLikeInt):
"""Reset scope.
Acts on the parent scope of `scope` attribute.
"""
scope = self.db.get_subscope(self._target_scope, keyring,
int2list=False, **keys)
if scope.var != self.scope.var:
self._update_variables(scope.var[:].tolist())
self.scope = scope
def _update_variables(self, var: List[str]):
"""Update variables plotted."""
if isinstance(self.var_idx, int):
var_idx = [self.var_idx]
else:
var_idx = self.var_idx
for i, v in zip(var_idx, var):
self.axes[i] = v
def reset_scope_by_value(self, **keys: KeyLikeValue):
"""Reset scope.
Acts on the parent scope of `scope` attribute.
"""
scope = self.db.get_subscope_by_value(self._target_scope,
int2list=False, **keys)
self.scope = scope
def get_data(self) -> Array:
"""Retrieve data for plot.
Either from `data` attribute if specified, or from database.
"""
if self.data is not None:
return self.data
self.check_keyring()
return self._get_data()
def _get_data(self) -> Array:
"""Retrieve data from database."""
raise NotImplementedError()
def check_keyring(self):
"""Check if keyring has correct dimension.
:raises IndexError:
"""
dim = len(self.keyring.get_high_dim())
if dim != self.DIM:
raise IndexError("Data to plot does not have right dimension"
f" (is {dim}, expected {self.DIM})")
def find_axes(self, axes: List[str] = None) -> List[str]:
"""Get list of axes.
Find to what correspond the figures axes from plot object keyring.
:param axes: [opt] Supply axes instead of guessing from keyring.
"""
raise NotImplementedError()
@classmethod
def create(cls, db: 'DataPlot', ax: Axes,
scope: Union[str, Scope] = 'loaded',
axes: List[str] = None, data=None,
kwargs: Dict[str, Any] = None,
**keys: KeyLikeInt):
"""Create plot object."""
scope_obj = db.get_subscope_by_value(scope, name='plotted',
**keys, int2list=False)
if scope_obj.var.size == 1:
dims = db[scope_obj.var[0]].dims
scope_obj.slice(**{d: 0 for d in db.dims if d not in dims},
int2list=False)
if kwargs is None:
kwargs = {}
po = cls(db, ax, scope_obj, axes, data, **kwargs)
po._target_scope = scope
po.axes = po.find_axes(axes)
return po
def set_kwargs(self, replace: bool = True, **kwargs: Any):
"""Set plot options.
:param replace: If True (default), overwrite options already stored
"""
if replace:
self.kwargs.update(kwargs)
else:
kwargs.update(self.kwargs)
self.kwargs = kwargs
def set_plot(self):
"""Create or update plot."""
if self.object is None:
self.create_plot()
else:
self.update_plot()
def create_plot(self):
"""Plot data."""
raise NotImplementedError()
def remove(self):
"""Remove plot from axes."""
self.object.remove()
self.object = None
def update_plot(self, **keys: KeyLikeInt):
"""Update plot.
:param keys: [opt] Keys to change, as for `update_scope`.
"""
self.update_scope(**keys)
self.remove()
self.create_plot()
def set_limits(self):
"""Change axis limits to data."""
self.ax.set_xlim(*self.get_limits(self.axes[0]))
self.ax.set_ylim(*self.get_limits(self.axes[1]))
def get_limits(self, name):
"""Retrieve limits for one of the axis.
:param name: Coordinate or variable name.
"""
if name in self.scope.coords:
dim = self.scope[name]
if isinstance(dim, Time):
limits = dim.index2date([0, -1], pydate=True)
else:
limits = dim.get_limits()
else:
vmin = self.db.vi.get_attribute_default(name, 'vmin')
vmax = self.db.vi.get_attribute_default(name, 'vmax')
limits = vmin, vmax
return limits
def add_colorbar_axis(self, loc, size, pad, **kwargs):
"""Add axis for colorbar."""
divider = make_axes_locatable(self.ax)
self.cax = divider.append_axes(loc, size, pad, **kwargs)
def add_colorbar(self, loc: str = "right",
size: float = .1,
pad: float = 0.,
**kwargs):
"""Add colorbar.
:param loc: {'left', 'right', 'bottom', 'top'}
"""
self.add_colorbar_axis(loc, size, pad, **kwargs)
self.colorbar = self.ax.figure.colorbar(self.object, cax=self.cax, ax=self.ax)
def _get_label(self, name: str, fullname: Union[bool, str],
units: Union[bool, str]):
"""Get label for axis.
:param name: Coordinate or variable name.
:param fullname: If True, use fullname if available.
'fullname' attribute from a coordinate or the VI is used.
If `fullname` is a string, use that attribute instead in the VI.
:param units: If True, add units to label if available.
'fullname' attribute from a coordinate or the VI is used.
If `fullname` is a string, use that attribute instead in the VI.
"""
if name in self.scope.coords:
label = self.scope[name].fullname
if not label or not fullname:
label = name
if units:
c_units = self.scope[name].units
if c_units:
label += ' [{}]'.format(c_units)
else:
attr = fullname if isinstance(fullname, str) else 'fullname'
label = self.db.vi.get_attribute_default(name, attr)
if label is None or not fullname:
label = name
if units:
attr = units if isinstance(units, str) else 'units'
v_units = self.db.vi.get_attribute_default(name, 'units')
if v_units:
label += ' [{}]'.format(v_units)
return label
def set_labels(self, axes: Union[str, List[str]] = None,
fullname: Union[bool, str] = True,
units: Union[bool, str] = True):
"""Set axes labels.
Set colorbar labels if present.
:param axes: Axes to set labels to, can be 'x', 'y', 'colorbar' or 'cbar'.
If None, all are set.
:param fullname: If True, use fullname if available.
'fullname' attribute from a coordinate or the VI is used.
If `fullname` is a string, use that attribute instead in the VI.
:param units: If True, add units to label if available.
'fullname' attribute from a coordinate or the VI is used.
If `fullname` is a string, use that attribute instead in the VI.
"""
if axes is None:
axes = ['X', 'Y']
if self.colorbar is not None:
axes.append('colorbar')
elif not isinstance(axes, (list, tuple)):
axes = [axes]
for ax in axes:
if ax.upper() == 'X':
name = self.axes[0]
f = self.ax.set_xlabel
elif ax.upper() == 'Y':
name = self.axes[1]
f = self.ax.set_ylabel
elif ax.upper() in ['COLORBAR', 'CBAR']:
name = self.axes[-1]
f = self.colorbar.set_label
else:
raise KeyError(f"Axis name not recognized ({ax}).")
label = self._get_label(name, fullname, units)
if label is not None:
f(label)
| [
37811,
23839,
2134,
7268,
1321,
546,
21528,
526,
15931,
198,
198,
2,
770,
2393,
318,
636,
286,
262,
705,
39532,
378,
6,
1628,
198,
2,
357,
4023,
1378,
12567,
13,
785,
14,
24564,
272,
14220,
14,
39532,
378,
8,
290,
2426,
198,
2,
28... | 2.181818 | 5,038 |
# Reference Source: https://github.com/llSourcell/linear_regression_live/blob/master/demo.py
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, mean_squared_log_error, average_precision_score
if __name__ == "__main__":
lr = LinearRegression("YearPredictionMSD/YearPredictionMSD.txt")
#X_train, X_test, y_train, y_test = train_test_split(lr.X, lr.y, test_size = 0.2, random_state = 1)
# This split is provided by the repository. It avoids the 'producer effect' by making sure no song from a given artist ends up in both the train and test set.
X_train, y_train = StandardScaler().fit_transform(lr.X[:lr.division]), lr.y[:lr.division]
X_test, y_test = StandardScaler().fit_transform(lr.X[lr.division:]), lr.y[lr.division:]
split_size = X_train.shape[0]//lr.cv_splits
ev = []
mae = []
rmse = []
msle = []
r2 = []
global_mae = []
lambdas = []
best_mae = 10
best_l1 = 0
b, W = None, None
df = pd.DataFrame(np.concatenate((X_train,y_train[:, None]), axis = 1), columns = list(range(90, -1, -1)))
df = shuffle(df)
X_train = df.drop([0], axis = 1)
y_train = df[0]
for _ in range(8):
ev = []
mae = []
rmse = []
msle = []
r2 = []
print("Training and Testing for Lambda ", lr.l1_lambda)
for i in range(lr.cv_splits):
print("Cross Validation for Split ", i+1)
start = i * split_size
end = (i+1) * split_size
X = np.concatenate((X_train[:start], X_train[end:]), axis = 0)
y = np.concatenate((y_train[:start], y_train[end:]), axis=0)
b = np.random.normal()
W = np.random.normal(size=lr.X.shape[1])
b, W, cost_graph = lr.gradient_descent_runner(X, y, b, W)
plt.plot(range(lr.num_iterations), np.log(cost_graph))
plt.title("Number of Iterations vs Cost")
plt.show()
X, y = X_train[start:end], y_train[start:end]
h = lr.hypothesis(b, W, X)
ev.append(explained_variance_score(y, h))
print("Explained Variance : ", ev[-1])
mae.append(mean_absolute_error(y, h))
print("Mean Absolute Error : ", mae[-1])
rmse.append(mean_squared_error(y, h) ** .5)
print("Root Mean Squared Error : ", rmse[-1])
msle.append(mean_squared_log_error(y, h))
print("Mean Squared Log Error : ", msle[-1])
r2.append(r2_score(y, h))
print("R2 Score : ", r2[-1])
global_mae.append(np.average(mae))
lambdas.append(lr.l1_lambda)
if best_mae > global_mae[-1]:
best_mae = global_mae[-1]
best_l1 = lr.l1_lambda
lr.l1_lambda *= 3 # 3 is so that we can check for points between 0 and 1 at each power of 10
print("Test Data")
lr.l1_lambda = best_l1
print("With best hyperparameter lambda ", lr.l1_lambda)
b = np.random.normal(scale=1 / X_train.shape[1] ** .5)
W = np.random.normal(scale=1 / X_train.shape[1] ** .5, size=X_train.shape[1])
b, W, cost_graph = lr.gradient_descent_runner(X_train, y_train, b, W)
np.save("LLRWeights.npy", np.append(W, b))
h = lr.hypothesis(b, W, X_test)
ev.append(explained_variance_score(y_test, h))
print("Explained Variance : ", ev[-1])
mae.append(mean_absolute_error(y_test, h))
print("Mean Absolute Error : ", mae[-1])
rmse.append(mean_squared_error(y_test, h) ** .5)
print("Root Mean Squared Error : ", rmse[-1])
msle.append(mean_squared_log_error(y_test, h))
print("Mean Squared Log Error : ", msle[-1])
r2.append(r2_score(y_test, h))
print("R2 Score : ", r2[-1])
plt.plot(np.log(lambdas), global_mae)
plt.title("Lasso Regression")
plt.xlabel("Log of Lambda")
plt.ylabel("Mean Absolute Error")
plt.show()
| [
2,
20984,
8090,
25,
3740,
1378,
12567,
13,
785,
14,
297,
7416,
297,
14,
29127,
62,
2301,
2234,
62,
12583,
14,
2436,
672,
14,
9866,
14,
9536,
78,
13,
9078,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
459... | 2.153646 | 1,920 |
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='sparsedlist',
version='0.4',
packages=[''],
url='https://github.com/bdragon300/sparsedlist',
license='Apache-2.0',
author='Igor Derkach',
author_email='gosha753951@gmail.com',
description='Endless list with non-contiguous indexes',
long_description=long_description,
long_description_content_type='text/x-rst',
classifiers=[
'Programming Language :: Python :: 3',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires=[
'pyskiplist'
]
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
81,
301,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
1... | 2.5625 | 352 |
import time
import numpy as np
import random
# np.random.seed(5)
# random.seed(5)
import sys
import os
import argparse
sys.path.insert(1, '../utils/')
from coord_helper import *
from data_helper import *
import bullet_client as bc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
args = parser.parse_args()
data_dir = os.path.join(args.home_dir_data, 'geo_data')
collection_result_dir = os.path.join(args.home_dir_data, 'collection_result_vary_scale')
collection_labels_dir = os.path.join(collection_result_dir, 'labels')
all_dict = {}
ct = 0
for result_file in os.listdir(collection_result_dir):
if not result_file.endswith('.txt'):
continue
result_file_name = result_file[:-4]
result_np = load_result_file(os.path.join(collection_result_dir, result_file))
if result_np.size == 0:
continue
all_dict[result_file_name] = []
n_v = 0
one_v_ct = 0
prev_scale = None
cur_v_arr = []
for i in range(result_np.shape[0]):
if i == 0:
one_v_ct += 1
prev_scale = result_np[i][:6]
cur_v_arr.append(i)
continue
if np.allclose(result_np[i][:6], prev_scale):
one_v_ct += 1
cur_v_arr.append(i)
continue
all_dict[result_file_name].append(np.array(cur_v_arr))
cur_v_arr = [i]
prev_scale = result_np[i][:6]
one_v_ct = 1
n_v += 1
if len(cur_v_arr) > 0:
n_v += 1
all_dict[result_file_name].append(np.array(cur_v_arr))
# print(os.path.join(collection_result_dir, result_file))
# print(all_dict[result_file_name])
ct += 1
if ct % 100 == 0:
print(ct)
# if ct >= 5:
# break
out_dir = os.path.join(collection_labels_dir, 'all_list.txt')
all_result_file_name = list(all_dict.keys())
random.shuffle(all_result_file_name)
out_arr = []
for result_file_name in all_result_file_name:
one_arr = all_dict[result_file_name]
for i in range(len(one_arr)):
for j in range(one_arr[i].shape[0]):
out_arr.append('{}_v{}_{}'.format(result_file_name, i+1, one_arr[i][j]))
with open(out_dir, 'w+') as f:
for line in out_arr:
f.write(line + '\n')
print('total v', len(all_result_file_name))
print('avg', len(out_arr) / len(all_result_file_name))
| [
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
2,
45941,
13,
25120,
13,
28826,
7,
20,
8,
198,
2,
4738,
13,
28826,
7,
20,
8,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
17597,
13,
6978,... | 2.256592 | 986 |
# -*- coding: utf-8 -*-
'''
Anagram Difference - https://www.hackerrank.com/challenges/anagram/problem
''' | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
2025,
6713,
43795,
532,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
272,
6713,
14,
45573,
198,
7061,
6
] | 2.547619 | 42 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import requests
import json | [
11748,
7007,
198,
11748,
33918
] | 5.4 | 5 |
# Generated by Django 3.0.8 on 2020-08-21 17:43
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
2919,
12,
2481,
1596,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import codecs
import gzip
import pickle
from functools import reduce
from itertools import chain
from operator import mul
from pyodesys.symbolic import SymbolicSys
radiolysis1 = """
H4sIAGqdpVgC/42YD1BURRjA7x0gpliplalYYhes4PkHJTHzuFfQIqBXinFlz/OEpxwBssdhJFH4
F5Qs/IuomJZZURqQIlqkaZam1Yzk6KCj0zQ26VQETZM5I/Ru9zje3b31i5vhvd397bfft/vt7vu+
sqBSokPW/jqdLtfukott+fY8uZAIEtGzWoM8r8RYiuxktIEEWfu6a5JtJbGlFgMJtoZ6iwYSwhot
yW7eQPpYg2ijgYRag1mDgfS1hvUKYN3uYkI8pX5sTFpicvp7xvQAYR4JKmKAR415JTFK6W6PPNZ2
j1feRA99r2wNUaqYmQMlMghZ+yhl2eg2kQymWifHWsh9Vj19I/fTKkuykTxgFdxVZAhtsiSTB6ll
Ch1LhrK6WDKM4bFGMpzJUurCGeeuHMHExpCHWAcjeZh1mGgkI2XKFTgXZ5EIiYxCEnkEpeqUn5Cq
p0+/nywRQw+g4wCPIp8qIQCI7JEgcCREIe2+XgAhvgYUGK2WIGgA0Sign+ADxPQAeo4VY5Dm6IIX
MKqV1NJhLOL3p8A4NaDXkDAe0mGC1kSph4j1n2rBb4iJ/hL8gUn+y+0PxPkOEQg8hvzM8wcmB1rh
u1jxWg6jNnNKIOAr4XGkrb8XmMrzScEDPAH55DTeYvVIMEEbJ0F7iN7lNvtPtb8EEXEUpFoowJOI
rwEFnuKbySQkQlsvibf1eubhaWhfYMgfkiF/mI74NlAgBfFngQKpiHeAeYA0yKNmIP5SUWAm4jdT
wMJbTdpFAZ5B3DOOLdaz0GE+C5qH2ZDDpENePYfvkwx4DtpZGYjfTAErfx7cgEwv5AK7055XSJ6X
yAsIi3Mb9u9MGjoJi7Nrjb/+YwvG4iwp3uXIqMXi0oyZ62omNWGxrCPv9oAbg7GYdbm8c271WSw6
bs78S2hvx2LRxYGVg4qOYzH7eGd96qLtvS/mn879Nv9C3QGlpiNsb3j/IizKTYdOZpUl9r6YLy+/
FLz70nhF8h8TmocVpGFx/uXMiAXh47B5bULu2vNX27D5ZkOSmFkRh03LctJOXb86BouW0G36i3N+
Ub1EdZfUVMZ0YHP31LONC693YNO/1+K2pDe2YHHXggn7TrR34YTmUUfWZCcqY5V3bl3jKtigWGE7
3XB+hAObN15bfSN80aH/xYhRm3Ja10claTAJBmn7rd+PZWDzkPTJU8btPYjFwltLV0QOb8fmCz/+
XXnEPAgnVEWE1O3UbcWm7KrImtod07H55xnX2la1ZGMxrTp6w+GIxdi8h5wva21vw6Zz3aeqil05
2NQWnzgsJekkNnU5o+vP9A3FpitXRkqRFzZj0+3Wqoqpuyrw0T5/1hu3xet6a6Z1ttzoGhK3UrYO
dH+FOvJlu9PmyF9idzrs+a5CMlciLyqfQhJK6Vb+3C7ifVEV2EOgD/pPcbR57CrT000veL8des94
G1KdjAJF2c9dClI8sZ+ikSPfnumyOWV7JpkvEbuiygKk9QWg8vDMOx0FbiALAmRoly2EJCyCgGwI
cEBADjQPL0ESciEgDwLyIWAxBBRAAIEAJwQUQoALWu4i6NxeAg3xMgQUQ8ArELAUAkog4FUIKIWA
1yDgdQgog4BlELAcAlZAwEoIWAUBqyGgHAIqIGANBKyFgEoIeAMC1kHAmxDwFgRUQcB6CNgAARsh
YBMEbIaALRBQfWeA5SboLbhVIjXKLbgN8c9+KnE74ofLFNgBRdS1iB+yU2An0o4hvcDbiBcLe4Bd
iB9PU2A34gc3FHgH8WN6CryL+GEiBfb4ftYH6vAe0grOVMBepB1UeIH3AwFfJT9A/LQBBT5E2iGi
F6jTij3UOnyk5WF6FfAx4sXjHmAfdAvuR7zwygN8wpfAYrB6fhKHAQ38II0BjdBd/imUvDgAmXkQ
Sl40QamHQ1DyohmKJA9DyYsjUOrhMyh58TmUemiBkhdfQKmHo1Dy4hgUcn8JJS+OQ6mHE1Dy4iso
9XASSl58DaUevoGSF6eg++M0BHwLAWcg4CwEfAcB30PAD8AtWDT2P8PitZwyGQAA
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
8,
198,
198,
11748,
40481,
82,
198,
11748,
308,
13344,
198,
11748,
2298,
293,
198,
... | 1.408029 | 1,669 |
import os
import pytest
import json
import time
from pytapo import Tapo
import mock
user = os.environ.get("PYTAPO_USER")
password = os.environ.get("PYTAPO_PASSWORD")
invalidPassword = "{password}_invalid".format(password=password)
host = os.environ.get("PYTAPO_IP")
"""
util functions for unit tests
"""
"""
unit tests below
"""
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
33918,
198,
11748,
640,
198,
6738,
12972,
83,
41817,
1330,
16880,
78,
198,
11748,
15290,
198,
198,
7220,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
47,
56,
51,
2969,
46,
62,
29904,
49... | 2.695652 | 138 |
import pandas as pd
dfcsv=pd.read_csv('gruppen-zeitslots-vers3.csv')
dfcsv.T.to_json('gruppen-zeitslots.json') | [
11748,
19798,
292,
355,
279,
67,
198,
7568,
40664,
28,
30094,
13,
961,
62,
40664,
10786,
48929,
381,
268,
12,
2736,
896,
75,
1747,
12,
690,
18,
13,
40664,
11537,
198,
7568,
40664,
13,
51,
13,
1462,
62,
17752,
10786,
48929,
381,
268,... | 2.156863 | 51 |
from time import time
from urllib.request import Request, urlopen
urls = [
"https://www.google.com/search?q=" + i
for i in ["apple", "pear", "grape", "pineapple", "orange", "strawberry", "pie", "helloworld"]
]
begin = time()
result = []
for url in urls:
request = Request(url, headers={"User-Agent": "Mozilla/5.0"})
response = urlopen(request)
page = response.read()
result.append(len(page))
print(result)
end = time()
print(f"실행 시간: {end - begin:.3f}초")
| [
6738,
640,
1330,
640,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19390,
11,
19016,
9654,
198,
198,
6371,
82,
796,
685,
198,
220,
220,
220,
366,
5450,
1378,
2503,
13,
13297,
13,
785,
14,
12947,
30,
80,
2625,
1343,
1312,
198,
220,
... | 2.39801 | 201 |
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
FACEBOOK_URL = reverse('authentication:facebook')
GOOGLE_URL = reverse('authentication:google')
TWITTER_URL = reverse('authentication:twitter')
GOOGLE_VALIDATION = "authors.apps.authentication.validators.SocialValidation.google_auth_validation"
FACEBOOK_VALIDATION = "authors.apps.authentication.validators.SocialValidation.facebook_auth_validation"
TWITTER_VALIDATION = "authors.apps.authentication.validators.SocialValidation.twitter_auth_validation"
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
... | 3.383085 | 201 |
from express.properties.non_scalar import NonScalarProperty
class InchiKey(NonScalarProperty):
"""
Inchi key property class.
"""
| [
6738,
4911,
13,
48310,
13,
13159,
62,
1416,
282,
283,
1330,
8504,
3351,
282,
283,
21746,
628,
198,
4871,
554,
11072,
9218,
7,
15419,
3351,
282,
283,
21746,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
554,
11072,
1994,
3119,
... | 2.86 | 50 |
import argparse
import glob
import sys
import os
import time
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '../', '../')))
from libs.utils import load_chars
from libs.font_utils import check_font_chars, load_font
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Find chars not support by some fonts')
#parser.add_argument('--chars_file', type=str, default='./data/chars/chn.txt')
parser.add_argument('--chars_file', type=str, default='./data/chars/japeng.txt')
parser.add_argument('--font_dir', type=str, default='./data/fonts/jap')
parser.add_argument('--delete', action="store_true", default=False,
help='whether or not to delete font which not full support the chars_file')
args, _ = parser.parse_known_args()
charset = load_chars(args.chars_file)
font_paths = glob.glob(args.font_dir + '/*.*')
fonts = {}
for p in font_paths:
ttf = load_font(p)
fonts[p] = ttf
illegal_unsupported_chars = set()
useful_fonts = []
for k, v in fonts.items():
unsupported_chars, supported_chars = check_font_chars(v, charset)
print("font: %s ,chars unsupported: %d" % (k, len(unsupported_chars)))
print(unsupported_chars)
for char in unsupported_chars:
illegal_unsupported_chars.add(char)
#print(supported_chars)
if len(unsupported_chars) != 0:
if args.delete:
os.remove(k)
else:
useful_fonts.append(k)
print("%d fonts support all chars(%d) in %s:" % (len(useful_fonts), len(charset), args.chars_file))
print(useful_fonts)
print(illegal_unsupported_chars)
char_list = []
with open('new_japeng.txt', 'w') as f:
#先写入一个空格
f.write(' \n' )
for line in open('data/chars/japeng.txt') :
char = line.strip('\r\n ')
if len(char) > 0:
if char in illegal_unsupported_chars:
continue
f.write(char + '\n')
with open('new_singleword.dat', 'w') as f:
#先写入一个空格
#f.write(' \n' )
for line in open('data/chars/singleword.dat') :
char = line.strip('\r\n ')
if len(char) > 0:
if char in illegal_unsupported_chars:
continue
f.write(char + '\n')
| [
11748,
1822,
29572,
198,
11748,
15095,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
640,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
834,
7753,
834,
11,
... | 2.067299 | 1,159 |
# Import libraries
import discord
import re
TOKEN = "XXXX"
ADMIN_ROLES = [00000000000000000, 00000000000000000] # insert numerical IDs of admin roles here - bot will ignore changes made by these users
BOT_ROLE_ID = 00000000000000000 # numerical ID of the role given to this bot - changes made by this role will be ignored
STATIC_NICKNAME_ROLE_ID = 00000000000000000 # numerical ID - bot will revert changes made by these users
PLACEHOLDER_NICKNAME = "Valued server member"
NICKNAME_PATTERNS = [
r'(discord\.gg/)', # invite links
r'(nigg|fag|\bnazi\b)', # banned words - \bword\b is exact match only
r'(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)' # hyperlinks
]
client = discord.Client()
# Checks if a nickname matches any of the banned patterns
# triggered on new/removed nickname
@client.event
# triggered on username change
@client.event
# check if new members' usernames need filtering
@client.event
@client.event
client.run(TOKEN)
| [
2,
17267,
12782,
198,
11748,
36446,
198,
11748,
302,
198,
198,
10468,
43959,
796,
366,
24376,
1,
198,
2885,
23678,
62,
49,
3535,
1546,
796,
685,
8269,
10535,
830,
11,
27551,
10535,
830,
60,
1303,
7550,
29052,
32373,
286,
13169,
9176,
... | 2.832869 | 359 |
from django.contrib import admin
from models import City
admin.site.register(City)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
4981,
1330,
2254,
198,
198,
28482,
13,
15654,
13,
30238,
7,
14941,
8,
198
] | 3.5 | 24 |
import os
from dotenv import load_dotenv
# Load environmental variables
load_dotenv()
api_key = os.environ.get("API_KEY")
| [
11748,
28686,
198,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2,
8778,
6142,
9633,
198,
2220,
62,
26518,
24330,
3419,
198,
15042,
62,
2539,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
17614,
62,
20373,
4943,
198
] | 3.02439 | 41 |
"""
Script goal,
Predict future FRI based on current climate
"""
#==============================================================================
__title__ = "FRI Prediction"
__author__ = "Arden Burrell"
__version__ = "v1.0(27.11.2019)"
__email__ = "arden.burrell@gmail.com"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
# import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import shutil
import time
from collections import OrderedDict
# from scipy import stats
# from numba import jit
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
# import seaborn as sns
import matplotlib as mpl
import cartopy as ct
import cartopy.crs as ccrs
import cartopy.feature as cpf
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import make_axes_locatable
import socket
import string
# ========== Import my dunctions ==========
import myfunctions.corefunctions as cf
import myfunctions.PlotFunctions as pf
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# # Import debugging packages
# import pdb as ipdb
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
print("cartopy version : ", ct.__version__)
#==============================================================================
#==============================================================================
#==============================================================================
if __name__ == '__main__':
main()
| [
37811,
201,
198,
7391,
3061,
11,
220,
201,
198,
201,
198,
47,
17407,
2003,
48167,
1912,
319,
1459,
4258,
201,
198,
201,
198,
37811,
201,
198,
2,
23926,
25609,
855,
201,
198,
201,
198,
834,
7839,
834,
796,
366,
37,
7112,
46690,
1,
... | 3.108696 | 828 |
r"""
The transient advection-diffusion equation with a given divergence-free
advection velocity.
Find :math:`u` such that:
.. math::
\int_{\Omega} s \pdiff{u}{t}
+ \int_{\Omega} s \nabla \cdot \left(\ul{v} u \right)
+ \int_{\Omega} D \nabla s \cdot \nabla u
= 0
\;, \quad \forall s \;.
View the results using::
python postproc.py square_tri2.*.vtk -b --wireframe
"""
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_tri2.mesh'
regions = {
'Omega' : 'all', # or 'cells of group 6'
'Gamma_Left' : ('vertices in (x < -0.99999)', 'facet'),
'Gamma_Right' : ('vertices in (x > 0.99999)', 'facet'),
}
fields = {
'concentration' : ('real', 1, 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'concentration', 0, 1),
's' : ('test field', 'concentration', 'u'),
}
ebcs = {
'u1' : ('Gamma_Left', {'u.0' : 2.0}),
'u2' : ('Gamma_Right', {'u.0' : 0.0}),
}
# Units: D: 0.0001 m^2 / day, v: [0.1, 0] m / day -> time in days.
materials = {
'm' : ({'D' : 0.0001, 'v' : [[0.1], [0.0]]},),
}
integrals = {
'i' : 2,
}
equations = {
'advection-diffusion' :
"""
dw_volume_dot.i.Omega(s, du/dt)
+ dw_advect_div_free.i.Omega(m.v, s, u)
+ dw_laplace.i.Omega(m.D, s, u)
= 0
"""
}
solvers = {
'ts' : ('ts.simple', {
't0' : 0.0,
't1' : 10.0,
'dt' : None,
'n_step' : 11, # Has precedence over dt.
}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
'ls' : ('ls.scipy_direct', {}),
}
options = {
'ts' : 'ts',
'nls' : 'newton',
'ls' : 'ls',
'save_steps' : -1,
}
| [
81,
37811,
198,
464,
32361,
512,
303,
596,
12,
26069,
4241,
16022,
351,
257,
1813,
43366,
12,
5787,
198,
324,
303,
596,
15432,
13,
198,
198,
16742,
1058,
11018,
25,
63,
84,
63,
884,
326,
25,
198,
198,
492,
10688,
3712,
198,
220,
2... | 1.905896 | 882 |
"""
shows deep stats for the MPD
- calculates similarity and multiplies it with the track popularity
- if the track was unknown, decrease its similarity weight
usage:
python deeper_stats.py path-to-mpd-data/
"""
#import sys
import json
import re
import collections
import os
import gzip
import pandas as pd
import numpy as np
#from sklearn.neighbors import NearestNeighbors
import pickle
from collections import defaultdict
import heapq
import math
MOST_POPULAR_WEIGHT = 0.000002
TITLE_WEIGHT = 0.01
ALBUM_WEIGHT = 0.1
ARTIST_WEIGHT = 0.01
#"challenge_track_predImprSim_256418_64.csv",
#"challenge_track_scoreImprSim_256418_64.csv",
#"challenge_track_names.csv",
#1.0,
relevantTrackKNNs = ["challenge_track_predNNSim_256418_64.csv", "challenge_track_predImprSim_256418_64.csv"]
relevantTrackScores = [ "challenge_track_score2NNSim_256418_64.csv", "challenge_track_scoreImprSim_256418_64.csv"]
relevantTrackNames = ["challenge_track_names.csv", "challenge_track_names.csv"]
trackWeights = [ 1.0, 1.0]
relevantAlbKNNs = ["challenge_track_predNNAlbFinal_250561_64.csv"]
relevantAlbScores = ["challenge_track_scoreNNAlbFinal_250561_64.csv"]
relevantAlbNames = ["challenge_album_names.csv"]
albWeights = [0.1]
trackDT = zip(relevantTrackKNNs, relevantTrackScores, relevantTrackNames, trackWeights)
albDT = zip(relevantAlbKNNs, relevantAlbScores, relevantAlbNames, albWeights)
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
"""
return x / x.sum()
#scoreMatExp = np.exp(np.asarray(x))
#return scoreMatExp / scoreMatExp.sum()
if __name__ == '__main__':
path = "challenge_set"
filename = "challenge_set.json"
#outFile = "solution_Word2Vec_improvedAllVecs.csv"
outFile = "solution_OM4LogPOP.csv"
process_mpd(path, filename, outFile, trackDT, albDT)
| [
37811,
198,
220,
220,
220,
2523,
2769,
9756,
329,
262,
4904,
35,
198,
220,
220,
220,
532,
43707,
26789,
290,
15082,
444,
340,
351,
262,
2610,
11533,
198,
220,
220,
220,
532,
611,
262,
2610,
373,
6439,
11,
10070,
663,
26789,
3463,
19... | 2.621469 | 708 |
# Copyright 2021 Jacob Baumbach
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from collections import OrderedDict
from typing import Optional
from typing import Union
import pytest
from adorn.exception.configuration_error import ConfigurationError
from adorn.params import _is_dict_free
from adorn.params import infer_and_cast
from adorn.params import Params
from adorn.params import unflatten
LEFT_MERGE = [
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
dict(),
{"a": 0, "b.c": 1, "b.d": 2, "b.e.f": 3, "b.e.g": 4},
),
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
Params({}),
{"a": 0, "b.c": 1, "b.d": 2, "b.e.f": 3, "b.e.g": 4},
),
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
{"a": 5, "b.d": 6, "b.e.f": 7},
{"a": 5, "b.c": 1, "b.d": 6, "b.e.f": 7, "b.e.g": 4},
),
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
Params({"a": 5, "b.d": 6, "b.e.f": 7}),
{"a": 5, "b.c": 1, "b.d": 6, "b.e.f": 7, "b.e.g": 4},
),
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
{"a": 5, "b": {"d": 6, "e": {"f": 7, "h": 8}, "i": 9}, "j": 10},
{
"a": 5,
"b.c": 1,
"b.d": 6,
"b.e.f": 7,
"b.e.g": 4,
"b.e.h": 8,
"b.i": 9,
"j": 10,
},
),
(
Params({"a": 0, "b": {"c": 1, "d": 2, "e": {"f": 3, "g": 4}}}),
Params({"a": 5, "b": {"d": 6, "e": {"f": 7, "h": 8}, "i": 9}, "j": 10}),
{
"a": 5,
"b.c": 1,
"b.d": 6,
"b.e.f": 7,
"b.e.g": 4,
"b.e.h": 8,
"b.i": 9,
"j": 10,
},
),
]
@pytest.fixture(params=LEFT_MERGE)
@pytest.mark.parametrize(
"params",
[
Params({"a": "b", "c": "d"}),
Params({"a": {"b": "c"}, "d": Params({"e": "f"})}),
Params({"a": {"b": "c"}, "d": Params({"e": Params({"f": "g"})})}),
Params({"a": {"b": "c"}, "d": [Params({"e": Params({"f": "g"})})]}),
],
)
@pytest.mark.parametrize(
"method_name,params,target",
[
("pop_int", Params({"a": "1"}), 1),
("pop_int", Params({"a": None}), None),
("pop_float", Params({"a": "1.1"}), 1.1),
("pop_float", Params({"a": None}), None),
("pop_bool", Params({"a": None}), None),
("pop_bool", Params({"a": True}), True),
("pop_bool", Params({"a": "true"}), True),
("pop_bool", Params({"a": "false"}), False),
],
)
| [
2,
15069,
33448,
12806,
8999,
2178,
620,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
92... | 1.87822 | 1,708 |
# filefuzzer.py
#
# python script
#
# "file fuzzer"
#
# generates a bunch of random data using os.urandom then writes it to fuzzy.csv
# this was used to generate fuzzy.csv
# fuzzy.csv is used to test error handling when reading data
import os
with open('fuzzy.csv', 'wb') as fout:
fout.write(os.urandom(1024*50)) # replace 1024 with size_kb if not unreasonably large | [
2,
2393,
69,
4715,
263,
13,
9078,
198,
2,
198,
2,
21015,
4226,
198,
2,
198,
2,
366,
7753,
26080,
263,
1,
198,
2,
198,
2,
18616,
257,
7684,
286,
4738,
1366,
1262,
28686,
13,
333,
3749,
788,
6797,
340,
284,
34669,
13,
40664,
198,
... | 3.091667 | 120 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import argparse
import json
import jinja2
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
474,
259,
6592,
17,
198,
198,
361,
11593,
3672,... | 2.327586 | 58 |
import pytest
from frictionless import Table, exceptions
# Loader
@pytest.mark.ci
@pytest.mark.ci
@pytest.mark.ci
| [
11748,
12972,
9288,
198,
6738,
23822,
1203,
1330,
8655,
11,
13269,
628,
198,
2,
8778,
263,
628,
198,
31,
9078,
9288,
13,
4102,
13,
979,
628,
198,
31,
9078,
9288,
13,
4102,
13,
979,
628,
198,
31,
9078,
9288,
13,
4102,
13,
979,
198
... | 2.772727 | 44 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import logging
import os
OK = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
PINK = '\033[95m'
BLUE = '\033[94m'
GREEN = OK
RED = FAIL
WHITE = END
YELLOW = WARNING
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.59952 | 417 |
import logging
import os
from signalwire.relay.consumer import Consumer
r = Receiver()
r.run()
| [
11748,
18931,
198,
11748,
28686,
198,
6738,
6737,
21809,
13,
2411,
323,
13,
49827,
1330,
18110,
198,
198,
81,
796,
39106,
3419,
198,
81,
13,
5143,
3419,
198
] | 3.428571 | 28 |
from bob.errors import ParseError
import fnmatch
import platform
# TODO: support more architectures; support musl/dietlibc
# get host architecture
# get host autoconf triple
# set or replace vendor field in autoconf triplet
manifest = {
'apiVersion' : "0.15",
'stringFunctions' : {
"gen-autoconf" : genAutoconf,
"host-arch" : hostArch,
"host-autoconf" : hostAutoconf,
}
}
| [
6738,
29202,
13,
48277,
1330,
2547,
325,
12331,
198,
11748,
24714,
15699,
198,
11748,
3859,
198,
198,
2,
16926,
46,
25,
1104,
517,
45619,
26,
1104,
1928,
75,
14,
67,
1155,
8019,
66,
198,
198,
2,
651,
2583,
10959,
198,
198,
2,
651,
... | 2.58125 | 160 |
from utils.layers import *
from utils.convolve4d import *
from vgg19.vgg19 import VGG19
from tool.log_config import *
class HDDRNet(object):
'''
The HDDRNet framework
'''
| [
6738,
3384,
4487,
13,
75,
6962,
1330,
1635,
198,
6738,
3384,
4487,
13,
42946,
6442,
19,
67,
1330,
1635,
198,
6738,
410,
1130,
1129,
13,
85,
1130,
1129,
1330,
569,
11190,
1129,
198,
6738,
2891,
13,
6404,
62,
11250,
1330,
1635,
628,
1... | 2.583333 | 72 |
from .models import Btc, Ltc, Dash, Doge # Bcy
from rest_framework import viewsets
from rest_framework import status
from rest_framework.response import Response
from .serializers import (BtcSerializer, LtcSerializer, DashSerializer,
DogeSerializer, WithdrawSerializer) # BcySerializer
from .permissions import IsOwnerOrReadOnly
from rest_framework import mixins
from rest_framework.decorators import detail_route, list_route
from .utils import decode_signin, extract_webhook_id, unsubscribe_from_webhook
from rest_framework.permissions import IsAuthenticated
from .signals import get_webhook
class BtcViewSet(BaseViewSet):
"""
A simple ViewSet for listing or retrieving Bitcocins addresses.
"""
serializer_class = BtcSerializer
queryset = Btc.objects.all()
permissions = [IsOwnerOrReadOnly]
model = Btc
class LtcViewSet(BaseViewSet):
"""
A simple ViewSet for listing or retrieving Litecoins addresses.
"""
serializer_class = LtcSerializer
queryset = Ltc.objects.all()
permissions = [IsOwnerOrReadOnly]
model = Ltc
class DashViewSet(BaseViewSet):
"""
A simple ViewSet for listing or retrieving Dash addresses.
"""
serializer_class = DashSerializer
queryset = Dash.objects.all()
permissions = [IsOwnerOrReadOnly]
model = Dash
class DogeViewSet(BaseViewSet):
"""
A simple ViewSet for listing or retrieving Dogecoins addresses.
"""
serializer_class = DogeSerializer
queryset = Doge.objects.all()
permissions = [IsOwnerOrReadOnly]
model = Doge
'''
class BcyViewSet(BaseViewSet):
"""
A simple ViewSet for listing or retrieving Bitcocins testnet addresses.
"""
serializer_class = BcySerializer
queryset = Bcy.objects.all()
permissions = [IsOwnerOrReadOnly]
model = Bcy
'''
| [
6738,
764,
27530,
1330,
347,
23047,
11,
19090,
66,
11,
16189,
11,
2141,
469,
220,
1303,
347,
948,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261... | 2.907378 | 637 |
from django import template
register = template.Library()
@register.filter
def cut_text(value):
""" returns a cut string if its length is greater than 50 chars """
return value if len(value) <= 50 else f"{value[:50]}..." | [
6738,
42625,
14208,
1330,
11055,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
24455,
198,
4299,
2005,
62,
5239,
7,
8367,
2599,
198,
220,
220,
220,
37227,
5860,
257,
2005,
4731,
611,
663,
4129,
318,
3744,
621,... | 3.382353 | 68 |
import click
@click.group()
@click.pass_obj
@remote_entry_point.command()
@click.option('-n', '--name', required=True, type=str)
@click.option('-u', '--url', required=True, type=str)
@click.pass_obj
@remote_entry_point.command()
@click.option('-n', '--name', required=True, type=str)
@click.pass_obj
@remote_entry_point.command()
@click.pass_obj
| [
11748,
3904,
628,
198,
31,
12976,
13,
8094,
3419,
198,
31,
12976,
13,
6603,
62,
26801,
628,
198,
31,
47960,
62,
13000,
62,
4122,
13,
21812,
3419,
198,
31,
12976,
13,
18076,
10786,
12,
77,
3256,
705,
438,
3672,
3256,
2672,
28,
17821,... | 2.641791 | 134 |
# Copyright 2018 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import TestCase
import dill
from src import utils; utils.unit_test = True
from tests.utils import clean_test_files
| [
2,
15069,
2864,
6060,
1273,
897,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,... | 3.692308 | 195 |
from my_functions import ssh_command2
from my_devices import devices_list
from concurrent.futures import ProcessPoolExecutor, as_completed
from datetime import datetime
start_time = datetime.now()
max_procs = 5
pool = ProcessPoolExecutor(max_procs)
future_list = []
for device in devices_list:
future = pool.submit(ssh_command2, device, "show version")
future_list.append(future)
for future in as_completed(future_list):
print("=" * 40)
print(future.result())
end_time = datetime.now()
print("=" * 40)
print("\nElapsed time: ", end_time - start_time)
print()
| [
6738,
616,
62,
12543,
2733,
1330,
26678,
62,
21812,
17,
198,
6738,
616,
62,
42034,
1330,
4410,
62,
4868,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10854,
27201,
23002,
38409,
11,
355,
62,
785,
16838,
198,
6738,
4818,
8079,
1330,
4818,... | 2.949239 | 197 |
import sys,os
import random
sys.path.append(os.getcwd())
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import load5foldData
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
# th.manual_seed(100)
# th.cuda.manual_seed_all(100)
# random.seed(12345)
# th.manual_seed(12345)
# th.cuda.manual_seed_all(12345)
import os
import datetime
import json
ensure_directory("./results")
current = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
RESULTS_FILE = "./results/out_{0}_{1}_{2}.txt".format("Twitter16", "BiGCN_org", current)
from torch_scatter import scatter_mean
from torch_geometric.nn import GCNConv
import copy
# =========================
# MAIN
# =========================
lr=0.0005
weight_decay=1e-4
patience=10
n_epochs=200
# n_epochs=100 # JIHO
batchsize=128
TDdroprate=0.2
BUdroprate=0.2
datasetname=sys.argv[1] #"Twitter15"、"Twitter16"
iterations=int(sys.argv[2])
model="GCN"
device = th.device('cuda:1' if th.cuda.is_available() else 'cpu')
test_accs = []
NR_F1 = []
FR_F1 = []
TR_F1 = []
UR_F1 = []
for iter in range(iterations):
fold0_x_test, fold0_x_train, fold1_x_test, fold1_x_train, fold2_x_test, fold2_x_train, fold3_x_test, fold3_x_train, fold4_x_test,fold4_x_train = load5foldData(datasetname)
# write_results(fold0_x_train)
# write_results(fold0_x_test)
# ensure_directory("./temp")
save_json_file("./temp/fold0_train.txt", fold0_x_train)
save_json_file("./temp/fold0_test.txt", fold0_x_test)
treeDic=loadTree(datasetname)
train_losses, val_losses, train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0 = train_GCN(treeDic, fold0_x_test, fold0_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
write_results("accs0: " + str(accs0))
train_losses, val_losses, train_accs, val_accs1, accs1, F1_1, F2_1, F3_1, F4_1 = train_GCN(treeDic, fold1_x_test, fold1_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
write_results("accs1: " + str(accs1))
train_losses, val_losses, train_accs, val_accs2, accs2, F1_2, F2_2, F3_2, F4_2 = train_GCN(treeDic, fold2_x_test, fold2_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
write_results("accs2: " + str(accs2))
train_losses, val_losses, train_accs, val_accs3, accs3, F1_3, F2_3, F3_3, F4_3 = train_GCN(treeDic, fold3_x_test, fold3_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
write_results("accs3: " + str(accs3))
train_losses, val_losses, train_accs, val_accs4, accs4, F1_4, F2_4, F3_4, F4_4 = train_GCN(treeDic, fold4_x_test, fold4_x_train, TDdroprate,BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
write_results("accs4: " + str(accs4))
test_accs.append((accs0+accs1+accs2+accs3+accs4)/5)
print(train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0)
NR_F1.append((F1_0+F1_1+F1_2+F1_3+F1_4)/5)
FR_F1.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)
TR_F1.append((F3_0 + F3_1 + F3_2 + F3_3 + F3_4) / 5)
UR_F1.append((F4_0 + F4_1 + F4_2 + F4_3 + F4_4) / 5)
print("Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}".format(
sum(test_accs) / iterations, sum(NR_F1) /iterations, sum(FR_F1) /iterations, sum(TR_F1) / iterations, sum(UR_F1) / iterations))
| [
11748,
25064,
11,
418,
198,
11748,
4738,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
1136,
66,
16993,
28955,
198,
6738,
10854,
13,
14681,
1330,
1635,
198,
11748,
28034,
355,
294,
198,
6738,
28034,
62,
1416,
1436,
1330,
41058,
62,
326... | 2.176996 | 1,678 |
import torch.nn as nn
import torch | [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034
] | 3.4 | 10 |
import argparse
import random
import itertools
import os
import sys
import rule_classifier as paper_classifier
import urllib.request
import bs4 as bs
import time
def label_paper(paper_id = None, paper_meta = None, cased_regexes = None, feature = None):
"""Label one paper
:param paper_id: The paper ID
:param paper_meta: Store meta information of a paper
:param cased_regexes: store meta information of a paper
:param feature: which part of content will we used to label papers. i.e. "title" or "fulltext"
:return: Nothing.
"""
if not os.path.isfile(f'papers/{paper_id}.pdf'):
os.makedirs(f'papers/', exist_ok=True)
# try:
urllib.request.urlretrieve(f'https://www.aclweb.org/anthology/{paper_id}.pdf', f'papers/{paper_id}.pdf')
time.sleep(2) # maybe we would wait some time until downloading processing finishes.
os.system(f'pdftotext papers/{paper_id}.pdf papers/{paper_id}.txt')
print('1')
# except:
# print(f'WARNING: Error while downloading/processing https://www.aclweb.org/anthology/{paper_id}.pdf')
# return
with open(f'papers/{paper_id}.txt', 'r') as f:
paper_text = '\n'.join(f.readlines())
paper_title = ''.join(paper_meta.title.findAll(text=True))
is_cased = 1 # if case-sensitive
if feature == "title":
feature = paper_title
is_cased = 0
elif feature == "fulltext":
feature = paper_text
is_cased = 1
predicted_tags = paper_classifier.classify(feature, cased_regexes, is_cased)
print(f'Title: {paper_title}\n'
f'Local location: papers/{paper_id}.pdf\n'
f'Online location: https://www.aclweb.org/anthology/{paper_id}.pdf\n'
f'Text file location: auto/{paper_id}.txt')
for i, tag in enumerate(predicted_tags):
print(f'Tag {i}: {tag}')
print("------------------------------------------------\n")
os.makedirs(f'auto/', exist_ok=True)
fin = open(f'auto/{paper_id}.txt', 'w')
print(f'# Title: {paper_title}\n# Online location: https://www.aclweb.org/anthology/{paper_id}.pdf', file=fin)
for tag, conf, just in predicted_tags:
print(f'# CHECK: confidence={conf}, justification={just}\n{tag}',file=fin)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get a paper to try to read and annotate")
parser.add_argument("--paper_id", type=str, default=None,
help="The paper ID to get, if you want to specify a single one (e.g. P84-1031)")
parser.add_argument("--years", type=str, default="19",
help="If a paper ID is not specified, a year (e.g. 19) or range of years (e.g. 99-02) from which"+
" to select a random paper.")
parser.add_argument("--confs", type=str, default="P,N,D",
help="A comma-separted list of conference abbreviations from which papers can be selected")
parser.add_argument("--volumes", type=str, default="1,2",
help="A comma-separated list of volumes to include (default is long and short research papers)."+
" 'all' for no filtering.")
parser.add_argument("--n_sample", type=str, default="1",
help="the number of sampled papers if paper_id is not specified (e.g. 1)."
" Write 'all' to select all papers from those years/conferences/volumes.")
parser.add_argument("--template", type=str, default="template.cpt",
help="The file of concept template (e.g. template.cpt)")
parser.add_argument("--feature", type=str, default="fulltext",
help="Which parts of paper is used to classify (e.g. fulltext|title)")
args = parser.parse_args()
# init variables
feature = args.feature
paper_id = args.paper_id
template = args.template
n_sample = args.n_sample
volumes = args.volumes.split(',')
paper_map = {}
# lead the concept template
cased_regexes = paper_classifier.genConceptReg(file_concept=template, formate_col = 3)
# if paper_id has not been specified
if paper_id == None:
years = args.years.split('-')
confs = args.confs.split(',')
if len(years) == 2:
years = list(range(int(years[0]), int(years[1])+1))
else:
assert len(years) == 1, "invalid format of years, {args.years}"
for pref, year in itertools.product(confs, years):
year = int(year)
pref= pref.upper()
with open(f'acl-anthology/data/xml/{pref}{year:02d}.xml', 'r') as f:
soup = bs.BeautifulSoup(f, 'xml')
for vol in soup.collection.find_all('volume'):
if vol.attrs['id'] in volumes:
for pap in vol.find_all('paper'):
if pap.url:
paper_map[pap.url.contents[0]] = pap
paper_keys = list(paper_map.keys())
if n_sample == 'all':
for paper_id in paper_keys:
paper_meta = paper_map[paper_id]
label_paper(paper_id, paper_meta, cased_regexes, feature)
else:
for _ in range(int(n_sample)):
randid = random.choice(paper_keys)
if not os.path.isfile(f'annotations/{randid}.txt') and not os.path.isfile(f'auto/{randid}.txt'):
paper_id = randid
paper_meta = paper_map[paper_id]
#print(paper_meta)
label_paper(paper_id, paper_meta, cased_regexes, feature)
else:
print(f'Warning: {paper_id} has been labeled!')
# if paper_id is specified
else:
prefix = paper_id.split("-")[0]
with open(f'acl-anthology/data/xml/{prefix}.xml', 'r') as f:
soup = bs.BeautifulSoup(f, 'xml')
for vol in soup.collection.find_all('volume'):
if vol.attrs['id'] in volumes:
for pap in vol.find_all('paper'):
if pap.url and pap.url.contents[0] == paper_id:
paper_map[pap.url.contents[0]] = pap
#print(paper_map[pap.url.contents[0]])
if not os.path.isfile(f'annotations/{paper_id}.txt') and not os.path.isfile(f'auto/{paper_id}.txt'):
label_paper(paper_id, paper_map[paper_id], cased_regexes, feature)
sys.exit(1)
else:
print(f'Warning: {paper_id} has been labeled!')
if len(paper_map) == 0:
print(f'Warning: {paper_id} can not been found!')
sys.exit(1)
| [
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
340,
861,
10141,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
3896,
62,
4871,
7483,
355,
3348,
62,
4871,
7483,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
275,
82,
19,
355,
... | 2.358859 | 2,664 |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Store
GUID : 9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8000, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8001, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8002, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8003, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8010, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8011, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8012, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8013, version=0)
@declare(guid=guid("9c2a37f3-e5fd-5cae-bcd1-43dafeee1ff0"), event_id=8014, version=0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
15905,
12,
11209,
12,
22658,
198,
38,
27586,
1058,
860,
66,
17,
64,
2718,
69,
18,
12,
68,
20,
16344,
12,
20,
66,
3609,
12,
65,
10210,
16,
12,
3559,
67,... | 1.871545 | 615 |
# -*- coding: utf-8 -*-
"""Defines the app context and commands."""
import os
import click
from nxstart import app
from nxstart.utils.strings import TITLE_TEXT, VERSION_STRING
from nxstart.version import __version__ as version
class Context(object):
"""
Context to be passed to the sub-commands.
"""
pass_context = click.make_pass_decorator(Context, ensure=True)
@click.group()
@click.option("--name", "-n", default=None, help="The name of your project")
@click.option("--author", "-a", default=None, help="The full name of the author")
@pass_context
@cli.command("libnx", short_help="create a new libnx project (C++)")
@click.option(
"--clion/--no-clion",
default=False,
prompt="Are you using CLion?",
help="include CMakeLists.txt",
)
@pass_context
def libnx(ctx, clion):
"""
Command for generating a libnx project.
:param ctx: Context
:param clion: Using CLion
"""
app.libnx(ctx.name, ctx.author, clion, ctx.cwd)
@cli.command("libt", short_help="create a new libtransistor project (C)")
@click.option(
"--clion/--no-clion",
default=False,
prompt="Are you using CLion?",
help="include CMakeLists.txt",
)
@pass_context
def libt(ctx, clion):
"""
Command for generating a libtransistor project.
:param ctx: Context
:param clion: Using CLion
"""
app.libt(ctx.name, ctx.author, clion, ctx.cwd)
@cli.command("brewjs", short_help="create a new BrewJS project (Javascript)")
@pass_context
def brewjs(ctx):
"""
Command for generating a BrewJS project.
:param ctx: Context
"""
app.brewjs(ctx.name, ctx.author, ctx.cwd)
@cli.command("pynx", short_help="create a new PyNX project (Python)")
@pass_context
def pynx(ctx):
"""
Command for generating a PyNX project.
:param ctx: Context
"""
app.pynx(ctx.name, ctx.author, ctx.cwd)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
7469,
1127,
262,
598,
4732,
290,
9729,
526,
15931,
198,
198,
11748,
28686,
198,
198,
11748,
3904,
198,
198,
6738,
299,
87,
9688,
1330,
598,
198,
6738,
299,
... | 2.59362 | 721 |
#!/usr/bin/env python
import os
import click
from ..log import echo_info, echo_normal, get_logger, verbosity_option
from ..release import get_gitlab_instance
from . import bdt
from .runners import (
_get_project,
_get_projects_from_file,
_get_projects_from_group,
)
logger = get_logger(__name__)
def _change_settings(project, info, dry_run):
"""Updates the project settings using ``info``"""
name = f"{project.namespace['name']}/{project.name}"
echo_normal(f"Changing {name}...")
if info.get("archive") is not None:
if info["archive"]:
echo_info(" -> archiving")
if not dry_run:
project.archive()
else:
echo_info(" -> unarchiving")
if not dry_run:
project.unarchive()
if info.get("description") is not None:
echo_info(f" -> set description to '{info['description']}'")
if not dry_run:
project.description = info["description"]
project.save()
if info.get("avatar") is not None:
echo_info(f" -> setting avatar to '{info['avatar']}'")
if not dry_run:
project.avatar = open(info["avatar"], "rb")
project.save()
@click.command(
epilog="""
Examples:
1. List settings in a gitlab project (bob/bob.devtools):
$ bdt gitlab settings bob/bob.devtools
2. Simulates an update to the project description:
$ bdt gitlab settings --description="new description" --dry-run bob/bob.devtools
"""
)
@click.argument("projects", nargs=-1, required=True)
@click.option(
"-a",
"--avatar",
default=None,
type=click.Path(file_okay=True, dir_okay=False, exists=True),
help="Set this to update the project icon (avatar)",
)
@click.option(
"-D",
"--description",
default=None,
type=str,
help="Set this to update the project description",
)
@click.option(
"-A",
"--archive/--unarchive",
default=None,
help="Set this to archive or unarchive a project",
)
@click.option(
"-d",
"--dry-run/--no-dry-run",
default=False,
help="Only goes through the actions, but does not execute them "
"(combine with the verbosity flags - e.g. ``-vvv``) to enable "
"printing to help you understand what will be done",
)
@verbosity_option()
@bdt.raise_on_error
def settings(projects, avatar, description, archive, dry_run):
"""Updates project settings"""
# if we are in a dry-run mode, let's let it be known
if dry_run:
logger.warn("!!!! DRY RUN MODE !!!!")
logger.warn("Nothing is being changed at Gitlab")
gl = get_gitlab_instance()
gl_projects = []
for target in projects:
if "/" in target: # it is a specific project
gl_projects.append(_get_project(gl, target))
elif os.path.exists(target): # it is a file with project names
gl_projects += _get_projects_from_file(gl, target)
else: # it is a group - get all projects
gl_projects += _get_projects_from_group(gl, target)
for k in gl_projects:
try:
logger.info(
"Processing project %s (id=%d)",
k.attributes["path_with_namespace"],
k.id,
)
info_to_update = {}
if avatar is not None:
info_to_update["avatar"] = avatar
if archive is not None:
info_to_update["archive"] = archive
if description is not None:
info_to_update["description"] = description
if not info_to_update:
# list current settings
s = f"{k.namespace['name']}/{k.name}"
if k.archived:
s += " [archived]"
s += f": {k.description}"
echo_normal(s)
else:
_change_settings(k, info_to_update, dry_run)
except Exception as e:
logger.error(
"Ignoring project %s (id=%d): %s",
k.attributes["path_with_namespace"],
k.id,
str(e),
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
11748,
28686,
198,
198,
11748,
3904,
198,
198,
6738,
11485,
6404,
1330,
9809,
62,
10951,
11,
9809,
62,
11265,
11,
651,
62,
6404,
1362,
11,
15942,
16579,
62,
18076,
198,
6738,
1148... | 2.139209 | 1,997 |
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.625 | 40 |
import sys
import os
import math
import argparse
import json
import caffe
from caffe import layers as L # pseudo module using __getattr__ magic to generate protobuf messages
from caffe import params as P # pseudo module using __getattr__ magic to generate protobuf messages
if __name__ == "__main__":
# sys.argv[1]: name task (class included)
print "task: " + sys.argv[1]
# sys.argv[2]: folder prototxt folder (solver and model together + caffe weights to store)
print "prototxt folder: " + sys.argv[2]
# sys.argv[3]: lmdb folder
print "lmdb folder: " + sys.argv[3]
# sys.argv[4]: size patch
print "size patch: [" + sys.argv[4] + "," + sys.argv[4] + "]"
# sys.argv[5]: size batch
print "size batch: [" + sys.argv[5] + "," + sys.argv[5] + "]"
# sys.argv[6]: number of iterations CNN
print "num iter CNN: " + sys.argv[6]
# sys.argv[7]: number of parts
print "number of parts: " + sys.argv[7]
# sys.argv[8]: number of stages
print "number of stages: " + sys.argv[8]
# sys.argv[9]: keep or not the AR when resizing to crop_size
print "keep AR?: " + sys.argv[9]
# sys.argv[10]: number of padded pixels in output image
print "Padded pixels: " + sys.argv[10]
### Change here for different dataset
task_name = sys.argv[1]
directory = sys.argv[2] + '/' + task_name
dataFolder = sys.argv[3]
testFolder = dataFolder + '_val'
patchSize = int(sys.argv[4])
batch_size = int(sys.argv[5])
num_iter_cnn = int(sys.argv[6])
numParts = int(sys.argv[7]) + 1 # adding bg
numStages = int(sys.argv[8])
keepAR = bool(int(sys.argv[9]))
pad = int(sys.argv[10])
weights_folder = '%s' % directory # the place you want to store your caffemodel
base_lr = 8e-5
transform_param = dict(stride=8, crop_size_x=patchSize, crop_size_y=patchSize,
target_dist=1.171, scale_prob=0, scale_min=1.0, scale_max=1.0,
max_rotate_degree=0, center_perterb_max=0.85, do_clahe=False, num_parts=numParts-1,
np_in_lmdb=numParts-1, flip_prob=0.5, is_rigid=True, keep_ar=keepAR, pad=pad)
nCP = 3
CH = 128
if not os.path.exists(directory):
os.makedirs(directory)
layername = ['C', 'P'] * nCP + ['C','C','D','C','D','C'] + ['L'] # first-stage
kernel = [ 9, 3 ] * nCP + [ 5 , 9 , 0 , 1 , 0 , 1 ] + [0] # first-stage
outCH = [128, 128] * nCP + [ 32,512, 0 ,512, 0 , numParts] + [0] # first-stage
stride = [ 1 , 2 ] * nCP + [ 1 , 1 , 0 , 1 , 0 , 1 ] + [0] # first-stage
if numStages >= 2:
layername += ['C', 'P'] * nCP + ['$'] + ['C'] + ['@'] + ['C'] * 5 + ['L']
outCH += [128, 128] * nCP + [ 0 ] + [32 ] + [ 0 ] + [128,128,128,128,numParts] + [ 0 ]
kernel += [ 9, 3 ] * nCP + [ 0 ] + [ 5 ] + [ 0 ] + [11, 11, 11, 1, 1] + [ 0 ]
stride += [ 1 , 2 ] * nCP + [ 0 ] + [ 1 ] + [ 0 ] + [ 1 ] * 5 + [ 0 ]
for s in range(3, numStages+1):
layername += ['$'] + ['C'] + ['@'] + ['C'] * 5 + ['L']
outCH += [ 0 ] + [32 ] + [ 0 ] + [128,128,128,128,numParts] + [ 0 ]
kernel += [ 0 ] + [ 5 ] + [ 0 ] + [11, 11, 11, 1, 1 ] + [ 0 ]
stride += [ 0 ] + [ 1 ] + [ 0 ] + [ 1 ] * 5 + [ 0 ]
label_name = ['label_1st_lower', 'label_lower']
writePrototxts(dataFolder, testFolder, directory, batch_size, num_iter_cnn, layername, kernel, stride, outCH, transform_param,
base_lr, task_name, directory, weights_folder, label_name, patchSize, numParts, numStages) | [
11748,
25064,
198,
11748,
28686,
198,
11748,
10688,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
21121,
198,
6738,
21121,
1330,
11685,
355,
406,
220,
1303,
24543,
8265,
1262,
11593,
1136,
35226,
834,
5536,
284,
7716,
1237,
672,
... | 2.164489 | 1,684 |
from django.test import TestCase
from django.http import HttpRequest
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from rest_framework.request import Request
from django_user_interaction_log.scripts import (check_if_drf_request, get_clean_request_object, get_request_event_path, get_request_user)
class ScriptsFunctionsTestCase(TestCase):
"""This class run TestCases for the functions of scripts.py file"""
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
18453,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
... | 3.47651 | 149 |
import pytest
from brainscore.benchmarks.cadena2017 import AssemblyLoader
from . import check_standard_format
@pytest.mark.private_access
| [
11748,
12972,
9288,
198,
198,
6738,
14290,
7295,
13,
26968,
14306,
13,
66,
38047,
5539,
1330,
10006,
17401,
198,
6738,
764,
1330,
2198,
62,
20307,
62,
18982,
628,
198,
31,
9078,
9288,
13,
4102,
13,
19734,
62,
15526,
198
] | 3.615385 | 39 |
from pathlib import Path
from setuptools import setup
BASEDIR = Path(__file__).resolve().parent
setup(name='randomness',
version='0.1',
description='Generates random secrets (passwords, etc).',
long_description=((BASEDIR / 'README.rst').read_text()),
url='https://github.com/crowsonkb/randomness',
author='Katherine Crowson',
author_email='crowsonkb@gmail.com',
license='MIT',
packages=['randomness'],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['randomness=randomness.randomness:main'],
})
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
33,
42827,
4663,
796,
10644,
7,
834,
7753,
834,
737,
411,
6442,
22446,
8000,
628,
198,
40406,
7,
3672,
11639,
25120,
1108,
3256,
198,
220,
220,
22... | 2.518519 | 243 |
import atexit
import sys
import ctypes as ct
from multiprocessing import Process, Event, Pipe, Value, Array
import serial
from .sensor import ReSkinBase, ReSkinSettings, ReSkinData
class ReSkinProcess(Process):
"""
ReSkin Sensor process. Keeps datastream running in the background.
Attributes
----------
sensor : ReSkinSettings
SensorSettings object
_pipe_in, _pipe_out : Pipe
Two ends of a pipe for communicating with the sensor
_sample_cnt : int
ID samples and keep count to ensure unique samples are passed to querying
process
Methods
-------
start_streaming():
Start streaming data from ReSkin sensor
start_buffering(overwrite=False):
Start buffering ReSkin data. Call is ignored if already buffering
pause_buffering():
Stop buffering ReSkin data
pause_streaming():
Stop streaming data from ReSkin sensor
get_data(num_samples=5):
Return a specified number of samples from the ReSkin Sensor
get_buffer(timeout=1.0, pause_if_buffering=False):
Return the recorded buffer
"""
def __init__(self,sensor_settings,
chunk_size):
"""
Parameters
----------
sensor_settings : ReSkinSettings
Named tuple containing settings for ReSkin sensor
chunk_size : int
Quantum of data piped from buffer at one time.
"""
super(ReSkinProcess, self).__init__()
self.device_id = sensor_settings.device_id
self._pipe_in, self._pipe_out = Pipe()
self._sample_cnt = Value(ct.c_uint64)
self._buffer_size = Value(ct.c_uint64)
self._last_time = Value(ct.c_double)
self._last_delay = Value(ct.c_double)
self._last_reading = Array(ct.c_float, sensor_settings.num_mags * 4)
self.sensor_settings = sensor_settings
self._chunk_size = chunk_size
self._event_is_streaming = Event()
self._event_quit_request = Event()
self._event_sending_data = Event()
self._event_is_buffering = Event()
atexit.register(self.join)
@property
@property
def start_streaming(self):
"""Start streaming data from ReSkin sensor"""
if not self._event_quit_request.is_set():
self._event_is_streaming.set()
print('Started streaming')
def start_buffering(self, overwrite):
"""
Start buffering ReSkin data. Call is ignored if already buffering
Parameters
----------
overwrite : bool
Existing buffer is overwritten if true; appended if false. Ignored
if data is already buffering
"""
if not self._event_is_buffering.is_set():
if overwrite:
# Warn that buffer is about to be overwritten
print('Warning: Overwriting non-empty buffer')
self.get_buffer()
self._event_is_buffering.set()
else:
# Warn that data is already buffering
print('Warning: Data is already buffering')
def pause_buffering(self):
"""Stop buffering ReSkin data"""
self._event_is_buffering.clear()
def pause_streaming(self):
"""Stop streaming data from ReSkin sensor"""
self._event_is_streaming.clear()
def get_data(self, num_samples=5):
"""
Return a specified number of samples from the ReSkin Sensor
Parameters
----------
num_samples : int
Number of samples required
"""
# Only sends samples if streaming is on. Sends empty list otherwise.
samples = []
if num_samples <=0:
return samples
last_cnt = self._sample_cnt.value
samples = [self.last_reading]
while len(samples) < num_samples:
if not self._event_is_streaming.is_set():
print('Please start streaming first.')
return []
# print(self._sample_cnt.value)
if last_cnt == self._sample_cnt.value:
continue
last_cnt = self._sample_cnt.value
samples.append(self.last_reading)
return samples
def get_buffer(self, timeout, pause_if_buffering):
"""
Return the recorded buffer
Parameters
----------
timeout : int
Time to wait for data to start getting piped.
pause_if_buffering : bool
Pauses buffering if still running, and then collects and returns buffer
"""
# Check if buffering is paused
if self._event_is_buffering.is_set():
if not pause_if_buffering:
print('Cannot get buffer while data is buffering. Set pause_if_buffering=True to pause buffering and retrieve buffer')
return
else:
self._event_is_buffering.clear()
rtn = []
if self._event_sending_data.is_set() or self._buffer_size.value > 0:
self._event_sending_data.wait(timeout=timeout)
while self._pipe_in.poll() or self._buffer_size.value > 0:
rtn.extend(self._pipe_in.recv())
self._event_sending_data.clear()
return rtn
def join(self, timeout=None):
"""Clean up before exiting"""
self._event_quit_request.set()
self.pause_buffering()
self.pause_streaming()
super(ReSkinProcess, self).join(timeout)
def run(self):
"""This loop runs until it's asked to quit."""
buffer = []
# Initialize sensor
try:
self.sensor = ReSkinBase(
num_mags=self.sensor_settings.num_mags,
port=self.sensor_settings.port,
baudrate=self.sensor_settings.baudrate,
burst_mode=self.sensor_settings.burst_mode,
device_id=self.sensor_settings.device_id)
# self.sensor._initialize()
self.start_streaming()
except serial.serialutil.SerialException as e:
self._event_quit_request.set()
print('ERROR: ', e)
sys.exit(1)
is_streaming = False
while not self._event_quit_request.is_set():
if self._event_is_streaming.is_set():
if not is_streaming:
is_streaming = True
# Any logging or stuff you want to do when streaming has
# just started should go here:bool=False
self._sample_cnt.value += 1
if self._event_is_buffering.is_set():
buffer.append(self.last_reading)
self._buffer_size.value = len(buffer)
elif self._buffer_size.value > 0:
self._event_sending_data.set()
chk = self._chunk_size
while len(buffer)>0:
if chk > len(buffer):
chk = len(buffer)
self._pipe_out.send(buffer[0:chk])
buffer[0:chk] = []
self._buffer_size.value = len(buffer)
else:
if is_streaming:
is_streaming = False
# Logging when streaming just stopped
if self._buffer_size.value > 0:
self._event_sending_data.set()
chk = self._chunk_size
while len(buffer)>0:
if chk > len(buffer):
chk = len(buffer)
self._pipe_out.send(buffer[0:chk])
buffer[0:chk] = []
self._buffer_size.value = len(buffer)
self.pause_streaming()
if __name__ == '__main__':
test_settings = ReSkinSettings(
num_mags=5,
port="COM32",
baudrate=115200,
burst_mode=True,
device_id=1
)
# test_sensor = ReSkinBase(5, port="COM32", baudrate=115200)
test_proc = ReSkinProcess(test_settings, pipe_buffer_on_pause=True)
test_proc.start()
test_proc.start_streaming()
test_proc.start_buffering()
import time
time.sleep(2.0)
test_proc.pause_buffering()
print(len(test_proc.get_buffer()))
# print(test_proc.get_samples(100))
test_proc.pause_streaming()
# buf = test_proc.get_buffer()
# print('Buffer length: ', len(buf))
# print('Sample buffer element (last one): ', buf[-1])
# print('Last reading: ', test_proc.last_reading)
while True:
input('') | [
11748,
379,
37023,
201,
198,
11748,
25064,
201,
198,
11748,
269,
19199,
355,
269,
83,
201,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
11,
8558,
11,
36039,
11,
11052,
11,
15690,
201,
198,
201,
198,
11748,
11389,
201,
198,
201,
198,
... | 2.020646 | 4,456 |
from typing import cast, TYPE_CHECKING, Optional
from pyteal.types import TealType, require_type
from pyteal.config import NUM_SLOTS
from pyteal.errors import TealInputError, TealInternalError
from pyteal.ast.expr import Expr
if TYPE_CHECKING:
from pyteal.compiler import CompileOptions
class ScratchSlot:
"""Represents the allocation of a scratch space slot."""
# Unique identifier for the compiler to automatically assign slots
# The id field is used by the compiler to map to an actual slot in the source code
# Slot ids under 256 are manually reserved slots
nextSlotId = NUM_SLOTS
def __init__(self, requestedSlotId: int = None):
"""Initializes a scratch slot with a particular id
Args:
requestedSlotId (optional): A scratch slot id that the compiler must store the value.
This id may be a Python int in the range [0-256).
"""
if requestedSlotId is None:
self.id = ScratchSlot.nextSlotId
ScratchSlot.nextSlotId += 1
self.isReservedSlot = False
else:
if requestedSlotId < 0 or requestedSlotId >= NUM_SLOTS:
raise TealInputError(
"Invalid slot ID {}, should be in [0, {})".format(
requestedSlotId, NUM_SLOTS
)
)
self.id = requestedSlotId
self.isReservedSlot = True
def store(self, value: Expr = None) -> Expr:
"""Get an expression to store a value in this slot.
Args:
value (optional): The value to store in this slot. If not included, the last value on
the stack will be stored. NOTE: storing the last value on the stack breaks the typical
semantics of PyTeal, only use if you know what you're doing.
"""
if value is not None:
return ScratchStore(self, value)
return ScratchStackStore(self)
def load(self, type: TealType = TealType.anytype) -> "ScratchLoad":
"""Get an expression to load a value from this slot.
Args:
type (optional): The type being loaded from this slot, if known. Defaults to
TealType.anytype.
"""
return ScratchLoad(self, type)
ScratchSlot.__module__ = "pyteal"
ScratchIndex.__module__ = "pyteal"
class ScratchLoad(Expr):
"""Expression to load a value from scratch space."""
def __init__(
self,
slot: ScratchSlot = None,
type: TealType = TealType.anytype,
index_expression: Expr = None,
):
"""Create a new ScratchLoad expression.
Args:
slot (optional): The slot to load the value from.
type (optional): The type being loaded from this slot, if known. Defaults to
TealType.anytype.
index_expression (optional): As an alternative to slot,
an expression can be supplied for the slot index.
"""
super().__init__()
if (slot is None) == (index_expression is None):
raise TealInputError(
"Exactly one of slot or index_expressions must be provided"
)
if index_expression:
if not isinstance(index_expression, Expr):
raise TealInputError(
"index_expression must be an Expr but was of type {}".format(
type(index_expression)
)
)
require_type(index_expression, TealType.uint64)
if slot and not isinstance(slot, ScratchSlot):
raise TealInputError(
"cannot handle slot of type {}".format(type(self.slot))
)
self.slot = slot
self.type = type
self.index_expression = index_expression
ScratchLoad.__module__ = "pyteal"
class ScratchStore(Expr):
"""Expression to store a value in scratch space."""
def __init__(
self, slot: Optional[ScratchSlot], value: Expr, index_expression: Expr = None
):
"""Create a new ScratchStore expression.
Args:
slot (optional): The slot to store the value in.
value: The value to store.
index_expression (optional): As an alternative to slot,
an expression can be supplied for the slot index.
"""
super().__init__()
if (slot is None) == (index_expression is None):
raise TealInternalError(
"Exactly one of slot or index_expressions must be provided"
)
if index_expression:
if not isinstance(index_expression, Expr):
raise TealInputError(
"index_expression must be an Expr but was of type {}".format(
type(index_expression)
)
)
require_type(index_expression, TealType.uint64)
self.slot = slot
self.value = value
self.index_expression = index_expression
ScratchStore.__module__ = "pyteal"
class ScratchStackStore(Expr):
"""Expression to store a value from the stack in scratch space.
NOTE: This expression breaks the typical semantics of PyTeal, only use if you know what you're
doing.
"""
def __init__(self, slot: ScratchSlot):
"""Create a new ScratchStackStore expression.
Args:
slot: The slot to store the value in.
"""
super().__init__()
self.slot = slot
ScratchStackStore.__module__ = "pyteal"
| [
6738,
19720,
1330,
3350,
11,
41876,
62,
50084,
2751,
11,
32233,
198,
198,
6738,
12972,
660,
282,
13,
19199,
1330,
1665,
282,
6030,
11,
2421,
62,
4906,
198,
6738,
12972,
660,
282,
13,
11250,
1330,
36871,
62,
8634,
33472,
198,
6738,
129... | 2.336838 | 2,378 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from authapp.models import KpkUser
admin.site.register(KpkUser, KpkAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
198,
6738,
6284,
1324,
13,
27530,
1330,
509,
79,
74,
12982,
628,
198,
198,
28482,
13,
15654,
13,
30... | 2.944444 | 54 |
import pymysql as py
conn = py.Connect("172.21.4.120","ash","password","test")
if conn:
print("Successfully Connected")
cursor = conn.cursor()
rows = cursor.execute("SHOW TABLES")
data = cursor.fetchall()
for row in data:
print(row[0])
conn.close() | [
11748,
279,
4948,
893,
13976,
355,
12972,
198,
198,
37043,
796,
12972,
13,
13313,
7203,
23628,
13,
2481,
13,
19,
13,
10232,
2430,
1077,
2430,
28712,
2430,
9288,
4943,
198,
361,
48260,
25,
198,
220,
220,
220,
3601,
7203,
33244,
2759,
8... | 2.659794 | 97 |
from torch import nn
from torch.nn import functional as F
import torch
| [
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
11748,
28034,
628,
198,
220,
220,
220,
220,
198
] | 3.16 | 25 |
""" This module defines the Env class that describes a data setup for experiments and runs the sampling. """
import logging
from time import perf_counter as pc
from datetime import datetime
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
import evaluation.metrics as metrics
import utils
# add console logger
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(message)s', datefmt='%H:%M:%S')
class Env:
"""
Defines the experiment environment: data set, splits, common parameters.
Draws and stores the samples from the predictive distribution on the test set.
"""
def __init__(self):
""" Creates a new Env object. """
# set seeds
self.seed = 2305
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
# test case
self.env_name = None # name of the environment
self.model_name = None # name of the model
self.test_case_name = 'test' # name of the test
self.baseline_test_case_name = None # name of the test containing 'true' posterior
self.data_dir = None
# data
self.input_dim = None # number of feature
self.output_dim = None
self.data_size = None # number of rows
self.n_splits = 10
self.current_split = 0
self.train_x = list()
self.train_y = list()
self.test_x = list()
self.test_y = list()
# common model/sampler parameters
self.layers_description = None
self.model_parameters_size = None
self.batch_size = 10
self.chains_num = 1 # number of models to un in parallel; parameters are for each chain
self.n_chunks = 100 # samples are drawn and stored in chunks
self.n_samples = 100 # samples per chunk
self.thinning = 0 # number of samples to discard
self.sampler = None # sampler created for current split
self.sampler_factory = None
# other
self._log_handler = None
def get_default_sampler_params(self):
""" Creates default parameters for a Sampler. """
params = dict()
params['train_x'] = self.get_train_x()
params['train_y'] = self.get_train_y()
params['test_x'] = self.get_test_x()
params['test_y'] = self.get_test_y()
params['batch_size'] = self.batch_size
return params
def create_training_test_sets(self):
""" Split data set into training and test folds. """
# load input data
input_data = np.asarray(np.loadtxt('input/data.txt'), dtype=np.float32)
self.input_dim = input_data.shape[1] - 1
self.output_dim = 1
# align to batch size
batches = input_data.shape[0] // (self.batch_size * self.n_splits)
input_data = input_data[:batches * (self.batch_size * self.n_splits)]
self.data_size = input_data.shape[0]
print('Loaded input data, shape = {}'.format(input_data.shape))
# create splits
kfold = KFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)
print('Splits: {}'.format(self.n_splits))
# assume y is in the last column by default
for idx_train, idx_test in kfold.split(input_data):
self.train_x.append(input_data[idx_train, :-1])
self.train_y.append(input_data[idx_train, -1:])
self.test_x.append(input_data[idx_test, :-1])
self.test_y.append(input_data[idx_test, -1:])
# layers described as [number of neurons, dropout probability]
if self.layers_description is None:
self.layers_description = [[self.input_dim, 0.0], [100, 0.0], [100, 0.0], [self.output_dim, 0.0]]
def samples_per_chunk(self):
""" Returns the number of samples drawn in each chunk. """
return self.n_samples * (self.thinning + 1)
def get_train_x(self):
""" Returns current training set - x points. """
return self.train_x[self.current_split]
def get_train_y(self):
""" Returns current training set - y labels. """
return self.train_y[self.current_split]
def get_test_x(self):
""" Returns current test set - x points. """
return self.test_x[self.current_split]
def get_test_y(self):
""" Returns current test set - y labels. """
return self.test_y[self.current_split]
def setup_data_dir(self, serialise_name='env'):
""" Creates data directories and serialises the environment. """
self.data_dir = self.__create_test_dir_name()
utils.set_data_dir(self.data_dir)
if serialise_name is not None:
utils.serialize(serialise_name, self)
# configure file logging
self._log_handler = logging.FileHandler(filename=utils.DATA_DIR + '/env.log', mode='w')
self._log_handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(self._log_handler)
def run(self, store_data=True):
""" Runs the experiments. """
for split in range(self.n_splits):
self.current_split = split
self.__run_split(store_data)
def load_samples(self, split=0, discard_left=0., discard_right=0.):
""" Loads collected samples from a file. """
samples, split_dir = self.__deserialise_from_split('samples', split)
if discard_right > 0:
samples = samples[int(discard_left * samples.shape[0]):-int(discard_right * samples.shape[0])]
else:
samples = samples[int(discard_left * samples.shape[0]):]
return samples
def load_stats(self, split=0, discard_left=0., discard_right=0., key=None):
""" Loads collected statistics from a file. """
stats, split_dir = self.__deserialise_from_split('stats', split)
if discard_right > 0:
stats = stats[int(discard_left * len(stats)):-int(discard_right * len(stats))]
else:
stats = stats[int(discard_left * len(stats)):]
if key is not None:
stats = np.asarray(list(map(key, stats)))
return stats
def load_times(self, split=0, discard_left=0., discard_right=0.):
""" Loads sampling times from a file. """
return self.load_stats(split=split, discard_left=discard_left, discard_right=discard_right,
key=lambda stat: stat.time)
def compute_rmse(self, samples, test_y=None):
""" Computer RMSE on the test set. """
samples = samples.squeeze()
test_y = test_y if test_y is not None else self.get_test_y()
test_y = test_y.squeeze()
mean_prediction = samples.mean(axis=0)
rmse = (np.mean((test_y - mean_prediction) ** 2)) ** .5
return rmse
def compute_metrics(self, baseline_samples, target_samples, discard_target=0.,
resample_baseline=1000, resample_target=1000, metric_names=None):
""" Computes distribution metrics with respect to the baseline. """
baseline_samples = metrics.resample_to(baseline_samples, resample_baseline)
target_samples = target_samples[int(discard_target * target_samples.shape[0]):]
target_samples = metrics.resample_to(target_samples, resample_target)
test_y = self.get_test_y()
results = dict()
if metric_names is None:
metric_names = ['KS', 'KL', 'Precision', 'Recall']
if 'RMSE' in metric_names:
results['RMSE'] = self.compute_rmse(target_samples, test_y=test_y)
metric_names.remove('RMSE')
for metric_name in metric_names:
metric_fn = metrics.METRICS_INDEX[metric_name]
values = list()
for test_point in range(baseline_samples.shape[1]):
values.append(metric_fn(baseline_samples[:, test_point], target_samples[:, test_point]))
results[metric_name] = np.mean(values)
return results
    def __run_split(self, store_data) -> None:
        """ Creates the sampler for the current split and draws the samples. """
        # Announce which split is being processed (1-based for readability).
        logging.info('Split: {} / {}'.format(self.current_split + 1, self.n_splits))
        # Redirect all serialisation for this split into its own subdirectory.
        utils.set_data_dir(self.data_dir + '/split-' + str(self.current_split))
        logging.info('Data directory: {}'.format(utils.DATA_DIR))
        # Build a fresh sampler for this split and persist its configuration.
        self.sampler = self.sampler_factory()
        with open(utils.DATA_DIR + '/sampler.txt', 'w') as f:
            f.write(self.sampler.__repr__())
        logging.info('Total samples to draw: {}, samples per chain: {}, total samples to store: {}'
                     .format(self.samples_per_chunk() * self.chains_num * self.n_chunks,
                             self.samples_per_chunk() * self.n_chunks,
                             self.n_samples * self.chains_num * self.n_chunks))
        samples_drawn = 0
        with tf.Session() as session:
            tf.global_variables_initializer().run()
            collected_samples = list()
            collected_stats = list()
            collected_params = list()
            # sample in chunks
            # Exponential moving average of per-chunk wall time, used for the ETA.
            elapsed_ema = None
            for chunk in range(self.n_chunks):
                start = pc()
                # fit the model
                self.sampler.fit()
                # draw samples into current chunk
                for sample in range(self.n_samples):
                    if self.thinning > 0:
                        # Draw and discard `thinning` predictive samples between
                        # stored ones to reduce autocorrelation.
                        _ = [self.sampler.sample_predictive(session=session, is_discarded=True) for _ in
                             range(self.thinning)]
                    # NOTE: rebinding `sample` here shadows the loop index; the
                    # iteration itself is unaffected since `for` draws from range().
                    sample, stats, params = self.sampler.sample_predictive(return_stats=True, session=session)
                    collected_samples.extend(sample)
                    collected_stats.extend(stats)
                    if params:
                        collected_params.extend(params)
                samples_drawn += self.chains_num * self.n_samples
                # report stats
                elapsed = pc() - start
                # EMA with smoothing 0.1; falls back to the raw time on the first chunk.
                elapsed_ema = .1 * elapsed + .9 * elapsed_ema if elapsed_ema is not None else elapsed
                remaining = (self.n_chunks - chunk - 1) * elapsed_ema
                # Convert the ETA to (minutes, seconds) for display.
                remaining = (remaining // 60, int(remaining) % 60)
                # Only the most recent chunk's stats/samples feed the progress report.
                lag = self.n_samples * self.chains_num
                stats = collected_stats[-lag:]
                # Loss is reported as a root (** .5), presumably an RMSE-style value.
                min_loss = min(stats, key=lambda s: s.loss).loss ** .5
                max_loss = max(stats, key=lambda s: s.loss).loss ** .5
                min_norm = min(stats, key=lambda s: s.norm).norm
                max_norm = max(stats, key=lambda s: s.norm).norm
                min_rate = min(stats, key=lambda s: s.rate).rate
                max_rate = max(stats, key=lambda s: s.rate).rate
                min_step = min(stats, key=lambda s: s.step).step
                max_step = max(stats, key=lambda s: s.step).step
                min_noise = min(stats, key=lambda s: s.noise_var).noise_var
                max_noise = max(stats, key=lambda s: s.noise_var).noise_var
                samples = collected_samples[-lag:]
                test_rmse = self.compute_rmse(np.asarray(samples))
                logging.info('Chunk = {}/{}, elapsed = {:.1f}s, '.format(chunk + 1, self.n_chunks, elapsed) +
                             'remain = {:02.0f}:{:02.0f}, test RMSE: {:.10f}, '.format(remaining[0], remaining[1], test_rmse) +
                             'rate = {:.2f}-{:.2f}, loss = {:.2f}-{:.2f}, '.format(min_rate, max_rate, min_loss, max_loss) +
                             'norm = {:.2f}-{:.2f}, step = {:.2f}-{:.2f}, '.format(min_norm, max_norm, min_step, max_step) +
                             'noise var = {:.2f}-{:.2f}'.format(min_noise, max_noise))
                # store collected data
                # Persist every 10 chunks and always after the final chunk.
                if store_data and (((chunk + 1) % 10 == 0) or ((chunk + 1) == self.n_chunks)):
                    start = pc()
                    utils.serialize('samples', np.asarray(collected_samples))
                    utils.serialize('stats', collected_stats)
                    utils.serialize('params', np.asarray(collected_params))
                    logging.info('---> Collections serialized in {:.0f} seconds.'.format(pc() - start))
        logging.info('Sampling complete')
        # Detach the per-run log handler — presumably installed when the run
        # started; verify against the setup code.
        logging.getLogger().removeHandler(self._log_handler)
| [
37811,
770,
8265,
15738,
262,
2039,
85,
1398,
326,
8477,
257,
1366,
9058,
329,
10256,
290,
4539,
262,
19232,
13,
37227,
198,
11748,
18931,
198,
6738,
640,
1330,
23035,
62,
24588,
355,
40653,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,... | 2.199359 | 5,613 |
import logging
import numpy as np
import torch
import torch.nn as nn
| [
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628
] | 3.5 | 20 |
import os
# Database connection setup
| [
11748,
28686,
628,
198,
2,
24047,
4637,
9058,
198
] | 4.444444 | 9 |
from typing import List, Optional, Dict, Tuple
from secrets import token_bytes
from blspy import PrivateKey, AugSchemeMPL
from src.types.condition_var_pair import ConditionVarPair
from src.types.condition_opcodes import ConditionOpcode
from src.types.program import Program
from src.types.coin import Coin
from src.types.coin_solution import CoinSolution
from src.types.spend_bundle import SpendBundle
from src.util.clvm import int_to_bytes, int_from_bytes
from src.util.condition_tools import (
conditions_by_opcode,
pkm_pairs_for_conditions_dict,
conditions_for_solution,
)
from src.util.ints import uint32
from src.wallet.puzzles.p2_conditions import puzzle_for_conditions
from src.wallet.puzzles.p2_delegated_puzzle import puzzle_for_pk
from src.wallet.puzzles.puzzle_utils import (
make_assert_coin_consumed_condition,
make_assert_my_coin_id_condition,
make_create_coin_condition,
make_assert_block_index_exceeds_condition,
make_assert_block_age_exceeds_condition,
make_assert_aggsig_condition,
make_assert_time_exceeds_condition,
make_assert_fee_condition,
)
from src.wallet.derive_keys import master_sk_to_wallet_sk
| [
6738,
19720,
1330,
7343,
11,
32233,
11,
360,
713,
11,
309,
29291,
198,
198,
6738,
13141,
1330,
11241,
62,
33661,
198,
6738,
698,
2777,
88,
1330,
15348,
9218,
11,
2447,
27054,
1326,
44,
6489,
198,
198,
6738,
12351,
13,
19199,
13,
31448... | 2.917706 | 401 |