id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
33022 | import random
from torchvision.transforms import functional as F
from torchvision.transforms import transforms
from PIL import Image | StarcoderdataPython |
1624582 | import pytest
@pytest.fixture
def context():
    """Pytest fixture returning an empty, attribute-assignable object.

    Tests can hang arbitrary state off the returned instance.
    """
    class Context(object):
        pass
    return Context()
| StarcoderdataPython |
6552868 | # coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import io
import zipfile
import tempfile
import unittest
from six.moves import filterfalse
import fs.test
import fs.wrap
import fs.errors
import fs.memoryfs
import fs.archive.zipfs
from fs.path import relpath, join, forcedir, abspath, recursepath
from fs.archive.test import ArchiveReadTestCases, ArchiveIOTestCases
FS_VERSION = tuple(map(int, fs.__version__.split('.')))
def zip_compress(handle, source_fs):
    """Serialize *source_fs* into *handle* as a zip archive.

    Rewinds *handle* first when it is a seekable stream.
    """
    rewindable = hasattr(handle, 'seek') and handle.seekable()
    if rewindable:
        handle.seek(0)
    fs.archive.zipfs.ZipSaver(handle, False).save(source_fs)
class TestZipFS(fs.test.FSTestCases, unittest.TestCase):
    """Run the standard PyFilesystem test suite against a temp-file ZipFS."""
    def make_fs(self):
        # NOTE(review): tempfile.mktemp() is deprecated/insecure (the path can
        # be claimed between creation and use); kept because ZipFS wants a
        # path that does not exist yet -- confirm before changing.
        self.tempfile = tempfile.mktemp()
        return fs.archive.zipfs.ZipFS(self.tempfile)
    def destroy_fs(self, fs):
        # The parameter deliberately shadows the module-level ``fs``; only the
        # filesystem object is used inside this method.
        fs.close()
        if os.path.exists(self.tempfile):
            os.remove(self.tempfile)
        del self.tempfile
    @unittest.skipIf(FS_VERSION < (2, 4, 15), "fails because of PyFilesystem2#509")
    def test_move(self):
        super(TestZipFS, self).test_move()
    @unittest.skipIf(FS_VERSION < (2, 4, 15), "fails because of PyFilesystem2#509")
    def test_move_file_same_fs(self):
        super(TestZipFS, self).test_move_file_same_fs()
class TestZipReadFS(ArchiveReadTestCases, unittest.TestCase):
    """Read-only archive test suite run against ZipReadFS on an in-memory zip."""
    # Enable the optional long-filename and unicode-filename test cases.
    long_names = True
    unicode_names = True
    compress = staticmethod(zip_compress)
    make_source_fs = fs.memoryfs.MemoryFS
    _archive_read_fs = fs.archive.zipfs.ZipReadFS
    @staticmethod
    def remove_archive(handle):
        # The archive lives in a BytesIO handle; closing it discards it.
        handle.close()
    def setUp(self):
        # Build each test archive in memory rather than on disk.
        handle = io.BytesIO()
        super(TestZipReadFS, self).setUp(handle)
class TestZipFSio(ArchiveIOTestCases, unittest.TestCase):
    """Archive I/O test suite parameterized with zip-specific enumeration helpers."""
    compress = staticmethod(zip_compress)
    make_source_fs = fs.memoryfs.MemoryFS
    load_archive = fs.archive.zipfs.ZipFS
    _archive_fs = fs.archive.zipfs.ZipFS
    @staticmethod
    def iter_files(handle):
        """Yield the absolute path of every regular file stored in the zip."""
        if hasattr(handle, 'seek') and handle.seekable():
            handle.seek(0)
        with zipfile.ZipFile(handle) as z:
            # filter(None, ...) drops empty names; entries ending in '/' are
            # directories, not files.
            for name in filter(None, z.namelist()):
                if not name.endswith('/'):
                    yield abspath(name)
    @staticmethod
    def iter_dirs(handle):
        """Yield every directory in the zip, explicit or implied by a file path."""
        zipname = lambda n: abspath(n).rstrip('/')
        seen = set()
        # True for the root path '/', which must never be yielded.
        root_filter = '/'.__contains__
        if hasattr(handle, 'seek') and handle.seekable():
            handle.seek(0)
        with zipfile.ZipFile(handle) as z:
            for name in z.namelist():
                # directory defined in the zipfile
                if name.endswith('/'):
                    seen.add(name)
                    yield zipname(name)
                # implicit directory
                else:
                    # Every ancestor of a file path is an implicit directory;
                    # skip the file itself and anything already reported.
                    for path in filterfalse(root_filter, recursepath(name)):
                        if path != abspath(name) and not path in seen:
                            seen.add(path)
                            yield zipname(path)
| StarcoderdataPython |
6424725 | from typing import Callable
import requests
from .... import IGraphResponse, IGraphFilter, IGraphAction
from ...abstractions.ISharepointDocumentLibrary import ISharepointDocumentLibrary
from ...abstractions.ISharepointList import ISharepointList
from ...utilities.sharepoint_graph_client_base import SharepointGraphClientBase
from ..list.sharepoint_list import SharepointList
from ..documentlibrary.sharepoint_document_library import SharepointDocumentLibrary
from .... import GraphResponseBase
class SharepointSite:
    """Fluent wrapper around one SharePoint site accessed through the Graph API."""

    def __init__(self, site:str, client:SharepointGraphClientBase):
        self.site = site
        self.list = None
        self.library = None
        self.client = client
        self.graph_filters:list[IGraphFilter] = []
        self.graph_request = GraphResponseBase()

    def lists(self, list_name:str = None) -> ISharepointList:
        """Select a SharePoint list on this site and return its wrapper."""
        self.list = SharepointList(self, self.client, list_name)
        return self.list

    def filters(self, filter_func:Callable[...,list[IGraphFilter]]) -> IGraphAction:
        """Install the filters produced by *filter_func*; returns self for chaining."""
        self.graph_filters = filter_func()
        return self

    def documents(self, library_name:str) -> ISharepointDocumentLibrary:
        """Select a document library on this site and return its wrapper."""
        self.library = SharepointDocumentLibrary(library_name, self, self.client)
        return self.library

    def get(self, url:str = None) -> IGraphResponse:
        """Issue the GET request (an explicit *url* wins over the built one)."""
        if url:
            target = url
        else:
            # No explicit URL: build the site path and append any filter query.
            target = f"{self.client.GRAPH_BASE_URI}{self.build_url()}{self.build_filter_query()}"
        response = requests.get(target, headers=self.client.conn.headers)
        self.graph_request.add_response(response)
        if self.graph_request not in self.client.requests:
            self.client.add_request(self.graph_request)
        return self.graph_request

    def build_url(self) -> str:
        """Relative Graph path addressing this site."""
        return f"sites/root:/sites/{self.site}:/"

    def build_filter_query(self) -> str:
        """Query string ("?f1&f2") for the installed filters, or "" when none."""
        if not self.graph_filters:
            return ""
        composed = "&".join(f.compose() for f in self.graph_filters)
        return f"?{composed}"
6432684 | <reponame>i1caro/hotel
import logging
# import os
# create logger
# Configure the root logger to append INFO+ records to python.log.
logging.basicConfig(
    filename='python.log',
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
    # %I is a 12-hour clock; without %p (AM/PM) timestamps were ambiguous.
    datefmt='%m/%d/%Y %I:%M:%S %p'
)
# logger = logging.getLogger('hotel')
# # create file handler which logs even info messages
# file_log = logging.FileHandler(os.path.join(os.getcwd(), 'python.log'))
# # create console handler with a higher log level
# console_log = logging.StreamHandler()
# # create formatter and add it to the handlers
# formatter = logging.Formatter(
#
# )
# file_log.setFormatter(formatter)
# console_log.setFormatter(formatter)
# # add the handlers to the logger
# logger.addHandler(file_log)
# logger.addHandler(console_log)
| StarcoderdataPython |
9635159 | # Generated by Django 3.0.7 on 2020-06-27 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add address/location/descriptive fields to the CollectionPoint model."""
    dependencies = [
        ('coletaeapi', '0001_initial'),
    ]
    # All new fields are optional (blank=True, null=True) except ``name``,
    # which must be unique across collection points.
    operations = [
        migrations.AddField(
            model_name='collectionpoint',
            name='address',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='collectionpoint',
            name='addressComplement',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='collectionpoint',
            name='city',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='collectionpoint',
            name='items',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='collectionpoint',
            name='name',
            field=models.CharField(blank=True, max_length=255, unique=True),
        ),
        migrations.AddField(
            model_name='collectionpoint',
            name='uf',
            field=models.CharField(blank=True, max_length=2, null=True),
        ),
    ]
| StarcoderdataPython |
248113 | #!/usr/bin/env python
# Author: <NAME>
# Core Python Pogramming - Homework 9
import os, imp
import string, random
def file_fliter(name): # 9-1
    """Print every line of file *name* that does not start with '#'."""
    fhand = open(name)
    for line in fhand:
        # Lines whose first character is '#' are treated as comments.
        if line[0] != '#':
            print line,
    fhand.close()
def read_file(): # 9-2
    """Print the first N lines of a file; file name and N are prompted for."""
    fhand = open(raw_input('File name: '))
    number = int(raw_input('Line number: '))
    i = 0
    for line in fhand:
        # Only the first `number` lines are echoed; the rest are counted past.
        if i < number:
            print line,
        i += 1
    fhand.close()
def line_number(fhand=None): # 9-4
    """Page through a file 25 lines at a time, asking whether to continue.

    When *fhand* is not given, the file name is prompted for interactively.
    """
    if not fhand:
        fhand = open(raw_input('File name: '))
    idx = 0
    for line in fhand:
        print line,
        if idx >= 24:
            # A full page (25 lines) was printed; ask before the next one.
            ans = raw_input('Do you want to continue? (y/n) ')
            if ans[0] == 'y':
                idx = 0
            else:
                break
        else:
            idx += 1
    fhand.close()
def compare_files(): # 9-6
file_1 = [line.rstrip() for line in open(raw_input('First file: '))]
file_2 = [line.rstrip() for line in open(raw_input('Second file: '))]
file_1.close()
file_2.close()
idx = 0
for i in xrange(max(len(file_1), len(file_2))):
if file_1[i] != file_2[i]:
break
else:
print "Two files are same."
return None
print i+1
def services_comp(): # 9-7
    """Extract "port service" pairs from /etc/services into Port_Numbers.txt.

    Comment and header lines (not starting with a letter) are skipped.
    """
    # (The original allocated an unused ``ports`` set here; removed.)
    fhand_s = open('/etc/services')
    exp = file('Port_Numbers.txt', 'w')
    for line in fhand_s:
        # Real service entries start with a letter; comments/blanks do not.
        if line[0] not in string.letters:
            continue
        fragments = line.split()
        # fragments[1] looks like "22/tcp"; dropping the last 4 characters
        # strips the "/tcp" or "/udp" suffix, leaving the port number.
        exp.write(fragments[1][:-4]+' '+fragments[0]+'\n')
    fhand_s.close()
    exp.close()
def module_doc(): # 9-9
    """Collect the first docstring of every stdlib module under /usr/lib/python2.7.

    Writes each module name plus its (heuristically parsed) docstring to
    Docstring_of_Standard_Library.txt; modules without one get a placeholder.
    NOTE(review): the parser below only handles docstrings that start in the
    first 4 columns and is easily fooled by unusual quoting -- best effort.
    """
    output = open('Docstring_of_Standard_Library.txt', 'w')
    os.chdir('/usr/lib/python2.7')
    for filename in os.listdir('/usr/lib/python2.7'):
        if filename[-3:] == '.py':
            fhand = open(filename)
            docs = ''
            is_doc = False
            for line in fhand:
                # Opening (or one-line) triple quote near the line start.
                if "'''" in line[:4] or '"""' in line[:4]:
                    # One-line docstring: quotes open and close on this line.
                    if line[-4:-1] == "'''" or line[-4:-1] == '"""' and len(line)>4:
                        docs += line[:-4] + '\n'
                        break
                    if is_doc:
                        break
                    is_doc = True
                # Closing triple quote at the end of a later line.
                elif line[-4:-1] == "'''" or line[-4:-1] == '"""':
                    docs += line
                    break
                if is_doc:
                    docs += line
            fhand.close()
            if docs:
                output.write(filename[:-3]+
                    '\n=====================================================================\n'
                    +docs.lstrip('r\'"')+'\n\n\n\n\n')
            else:
                output.write(filename[:-3]+
                    '\n=====================================================================\n'
                    +'No docstring. \n'+'\n\n\n\n\n')
def bookmark_manage(): # 9-11
    '''
    !!! This program needs to be TOTALLY REWRITTEN !!!
    '''
    # Interactive loop over a flat "name: address" file (bookmarks.txt).
    print '''
    Bookmark Manager
    =========================================
    '''
    main_menu = '''
    (L)ook
    (A)dd
    (E)dit
    (S)earch
    (Q)uit
    '''
    while True:
        choice = raw_input(main_menu).lower()
        if choice[0] == 'l':
            # List all bookmarks, creating the file if it does not exist yet.
            try:
                fhand = open('bookmarks.txt', 'r')
            except IOError:
                new = open('bookmarks.txt', 'w')
                new.close()
                fhand = open('bookmarks.txt', 'r')
            print
            for item in fhand:
                print item.rstrip()
            fhand.close()
        elif choice[0] == 'a':
            # Append a new "name: address" entry.
            try:
                fhand = open('bookmarks.txt', 'a')
            except IOError:
                new = open('bookmarks.txt', 'w')
                new.close()
                fhand = open('bookmarks.txt', 'a')
            new_item = (raw_input('Name: '), raw_input('Address: '))
            fhand.write(new_item[0]+': '+new_item[1]+'\n')
            print 'New bookmark added! \n'
        elif choice[0] == 'e':
            try:
                fhand = open('bookmarks.txt', 'r+')
            except IOError:
                print 'No bookmark yet!\n'
                continue
            for item in fhand:
                print item
            name = raw_input('Input name to change: ')
            # NOTE(review): the file iterator is already exhausted by the loop
            # above, so this second enumerate() never yields, and seek(idx)
            # seeks to a line *index*, not a byte offset -- editing is broken,
            # as the docstring above admits.
            for idx, item in enumerate(fhand):
                if item.split(': ')[0] == name:
                    fhand.seek(idx)
                    new_add = raw_input('New address: ')
                    fhand.write(item.split(': ')[0]+': '+new_add+'\n')
            fhand.close()
        else:
            # Any other choice (including 's') quits.
            print 'Quitted!'
            break
def list_argv(): # 9-13
from sys import argv
for a in argv:
print a
def copy_files(): # 9-15
    """Copy a text file line by line; source and destination are prompted for.

    Re-prompts until the source file can be opened.
    """
    while True:
        try:
            file_1 = open(raw_input('Input file: '))
            break
        except IOError:
            print 'File doesn\'t exist!'
    # Destination is (over)written unconditionally.
    file_2 = open(raw_input('Copy to: '), 'w')
    for line in file_1:
        file_2.write(line)
    file_1.close()
    file_2.close()
def text_editor(): # 9-17
    """Very small interactive line editor: create, show, or edit a file."""
    main_menu = '''
    ===========================================================
    (C)reate new file
    (S)how a file
    (E)dit a file
    (Q)uit
    ===========================================================
    '''
    while True:
        mode = raw_input(main_menu)[0].lower()
        if mode == 'q':
            break
        filename = raw_input('File name: ')
        if mode == 'c':
            # Create: type lines until the ':q!' sentinel.
            if not os.path.isfile(filename):
                fhand = open(filename, 'w')
                print 'Input\nPrint \':q!\' to exit\n'
                line = ''
                # NOTE(review): each input gets '\n' appended before the
                # comparison, so `line` is ':q!\n', never ':q!' -- the exit
                # sentinel can never match as written; confirm and fix.
                while line != ':q!':
                    fhand.write(line)
                    line = raw_input()+'\n'
                fhand.close()
                continue
            print 'File name exists!'
        elif mode == 's':
            # Show: dump the file to stdout.
            if os.path.isfile(filename):
                fhand = open(filename)
                for line in fhand:
                    print line,
                print
                fhand.close()
            else:
                print 'File doesn\'t exist!'
        elif mode == 'e':
            # Edit: print numbered lines, let the user replace lines by
            # number, then rewrite the whole file.
            if os.path.isfile(filename):
                fhand = open(filename, 'r+')
                for idx, line in enumerate(fhand, start=1):
                    print '%4d '%idx, line,
                fhand.seek(0, 0)
                lines = fhand.readlines()
                fhand.close()
                while raw_input('Edit a line? (y/n) ')[0].lower() == 'y':
                    line_num = int(raw_input('Choose the line number to change: '))
                    print lines[line_num-1],
                    lines[line_num-1] = raw_input('Input\n') + '\n'
                fhand = open(filename, 'w')
                fhand.writelines(lines)
                fhand.close()
def search_file(): # 9-18
count = 0
target = chr(int(raw_input('Chosen ASCII Code (0-255): ')))
for line in open(raw_input('File Name: ')):
for c in line:
if c == target:
count += 1
print 'Number: %d' %count
def create_file(): # 9-19
test = ''
used_num = [-1]
field = range(255)
target = int(raw_input('Chosen ASCII Code (0-255): '))
field.remove(target)
times = int(raw_input('Times of appearance: '))
length = int(raw_input('Length:'))
for i in xrange(length):
test += chr(random.choice(field))
for i in xrange(times):
temp = list(test)
p = -1
while p in used_num:
p = random.randrange(length)
temp[p] = chr(target)
test = ''.join(temp)
print test
if __name__ == '__main__':
    # Entry point: run the interactive text editor exercise (9-17).
    # file_fliter(raw_input('File name: '))
    text_editor()
| StarcoderdataPython |
3557120 | from .common import add_history_args, add_vehicle_args, format_duration_minutes, get_vehicle
from datetime import datetime
from tabulate import tabulate
help_text = 'Show charging statistics for your vehicle.'
def configure_parser(parser):
    """Register CLI arguments: vehicle and history selectors plus --period."""
    add_vehicle_args(parser)
    add_history_args(parser)
    parser.add_argument('--period', help='Period over which to aggregate', choices=['day', 'month'], default='month')
def run(parsed_args):
    """Print a table of charging statistics for the selected vehicle.

    The date window defaults to [first of the current month, now]; both
    ends are clamped to "now" so future dates are never queried.
    """
    v = get_vehicle(parsed_args)
    # NOTE(review): utcnow() yields a naive datetime -- assumes the API and
    # the CLI dates are both UTC-naive; confirm.
    now = datetime.utcnow()
    if parsed_args.from_date:
        from_date = min(parsed_args.from_date, now)
    else:
        from_date = now.replace(day=1)
    if parsed_args.to:
        to_date = min(parsed_args.to, now)
    else:
        to_date = now
    print(
        tabulate(
            [_format_charge_stat(s) for s in v.charge_statistics(from_date, to_date, parsed_args.period)],
            # Maps API field names to the printed column headers.
            headers={
                'day': 'Day',
                'month': 'Month',
                'totalChargesNumber': 'Number of charges',
                'totalChargesDuration': 'Total time charging',
                'totalChargesErrors': 'Errors'
            }
        )
    )
def _format_charge_stat(s):
    """Replace the raw minute count in *s* with a human-readable duration."""
    minutes = s.get('totalChargesDuration', 0)
    s['totalChargesDuration'] = format_duration_minutes(minutes)
    return s
| StarcoderdataPython |
1752280 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from solo.admin import SingletonModelAdmin
from events.models import Event, Profile, Team, Event, TeamMembership
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Default admin interface for user profiles."""
    pass
@admin.register(Event)
class EventAdmin(SingletonModelAdmin):
    """Admin for the (singleton) Event -- only one instance is editable."""
    pass
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
    """Default admin interface for teams."""
    pass
@admin.register(TeamMembership)
class TeamMembershipAdmin(admin.ModelAdmin):
    """Default admin interface for team memberships."""
    pass
| StarcoderdataPython |
5124520 | <filename>api/test/app/batch/test_db_data_exporter.py
"""
dc-test /usr/test/app/batch/test_db_data_exporter.py
"""
from unittest import mock
import pytest
from src.app.batch.db_data_exporter import DBDataExporter
from src.app.config.hard_constants import HardConstants
from src.app.models.comment import CommentDAO
from src.app.models.job_log import JobLogDAO, JobLogType, JobLogStatus
from src.app.models.nicoru import NicoruDAO
from test.app.db_test_helper import db_test_session
from test.app.models.data import TestData
class TestDBDataExporter:
    """Tests for DBDataExporter.execute() and its CSV-row validation helper."""
    class Test_execute:
        def test_success(self):
            """A seeded nicoru+comment pair is exported and the job log says DONE."""
            with db_test_session() as session:
                # setup
                NicoruDAO(session).nicoru(TestData.VIDEO_ID_1, TestData.COMMENT_ID_1)
                CommentDAO(session).add(
                    id=TestData.COMMENT_ID_1,
                    video_id=TestData.VIDEO_ID_1,
                    text=TestData.Comment.TEXT_1,
                    posted_at=TestData.Comment.POSTED_AT_1,
                    posted_by=TestData.Comment.POSTED_BY_1,
                    point=TestData.Comment.POINT_1,
                    was_deleted=TestData.Comment.WAS_DELETED_1,
                    official_nicoru=TestData.Comment.OFFICIAL_NICORU_1,
                )
                session.commit()
                # Redirect output paths to the test constants.
                HardConstants.App = HardConstants.Test
                # run
                DBDataExporter.execute()
                # verify
                with open(HardConstants.App.REPORT_CSV, 'r') as f:
                    assert f.readlines() == [
                        '"動画ID","コメ番","コメント","擬似ニコる","公式ニコる"\n',
                        '"{vid}","{cid}","{c}","1","{o_n}"\n'.format(
                            vid=TestData.VIDEO_ID_1,
                            cid=TestData.COMMENT_ID_1,
                            c=TestData.Comment.TEXT_1,
                            o_n=TestData.Comment.OFFICIAL_NICORU_1),
                    ]
                assert JobLogDAO(session).find_by_type(JobLogType.DB_DATA_EXPORT).status == JobLogStatus.DONE
        def test_failure(self):
            """When export raises, the exception propagates and the job log says ABORTED."""
            with db_test_session() as session:
                # setup
                HardConstants.App = HardConstants.Test
                with mock.patch.object(DBDataExporter, 'export_public_data', side_effect=Exception):
                    # run
                    with pytest.raises(Exception):
                        DBDataExporter.execute()
                    # verify
                    assert JobLogDAO(session).find_by_type(JobLogType.DB_DATA_EXPORT).status == JobLogStatus.ABORTED
    class Test_row_is_valid:
        # Rows are (video_id, comment_id, ...); both ids must be non-empty.
        def test_valid(self):
            assert DBDataExporter.row_is_valid((TestData.VIDEO_ID_1, TestData.COMMENT_ID_1, 0, 0, 0))
        def test_invalid_vid(self):
            assert not DBDataExporter.row_is_valid((TestData.VIDEO_ID_1, '', 0, 0, 0))
        def test_invalid_cid(self):
            assert not DBDataExporter.row_is_valid(('', TestData.COMMENT_ID_1, 0, 0, 0))
| StarcoderdataPython |
3219393 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 17:39:25 2020
@author: prachi
"""
import os
import sys
import numpy as np
import subprocess
import pickle
import argparse
#updating
def setup():
"""Get cmds and setup directories."""
cmdparser = argparse.ArgumentParser(description='convert kaldi PCA transform and mean into pickle format',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdparser.add_argument('--kaldi_feats_path', help='path of folder where transform.mat and mean.vec stored', type=str,required=True)
cmdparser.add_argument('--dataset', help='dataset name', type=str, default="callhome1",required=True)
cmdparser.add_argument('--output_dir', help='path of SSC main folder to store pickle file', type=str, default="None",required=True)
cmdargs = cmdparser.parse_args()
return cmdargs
def kaldiPlda2numpydict(pldaFile):
    """Parse a kaldi PLDA model into numpy arrays via ivector-copy-plda.

    Returns a dict with keys 'plda_mean', 'diagonalizing_transform' and
    'Psi_across_covar_diag'.  Requires the kaldi ``ivector-copy-plda``
    binary on PATH.
    """
    #logging.debug('kaldi text file to numpy array: {}'.format(textfile))
    fin = subprocess.check_output(["ivector-copy-plda", "--binary=false", pldaFile ,"-"])
    res = {}
    fin = fin.decode("utf-8").split('\n')
    while '' in fin:
        fin.remove('')
    # First line: "<Plda> [ mean... ]" -- strip the tag and brackets.
    splitted = fin[0].strip().split()
    res['plda_mean'] = np.asarray(splitted[2:-1]).astype(float)
    tmparr=[]
    # Matrix rows follow; the row ending with ']' closes the matrix.
    for i,line in enumerate(fin[2:]):
        splitted = line.strip().split()
        if splitted[-1] == ']':
            splitted = splitted[:-1]
            tmparr.append(np.asarray(splitted).astype(float))
            break
        else:
            tmparr.append(np.asarray(splitted).astype(float))
    res['diagonalizing_transform'] = np.asarray(tmparr)
    # Second-to-last line holds the across-class covariance diagonal.
    res['Psi_across_covar_diag'] = np.asarray(fin[-2].strip().split()[1:-1]).astype(float)
    return res
def load_kaldi_matrices(args):
    """Convert kaldi PLDA/transform/mean files into one pickle.

    Reads plda, transform.mat and mean.vec from args.kaldi_feats_path
    (using kaldi's copy-matrix/copy-vector binaries) and writes
    <output_dir>/lists/<dataset>/plda_<dataset>.pkl.  Skips work when the
    pickle already exists.
    """
    fold_local = args.kaldi_feats_path
    dataset = args.dataset
    out_fold = args.output_dir
    # os.makedirs replaces the original shell call os.system('mkdir -p ...'):
    # portable, no shell involved, no quoting issues with odd paths.
    os.makedirs('{}/lists/{}'.format(out_fold, dataset), exist_ok=True)
    outpicklefile = '{}/lists/{}/plda_{}.pkl'.format(out_fold,dataset,dataset)
    if os.path.isfile(outpicklefile):
        print("file exists!")
        return
    plda_file = '{}/plda'.format(fold_local)
    if os.path.isfile(plda_file):
        plda = kaldiPlda2numpydict(plda_file)
    else:
        print('plda model does not exist!')
        plda = {}
    transform_mat_file = '{}/transform.mat'.format(fold_local)
    mean_vec_file = '{}/mean.vec'.format(fold_local)
    # copy-matrix/copy-vector print kaldi text format; strip the enclosing
    # "[ ... ]" brackets and parse the rows/values as floats.
    transform_mat = np.asarray([w.split() for w in np.asarray(subprocess.check_output(["copy-matrix","--binary=false", transform_mat_file, "-"]).decode('utf-8').strip()[2:-2].split('\n'))]).astype(float)
    mean_vec = np.asarray(subprocess.check_output(["copy-vector", "--binary=false",mean_vec_file, "-"]).decode('utf-8').strip()[1:-2].split()).astype(float)
    plda['transform_mat'] = transform_mat
    plda['mean_vec'] = mean_vec
    with open(outpicklefile,'wb') as f:
        pickle.dump(plda,f)
if __name__=='__main__':
    # Parse CLI arguments, then convert the kaldi matrices into a pickle.
    args = setup()
    load_kaldi_matrices(args)
| StarcoderdataPython |
3346685 | version = "0.90.20190728"
| StarcoderdataPython |
3291147 | import numpy as np
from spearmint.grids import sobol_grid
def evaluate(job_id, params):
    """Noisy two-objective / one-constraint evaluation at (X, Y, Z).

    Returns a dict with objectives "o1", "o2" and the (negated, scaled)
    constraint "c1".  Gaussian noise is added to all three values.
    """
    x = params['X']
    y = params['Y']
    z = params['Z']
    #print 'Evaluating at (%f, %f, %f)' % (x, y, z)
    obj1 = float(x*np.power(16.0+np.power(z,2.0),0.5)+y*np.power(1.0+np.power(z,2.0),0.5)) + np.random.normal(0,0.00008)
    # BUGFIX: the original wrote np.power(16.0+np.power(z,2.0)/0.5) -- a
    # single-argument np.power call (TypeError at runtime); the ",0.5"
    # square root was restored, matching obj2()/test_grid() below.
    obj2 = float(np.max((20.0*np.power(16.0+np.power(z,2.0),0.5)/(x*z),80.0*np.power(1.0+np.power(z,2.0),0.5)/(y*z)))) + np.random.normal(0,1120)
    c1 = (float(np.power(10.0,5.0)-np.max((20.0*np.power(16.0+np.power(z,2.0),0.5)/(x*z),80.0*np.power(1.0+np.power(z,2.0),0.5)/(y*z)))) + np.random.normal(0,0.1)) / 103324.527134
    return {
        "o1" : obj1,
        "o2" : obj2,
        "c1" : c1 * -1.0
    }
def test_grid():
    """Print the standard deviation of the constraint over a 200^3 mesh grid."""
    x = np.linspace( 0.0001,0.01,200 )
    y = np.linspace( 0.0001,0.01,200 )
    z = np.linspace( 1,3,200 )
    x,y,z = np.meshgrid( x,y,z )
    # Element-wise stress terms; the constraint is 1e5 minus the larger one.
    c1a = 20.0*np.power(16.0+np.power(z,2.0),0.5)/(x*z)
    c1b = 80.0*np.power(1.0+np.power(z,2.0),0.5)/(y*z)
    c1 = np.power(10.0,5.0)-np.maximum(c1a,c1b)
    var1 = np.var(c1)
    print np.sqrt(var1)
def obj1(grid):
    """First objective, vectorized: x*sqrt(16+z^2) + y*sqrt(1+z^2) per row."""
    x = grid[:, 0]
    y = grid[:, 1]
    z_sq = np.power(grid[:, 2], 2.0)
    return x*np.power(16.0+z_sq, 0.5) + y*np.power(1.0+z_sq, 0.5)
def obj2(grid):
    """Second objective, vectorized: element-wise max of the two stress terms."""
    z = grid[:, 2]
    left = 20.0*np.power(16.0+np.power(z, 2.0), 0.5)/(grid[:, 0]*z)
    right = 80.0*np.power(1.0+np.power(z, 2.0), 0.5)/(grid[:, 1]*z)
    return np.maximum(left, right)
def c1(grid):
    """Negated, noisy, scaled constraint: -((1e5 - max stress) + N(0,0.1)) / 103324.527134."""
    z = grid[:, 2]
    stress_a = 20.0*np.power(16.0+np.power(z, 2.0), 0.5)/(grid[:, 0]*z)
    stress_b = 80.0*np.power(1.0+np.power(z, 2.0), 0.5)/(grid[:, 1]*z)
    slack = np.power(10.0, 5.0) - np.maximum(stress_a, stress_b)
    return ((slack + np.random.normal(0, 0.1)) / 103324.527134)*-1.0
def get_functions_borders(num_vars = 3, grid_size = 1000000, noise = 0.1):
    """Print min/max of both objectives and the constraint over a Sobol grid.

    Also prints a "noise factor": *noise* times each observed range.
    """
    grid = sobol_grid.generate( num_vars , grid_size )
    # Scale grid.
    # Columns 0/1 (x, y) into [0.0001, 0.01]; column 2 (z) into [1, 3].
    grid[:,0] = grid[:,0] * ( 0.01 - 0.0001 ) + 0.0001
    grid[:,1] = grid[:,1] * ( 0.01 - 0.0001 ) + 0.0001
    grid[:,2] = grid[:,2] * ( 3.0 - 1.0 ) + 1.0
    print("Statistics over the objectives and constraints")
    print("==============================================")
    first_obj_observations = obj1(grid)
    second_obj_observations = obj2(grid)
    first_con_observations = c1(grid)
    max_first_obj = np.max(first_obj_observations)
    min_first_obj = np.min(first_obj_observations)
    max_second_obj = np.max(second_obj_observations)
    min_second_obj = np.min(second_obj_observations)
    max_first_con = np.max(first_con_observations)
    min_first_con = np.min(first_con_observations)
    print("Maximum observation of the first objective")
    print(max_first_obj)
    print("Minimum observation of the first objective")
    print(min_first_obj)
    print("Noise factor")
    print((max_first_obj-min_first_obj)*noise)
    print("Maximum observation of the second objective")
    print(max_second_obj)
    print("Minimum observation of the second objective")
    print(min_second_obj)
    print("Noise factor")
    print((max_second_obj-min_second_obj)*noise)
    print("Maximum observation of the first constraint")
    print(max_first_con)
    print("Minimum observation of the first constraint")
    print(min_first_con)
    print("Noise factor")
    print((max_first_con-min_first_con)*noise)
def main(job_id, params):
    """Spearmint entry point: evaluate, returning NaN on any failure."""
    try:
        return evaluate(job_id, params)
    except Exception as ex:
        print ex
        print 'An error occurred in mocotoy_con.py'
        return np.nan
if __name__ == "__main__":
    # Standalone run: print value ranges instead of evaluating one point.
    #main(0, {u'X': np.array([ 5.0 ]), u'Y': np.array([ 2.8 ]), u'Z': np.array([ 1.0]) })
    get_functions_borders()
| StarcoderdataPython |
12860471 | <gh_stars>0
import climate
import glob
import gzip
import io
import lmj.cubes
import logging
import numpy as np
import os
import pandas as pd
import pickle
import theanets
def compress(source, k, activation, **kwargs):
    """Train an autoencoder on jacobian CSVs under *source* and save encodings.

    Loads every '*_jac.csv.gz', trains a k-dimensional tied-weights
    autoencoder on random row batches, writes the encoded frames next to
    the originals (suffix derived from activation/k/sparsity) and pickles
    the trained model.
    """
    fns = sorted(glob.glob(os.path.join(source, '*', '*_jac.csv.gz')))
    logging.info('%s: found %d jacobians', source, len(fns))
    # the clipping operation affects about 2% of jacobian values.
    dfs = [np.clip(pd.read_csv(fn, index_col='time').dropna(), -10, 10)
           for fn in fns]
    B, N = 128, dfs[0].shape[1]
    logging.info('loaded %s rows of %d-D data from %d files',
                 sum(len(df) for df in dfs), N, len(dfs))
    def batch():
        # One minibatch: B rows sampled uniformly over files, then rows.
        batch = np.zeros((B, N), 'f')
        for b in range(B):
            a = np.random.randint(len(dfs))
            batch[b] = dfs[a].iloc[np.random.randint(len(dfs[a])), :]
        return [batch]
    pca = theanets.Autoencoder([N, (k, activation), (N, 'tied')])
    pca.train(batch, **kwargs)
    # Output suffix encodes activation, k and (optionally) the sparsity.
    key = '{}_k{}'.format(activation, k)
    if 'hidden_l1' in kwargs:
        key += '_s{hidden_l1:.4f}'.format(**kwargs)
    for df, fn in zip(dfs, fns):
        df = pd.DataFrame(pca.encode(df.values.astype('f')), index=df.index)
        s = io.StringIO()
        df.to_csv(s, index_label='time')
        out = fn.replace('_jac', '_jac_' + key)
        with gzip.open(out, 'wb') as handle:
            handle.write(s.getvalue().encode('utf-8'))
        logging.info('%s: saved %s', out, df.shape)
    out = os.path.join(source, 'pca_{}.pkl'.format(key))
    pickle.dump(pca, open(out, 'wb'))
@climate.annotate(
    root='load data files from subject directories in this path',
    k=('compress to this many dimensions', 'option', None, int),
    activation=('use this activation function', 'option'),
)
def main(root, k=1000, activation='relu'):
    """Compress the jacobians of every subject under *root*."""
    for subject in lmj.cubes.Experiment(root).subjects:
        # Fixed training hyperparameters; monitors log hidden-layer sparsity.
        compress(subject.root, k, activation,
                 momentum=0.9,
                 hidden_l1=0.01,
                 weight_l1=0.01,
                 monitors={'hid1:out': (0.01, 0.1, 1, 10)})
if __name__ == '__main__':
    climate.call(main)
| StarcoderdataPython |
1863279 | <gh_stars>0
from gitlabform.gitlab.core import GitLabCore
class GitLabServices(GitLabCore):
def get_service(self, project_and_group_name, service):
return self._make_requests_to_api("projects/%s/services/%s", (project_and_group_name, service))
def set_service(self, project_and_group_name, service, data):
# DO NOT CHANGE BELOW json=data , it is necessary to pass data as JSON for Services API to work FULLY properly!
# see https://gitlab.com/gitlab-org/gitlab/-/issues/202216 for more info.
self._make_requests_to_api("projects/%s/services/%s", (project_and_group_name, service),
'PUT', data=None, expected_codes=[200, 201], json=data)
def delete_service(self, project_and_group_name, service):
self._make_requests_to_api("projects/%s/services/%s", (project_and_group_name, service),
'DELETE', expected_codes=[200, 204])
| StarcoderdataPython |
9631960 | <reponame>Superomeg4/SciDataTool<gh_stars>0
import numpy as np
from SciDataTool.Functions import NormError, UnitError, AxisError
from SciDataTool.Functions.conversions import convert as convert_unit, to_dB, to_dBA
from SciDataTool.Functions.derivation_integration import (
derivate,
integrate,
antiderivate,
)
from SciDataTool.Functions.sum_mean import (
my_sum,
my_mean,
root_mean_square,
root_sum_square,
)
def convert(self, values, unit, is_norm, is_squeeze, axes_list):
    """Returns the values of the field transformed or converted.
    Parameters
    ----------
    self: Data
        a Data object
    values: ndarray
        array of the field
    unit: str
        Unit requested by the user ("SI" by default)
    is_norm: bool
        Boolean indicating if the field must be normalized (False by default)
    Returns
    -------
    values: ndarray
        values of the field
    """
    if is_squeeze:
        values = np.squeeze(values)
    # Same unit (or SI): only apply the reference normalization if asked.
    if unit == self.unit or unit == "SI":
        if is_norm:
            try:
                values = self.normalizations["ref"].normalize(values)
            except Exception:
                raise NormError("Reference value not specified for normalization")
    elif unit == "dB":
        # dB conversion uses the "ref" normalization value when available.
        ref_value = 1.0
        if "ref" in self.normalizations:
            ref_value *= self.normalizations["ref"].ref
        values = to_dB(np.abs(values), self.unit, ref_value)
    elif unit == "dBA":
        # dBA (A-weighted) needs a frequency axis to weight against.
        ref_value = 1.0
        if "ref" in self.normalizations:
            ref_value *= self.normalizations["ref"].ref
        for axis in axes_list:
            is_match = False
            if axis.name == "freqs" or axis.corr_name == "freqs":
                # Prefer corrected axis values when the axis unit differs.
                if axis.corr_values is not None and axis.unit not in [
                    "SI",
                    axis.corr_unit,
                ]:
                    axis_values = axis.corr_values
                else:
                    axis_values = axis.values
                index = axis.index
                values = np.apply_along_axis(
                    to_dBA, index, values, axis_values, self.unit, ref_value
                )
                is_match = True
            elif axis.name == "frequency":
                if axis.corr_values is None:
                    axis_values = axis.values
                else:
                    axis_values = axis.corr_values
                index = axis.index
                values = np.apply_along_axis(
                    to_dBA, index, values, axis_values, self.unit, ref_value
                )
                is_match = True
        # NOTE(review): is_match is reset on every loop iteration, so this
        # check only reflects the *last* axis; with an empty axes_list it
        # would be undefined -- confirm intent upstream.
        if not is_match:
            raise UnitError("dBA conversion only available for fft with frequencies")
    elif unit in self.normalizations:
        # Named normalization (e.g. per-unit) registered on the Data object.
        values = self.normalizations.get(unit).normalize(values)
    else:
        # Fall back to generic unit conversion (e.g. m -> mm).
        values = convert_unit(values, self.unit, unit)
    return values
| StarcoderdataPython |
3269920 | '''
Author: <NAME>
Email: <EMAIL>
Date created: 2020/1/6
Python Version: 3.6
'''
"""
Code to split the data into train and test dataset
"""
import configparser
import logging
import sys
import click
import ast
import pandas as pd
import os
sys.path.append('.')
from src.data.utils import read_raw_data, select_years, get_city_output_path, inner_concatenate
@click.command()
@click.argument('config_path', type=click.Path(exists=True))
@click.argument('merged_file_path', type=click.Path(exists=True))
@click.argument('train_data_path', type=click.Path())
@click.argument('valid_data_path', type=click.Path())
@click.argument('test_data_path', type=click.Path())
def extract_file(config_path, merged_file_path, train_data_path, valid_data_path, test_data_path):
    """Split merged per-city data into train/valid/test CSVs by year.

    Writes one CSV per city for each split, plus concatenated all-city
    train/valid/test files (inner-joined on shared columns).
    """
    logger = logging.getLogger(__name__)
    logger.info('data train-test splitting')
    # save path exist make sure
    save_pardir = os.path.dirname(train_data_path)
    if not os.path.exists(save_pardir):
        os.makedirs(save_pardir)
    pars = configparser.ConfigParser()
    pars.read(config_path)
    # Config values are stored as Python literals (lists) in the INI file.
    city_list = ast.literal_eval(pars['global']['city'])
    train_years = ast.literal_eval(pars['global']['train_year'])
    valid_years = ast.literal_eval(pars['global']['valid_year'])
    test_years = ast.literal_eval(pars['global']['test_year'])
    # seed word list
    seed_path = pars['extract_search_trend']['term_list_path']
    seed_word_list = list(set([k.lower() for k in pd.read_csv(seed_path, header=None)[0].values]))
    seed_word_list.append('DATE')
    # pol label list
    label_columns = ast.literal_eval(pars['extract_pol_label']['y_column_name'])
    seed_word_list = label_columns + seed_word_list
    # concatenate the train and valid data
    x_train_all, x_valid_all, x_test_all = pd.DataFrame(columns=seed_word_list),\
        pd.DataFrame(columns=seed_word_list), pd.DataFrame(columns=seed_word_list)
    for city in city_list:
        input_single_file_path = get_city_output_path(merged_file_path, city)
        output_city_test_path = get_city_output_path(test_data_path, city)
        output_city_train_path = get_city_output_path(train_data_path, city)
        output_city_valid_path = get_city_output_path(valid_data_path, city)
        merged_data = read_raw_data(input_single_file_path)
        merged_data.index = pd.to_datetime(merged_data.DATE)
        train_data = select_years(merged_data, train_years)
        valid_data = select_years(merged_data, valid_years)
        test_data = select_years(merged_data, test_years)
        # save single city data
        train_data.to_csv(output_city_train_path, index=False)
        valid_data.to_csv(output_city_valid_path, index=False)
        test_data.to_csv(output_city_test_path, index=False)
        if len(x_train_all) == 0:
            # First city seeds the concatenated frames.
            x_train_all = train_data.copy()
            x_valid_all = valid_data.copy()
            x_test_all = test_data.copy()
        else:
            # concatenate data
            x_train_all = inner_concatenate(x_train_all, train_data)
            x_valid_all = inner_concatenate(x_valid_all, valid_data)
            x_test_all = inner_concatenate(x_test_all, test_data)
    # drop all NAs columns
    x_train_all.dropna(axis=1, how='all', inplace=True)
    x_valid_all.dropna(axis=1, how='all', inplace=True)
    x_test_all.dropna(axis=1, how='all', inplace=True)
    # create train.csv, test.csv to check existence
    x_train_all.to_csv(train_data_path, index=False)
    x_valid_all.to_csv(valid_data_path, index=False)
    x_test_all.to_csv(test_data_path, index=False)
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    extract_file()
| StarcoderdataPython |
6665109 | <reponame>thequicksort/puddle
import networkx as nx
from puddle import mk_session, Droplet, project_path
min_volume = 1.0
max_volume = 4.0
def plan(low, high, target, epsilon=0.01):
    """Build a binary-mixing DAG reaching *target* concentration from low/high.

    Repeatedly mixes the current (low, high) pair into their midpoint,
    bisecting toward *target*; then walks the graph bottom-up assigning
    edge weights = number of droplets each parent must contribute.
    Returns the networkx DiGraph.  (The print() calls are debug output.)
    """
    graph = nx.DiGraph()
    mid = (low + high) / 2
    while abs(mid - target) > epsilon:
        graph.add_edge(low, mid)
        graph.add_edge(high, mid)
        if target < mid:
            high = mid
        else:
            low = mid
        mid = (low + high) / 2
    # Last node in topological order is the final target concentration.
    rev_topo = reversed(list(nx.topological_sort(graph)))
    result = next(rev_topo)
    for _, _, data in graph.in_edges(result, data=True):
        data['weight'] = 1
    for node in rev_topo:
        print(node)
        # Total droplets demanded downstream; each parent supplies half,
        # rounded up (a mix halves into two outputs).
        total = sum(w for _, _, w in graph.out_edges(node, data='weight'))
        graph.node[node]['total'] = total
        in_w = (total + 1) // 2
        for _, _, data in graph.in_edges(node, data=True):
            data['weight'] = in_w
    print(list(graph.nodes(data='total'))),
    print(list(graph.edges(data='weight')))
    # Sanity check: no node promises more output than it receives.
    for node in graph:
        ins = graph.in_edges(node, data='weight')
        outs = graph.out_edges(node, data='weight')
        out_amt = sum(w for _, _, w in outs)
        in_amt = sum(w for _, _, w in ins)
        print(ins, outs, in_amt, out_amt)
        assert not ins or out_amt <= in_amt
    return graph
def dilute(session, d_low_factory, d_high_factory, c_target, epsilon=0.001):
    """Produce a droplet of concentration c_target (within epsilon) by bisection.

    Starts from one low- and one high-concentration droplet (from the
    factories), repeatedly mixing and splitting; when the midpoint
    overshoots, recursively re-creates a droplet at the needed bound.
    """
    def dilute_rec(d0, d1):
        session._flush()
        # Remember the bounds before mixing; needed for the recursive calls.
        con0 = d0.concentration
        con1 = d1.concentration
        assert d0.concentration <= d1.concentration
        # print(len(session.arch.droplets),
        #       d0.concentration, d1.concentration, c_target)
        if abs(d0.concentration - c_target) < epsilon:
            # session.arch.remove_droplet(d1)
            return d0
        if abs(d1.concentration - c_target) < epsilon:
            # session.arch.remove_droplet(d0)
            return d1
        session._flush()
        # Mix then split: each half has the averaged concentration.
        d = session.mix(d0, d1)
        # FIXME account for volume when picking
        da, db = session.split(d)
        session._flush()
        d_next = da
        # session.arch.remove_droplet(db)
        # print(d_next.concentration)
        if abs(d_next.concentration - c_target) < epsilon:
            return d_next
        if d_next.concentration < c_target:
            # Midpoint too dilute: rebuild the upper bound and recurse.
            d1_again = dilute(session, d_low_factory, d_high_factory, con1,
                              epsilon)
            return dilute_rec(d_next, d1_again)
        else:
            # Midpoint too concentrated: rebuild the lower bound and recurse.
            d0_again = dilute(session, d_low_factory, d_high_factory, con0,
                              epsilon)
            return dilute_rec(d0_again, d_next)
    return dilute_rec(d_low_factory(), d_high_factory())
class VolConcDroplet(Droplet):
    """Droplet subclass that also tracks volume and solute concentration."""

    def __init__(self, *args, **kwargs):
        # Pull our extra attributes out of kwargs before delegating upward.
        self.volume = kwargs.pop('volume', 1)
        self.concentration = kwargs.pop('concentration', 0)
        super().__init__(*args, **kwargs)

    def mix(self, other):
        """Combine with ``other``: volumes add, concentration is mass-weighted."""
        merged = super().mix(other)
        combined_volume = self.volume + other.volume
        solute = (self.concentration * self.volume
                  + other.concentration * other.volume)
        merged.volume = combined_volume
        merged.concentration = solute / combined_volume
        return merged

    def split(self):
        """Split into two halves, each keeping the parent's concentration."""
        first, second = super().split()
        for half in (first, second):
            half.volume = self.volume / 2
            half.concentration = self.concentration
        return first, second
arch_path = project_path('tests/arches/arch-big.yaml')
with mk_session(arch_path) as session:
    # FIXME this needs arch big for now because place and route is bad
    # also, you just can't do that many iterations
    # Demo: dilute from the [c_low, c_high] endpoints to c_target +/- eps.
    c_low = 0
    c_high = 1
    c_target = .25
    eps = 0.1
    def d_low_factory():
        # Fresh minimal-concentration droplet, created on demand.
        return session.create(
            location=None,
            volume=1,
            dimensions=None,
            concentration=c_low,
            droplet_class=VolConcDroplet,
        )
    def d_high_factory():
        # Fresh maximal-concentration droplet, created on demand.
        return session.create(
            location=None,
            volume=1,
            dimensions=None,
            concentration=c_high,
            droplet_class=VolConcDroplet,
        )
    d = dilute(session, d_low_factory, d_high_factory, c_target, epsilon=eps)
    assert abs(d.concentration - c_target) < eps
| StarcoderdataPython |
3380185 | <gh_stars>0
import connexion
import os
import project.app
# Inject the test API key into the environment *before* project.app (which
# reads ALLOWED_API_KEYS at import time) gets configured below.
orig_environ = dict(os.environ)
orig_environ["ALLOWED_API_KEYS"] = "test-api-key"
os.environ.update(orig_environ)
from flask_sqlalchemy import SQLAlchemy
from flask_testing import TestCase
from sqlalchemy.exc import SQLAlchemyError
from ge_core_shared import decorators, exception_handlers, middleware
from swagger_server.encoder import JSONEncoder
# Module-wide SQLAlchemy handle shared by every test case in this module.
DB = SQLAlchemy()
class BaseTestCase(TestCase):
    """Shared Connexion/Flask test base: builds the app under test, wipes
    database rows before each test, and closes sessions afterwards."""

    def create_app(self):
        """Assemble the Connexion app from the swagger spec and real config."""
        app = connexion.App(__name__, specification_dir='../swagger/')
        flask_app = app.app
        flask_app.json_encoder = JSONEncoder
        # Reuse the real application's configuration for the tests.
        flask_app.config = project.app.APP.config
        flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
        DB.init_app(flask_app)
        app.add_error_handler(SQLAlchemyError, exception_handlers.db_exceptions)
        # Register middleware
        middleware.auth_middleware(app.app, "core_user_data_store")
        app.add_api('swagger.yaml', arguments={'title': 'Test User Data API'})
        self.flask_app = flask_app
        return flask_app

    @decorators.db_exception
    def setUp(self):
        """Empty every table (except migration bookkeeping) before each test."""
        super().setUp()
        meta = DB.metadata
        meta.reflect(DB.engine)
        # By reversing the tables, children should get deleted before parents.
        for table in reversed(meta.sorted_tables):
            if table.name == "alembic_version": # Do not delete migration data
                continue
            DB.session.execute(table.delete())
        DB.session.commit()

    def tearDown(self):
        super().tearDown()
        # Closes all active connections between tests. Prevents session errors
        # bleeding over.
        DB.session.close_all()
| StarcoderdataPython |
1744590 | # Under MIT License, see LICENSE.txt
from typing import Dict
from Util import Position
class Ball:
    """Mutable model of the game ball: a position and a velocity.

    Both attributes are ``Position`` instances, validated on assignment.
    """

    def __init__(self, position=None):
        # Bug fix: the original signature was ``position=Position()``.  A
        # mutable default is evaluated once, so every Ball created without
        # an argument shared the *same* Position object.  Use a None
        # sentinel and build a fresh Position per instance instead.
        self._position = position if position is not None else Position()
        self._velocity = Position()

    def update(self, new_dict: Dict):
        """Refresh position and velocity from a dict with those two keys."""
        self.position = new_dict['position']
        self.velocity = new_dict['velocity']

    def is_moving_fast(self, fast_speed=600.0):  # mm/s
        """True when the ball's speed exceeds ``fast_speed``."""
        return fast_speed < self.velocity.norm

    def is_mobile(self, immobile_speed=300.0):  # mm/s
        """True when the ball's speed exceeds ``immobile_speed``."""
        return immobile_speed < self.velocity.norm

    def is_immobile(self):
        """True when the ball is at or below the mobility threshold."""
        return not self.is_mobile()

    @property
    def position(self) -> Position:
        return self._position

    @position.setter
    def position(self, value):
        assert isinstance(value, Position)
        self._position = value

    @property
    def velocity(self) -> Position:
        return self._velocity

    @velocity.setter
    def velocity(self, value):
        assert isinstance(value, Position)
        self._velocity = value
| StarcoderdataPython |
3448141 | <gh_stars>1-10
# Module-level accumulators: multiples of 3 and of 5 below the input bound.
a=[]
b=[]
def multiples_of_3(n):
    """Append every multiple of 3 in [0, n) to the module-level list ``a``."""
    # range(0, n, 3) enumerates exactly the multiples below n in one step,
    # replacing the original multiply-and-test loop (same values, same order).
    a.extend(range(0, n, 3))
def multiples_of_5(n):
    """Append every multiple of 5 in [0, n) to the module-level list ``b``."""
    # Same simplification as multiples_of_3: step the range directly.
    b.extend(range(0, n, 5))
if __name__ == "__main__":
n=input()
multiples_of_3(n)
multiples_of_5(n)
print sum(set(a+b)) | StarcoderdataPython |
8003973 | import random
import time
import sys
from pyparsems import *
def genNums(size):
    """Return ``size`` pseudo-random integers drawn uniformly from [0, 10]."""
    return [random.randint(0, 10) for _ in range(size)]
def sumList(nums):
    """Return the sum of ``nums``.

    Bug fix: the original stored the sum in a local variable and returned
    None, so callers always saw ``None``.
    """
    return sum(nums)
if __name__ == '__main__':
    # Bug fixes: (1) ``len(sys.argv)`` is always >= 1 (program name), so the
    # original check never guarded the argv[1] access; require > 1 instead.
    # (2) the original called the undefined name ``sumNums``.
    if len(sys.argv) > 1 and sys.argv[1].isdigit():
        n = int(sys.argv[1])
        st = time.time()
        nums = genNums(n)
        res_sum = sumList(nums)
        et = time.time()
        rt = et - st
        print(res_sum)
        print(rt)
        print(parseMilliSecs().parse_millisecs(rt))
| StarcoderdataPython |
3286239 | <reponame>kathryncrouch/eupathws
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 Genome Research Ltd
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import warnings
import datetime
import six
try:
from exceptions import RuntimeWarning
except:
pass
class GOCollection(object):
    """Accumulates GO-term annotations for one taxon and serializes them
    in GAF 1.0 format."""

    def __init__(self, taxon_id):
        # go_id -> list of annotation dicts (one per annotated object).
        self.gos = {}
        self.taxon_id = taxon_id
        # Long EuPathDB ontology names -> one-letter GAF aspect codes.
        self.aspects = {'Biological Process': 'P',
                        'Molecular Function': 'F',
                        'Cellular Component': 'C'}

    def _aspect2oneletter(self, aspect):
        """Translate an ontology name to its one-letter GAF aspect.

        Raises KeyError for unknown ontology names.
        """
        return self.aspects[aspect]

    def add_item(self, v, source='EuPathDB'):
        """Add GO terms from an item parsed from EuPathTables to the set."""
        if 'GO Terms' not in v:
            return
        for transcript_id, goterms in v['GO Terms'].items():
            for go in goterms:
                try:
                    aspect = self._aspect2oneletter(go['Ontology'])
                except KeyError:
                    # Bug fix: previously a bare ``except:`` which swallowed
                    # *every* error; only a missing/unknown ontology name
                    # should be skipped with a warning.
                    warnings.warn("couldn't get aspect for %s (%s)"
                                  % (go['GO ID'], go['GO Term Name']),
                                  RuntimeWarning)
                    continue
                qualifier = "NOT" if go['Is Not'] != 'N/A' else ''
                if aspect and go['GO ID']:
                    self.add_generic(source, transcript_id, transcript_id,
                                     qualifier,
                                     go["GO ID"], 'GO_REF:0000002',
                                     go['Evidence Code'], '', aspect,
                                     v['product'], '', 'transcript',
                                     go["Source"])

    def add_from_gaf_iterator(self, it, stream=None):
        """Add annotations from a parsed-GAF iterator.

        When ``stream`` is given, only items whose object id is known to
        ``stream.uniprots`` are kept, remapped to gene-level ids.
        """
        for item in it:
            # XXX this assumes object IDs are on protein level! Check if gene symbol is usable
            if (not stream) or (stream and (item['object_id'] in stream.uniprots)):
                if stream and (item['object_id'] in stream.uniprots):
                    item['object_id'] = stream.uniprots[item['object_id']]
                    item['object_type'] = 'gene'
                self.add_generic(item['db'], item['object_id'],
                                 item['object_symbol'], item['qualifier'],
                                 item['go_id'], item['dbref'],
                                 item['evidence_code'], item['withfrom'],
                                 item['aspect'], item['object_name'],
                                 item['object_synonym'], item['object_type'],
                                 item['assigned_by'])

    def add_generic(self, db='', object_id='', object_symbol='', qualifier='',
                    go_id='', dbref='', evidence_code='', withfrom='',
                    aspect='', object_name='', object_synonym='',
                    object_type='gene', assigned_by=''):
        """Add GO terms parsed separately to the set.

        ``go_id`` is mandatory (AssertionError otherwise, as before).
        """
        assert(go_id)
        new_item = {'db': db, 'object_id': object_id,
                    'object_symbol': object_symbol, 'qualifier': qualifier,
                    'go_id': go_id, 'dbref': dbref,
                    'evidence_code': evidence_code, 'withfrom': withfrom,
                    'aspect': aspect, 'object_name': object_name,
                    'object_synonym': object_synonym,
                    'object_type': object_type, 'taxon': self.taxon_id,
                    'assigned_by': assigned_by}
        # setdefault replaces the ``if not go_id in self.gos`` dance.
        self.gos.setdefault(go_id, []).append(new_item)

    def to_gafv1(self, outstream):
        """Output the contents of this collection in GAF 1.0 format."""
        outstream.write("!gaf-version: 1.0\n")
        # Hoisted out of the loop: the date is constant per export.
        today = datetime.date.today().strftime('%Y%m%d')
        # dict.items() works on both py2 and py3; six.iteritems is not needed.
        for annotations in self.gos.values():
            for it in annotations:
                fields = (it['db'], it['object_id'], it['object_symbol'],
                          it['qualifier'], it['go_id'], it['dbref'],
                          it['evidence_code'], it['withfrom'], it['aspect'],
                          it['object_name'], it['object_synonym'],
                          it['object_type'], "taxon:" + str(it['taxon']),
                          today, it['assigned_by'])
                outstream.write("\t".join(fields) + "\n")
| StarcoderdataPython |
class PyZbarError(Exception):
    """Base exception for all errors raised by this package."""
    pass
| StarcoderdataPython |
def fixformatting(data):
    """Parse raw puzzle lines into a list of passport dicts.

    Passports are separated by blank lines; each non-blank line carries
    space-separated ``key:value`` pairs.
    """
    passports = []
    current = {}
    for line in data:
        if not line:
            # Blank line terminates the passport under construction.
            passports.append(current)
            current = {}
        else:
            current.update(pair.split(":") for pair in line.split(" "))
    # The last passport has no trailing blank line; flush it as well.
    passports.append(current)
    return passports
def validate(passport):
    """Return True when all seven mandatory passport fields are present."""
    required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
    return required.issubset(passport)
def validatept2(passport):
    """Return True when the passport also satisfies the strict part-2 rules."""
    if not validate(passport):
        return False
    # Year fields must fall within their inclusive ranges.
    year_rules = (("byr", 1920, 2002), ("iyr", 2010, 2020), ("eyr", 2020, 2030))
    for field, low, high in year_rules:
        if not low <= int(passport[field]) <= high:
            return False
    # Height: numeric part bounded according to its cm/in unit suffix.
    height = passport["hgt"]
    unit = height[-2:]
    if unit not in {"cm", "in"}:
        return False
    low, high = (150, 193) if unit == "cm" else (59, 76)
    if not low <= int(height[:-2]) <= high:
        return False
    # Hair colour: '#' followed by exactly six lowercase hex digits.
    hair = passport["hcl"]
    if hair[0] != "#" or len(hair) != 7:
        return False
    if any(ch not in "0123456789abcdef" for ch in hair[1:]):
        return False
    # Eye colour must be one of the nine allowed codes.
    if passport["ecl"] not in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}:
        return False
    # Passport id: exactly nine decimal digits.
    pid = passport["pid"]
    if len(pid) != 9:
        return False
    return all(ch in "0123456789" for ch in pid)
def part1(passports):
    """Count passports that contain every mandatory field."""
    return sum(1 for passport in passports if validate(passport))
def part2(passports):
    """Count passports that also pass the strict field validation."""
    return sum(1 for passport in passports if validatept2(passport))
file = open("input.txt")
data = []
for line in file:
data.append(line.strip())
passports = fixformatting(data)
print(f"Sol 1: {part1(passports)}")
print(f"Sol 2: {part2(passports)}") | StarcoderdataPython |
3457147 | <reponame>collinwright/nixpkgs
#!/usr/bin/python
import json
import sys
def process_section(name, section):
    """Collect (package-name, resolved/version) pairs from one lockfile section.

    Includes the section's own resolved URL plus one pair per entry in its
    ``dependencies`` mapping.
    """
    packages = set()
    if "resolved" in section:
        packages.add((name, section["resolved"]))
    # Distinct loop variable: the original shadowed the ``name`` parameter.
    for dep_name, dep_version in section.get("dependencies", {}).items():
        packages.add((dep_name, dep_version))
    return packages
def main():
    """Print ``name version`` for every package referenced by the lockfile
    given as argv[1]."""
    with open(sys.argv[1], 'r') as handle:
        tree = json.load(handle)
    packages = set()
    top_dependencies = tree["dependencies"]
    for area in top_dependencies:
        for pkg_name in top_dependencies[area]:
            packages |= process_section(pkg_name, top_dependencies[area][pkg_name])
    for (pkg_name, version) in packages:
        print("%s %s" % (pkg_name, version))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1818342 | <gh_stars>1-10
from __future__ import absolute_import
from . import auth_providers
from .app import KnowledgeFlask
from .db_repo import DBSession
# Create one module-wide session/engine pair at import time so the rest of
# the package shares a single database configuration.
session_obj = DBSession()
db_repo_session = session_obj.session()
db_repo_engine = session_obj.engine()
| StarcoderdataPython |
5089414 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
from lisa_interaction_msgs.msg import LisaUtterAction, LisaUtterGoal
class LisaUtterActionStateWithUserkey(EventState):
    '''
    An uttering action is performed with high priority (all running dialogue are dropped immediately), this has to be used
    to perform an urgent prioritary announcement, possibly out of the context.

    -- wait_time 	float	wait time before exit (the end of uttering is not yet implemented, this is a fix timeout). If set to 0 (default) exit when the uttering is finished.

    ># text_to_utter	string	Sentence to be uttered.
    #> error_reason	string	An eventual error.

    <= uttered_all		the entire string was uttered.
    <= timeout			A time out occurs during the utterance.
    <= command_error	An error happened, more details in error_reason
    '''
    # NOTE(review): the docstring previously listed an outcome ``error`` and
    # marked the input key with ``#>``; corrected to the actual outcome name
    # ``command_error`` and the FlexBE input-key marker ``>#``.

    def __init__(self, wait_time=0):
        # See example_state.py for basic explanations.
        super(LisaUtterActionStateWithUserkey, self).__init__(outcomes = ['uttered_all', 'timeout', 'command_error'],
                                                              input_keys = ['text_to_utter'],
                                                              output_keys = ['error_reason'])
        self._topic = '/lisa/say'
        self._client = ProxyActionClient({self._topic: LisaUtterAction}) # pass required clients as dict (topic: type)
        # It may happen that the action client fails to send the action goal.
        self._error = False
        self._error_reason = ''
        self._wait_time = wait_time
        if wait_time:
            # A non-zero wait_time is accepted but has no effect yet.
            Logger.logwarn('time out not yet implemented for LisaUtterActionState')

    def execute(self, userdata):
        # While this state is active, check if the action has been finished and evaluate the result.
        # Check if the client failed to send the goal.
        if self._error:
            Logger.logwarn('command_error. error_reason: ' + str(self._error_reason))
            userdata.error_reason = self._error_reason
            return 'command_error'
        if self._client.has_feedback(self._topic):
            pass #Logger.logdebug('Progress {}'.format(self._client.get_feedback(self._topic)))
        # Check if the action has been finished
        if self._client.has_result(self._topic):
            result = self._client.get_result(self._topic)
            userdata.error_reason = ''#'uttered_all: result: %s' % str(result)
            Logger.loginfo('uttered_all: result: %s' % str(result))
            return 'uttered_all'
        # TODO: get time for TIMEOUT, ros time something
        # cancel and timeout
        # Check if the client failed to send the goal.
        # todo: timeout
        #if timeout:
        #	return 'timeout'
        # If the action has not yet finished, no outcome will be returned and the state stays active.

    def on_enter(self, userdata):
        self._sentence = userdata.text_to_utter
        # Create the goal.
        goal = LisaUtterGoal()
        goal.sentence = self._sentence
        # TODO: get time for timeot, ros time something
        # Send the goal.
        self._error = False # make sure to reset the error state since a previous state execution might have failed
        try:
            Logger.loginfo('Send \ntopic: {}\ngoal:{}\n '.format(self._topic, goal))
            self._client.send_goal(self._topic, goal)
        except Exception as e:
            # Since a state failure not necessarily causes a behavior failure, it is recommended to only print warnings, not errors.
            # Using a linebreak before appending the error log enables the operator to collapse details in the GUI.
            self._error_reason = 'Failed to send the sentece command:\n%s' % str(e)
            Logger.logwarn(self._error_reason)
            self._error = True

    def on_exit(self, userdata):
        # Make sure that the action is not running when leaving this state.
        # A situation where the action would still be active is for example when the operator manually triggers an outcome.
        if not self._client.has_result(self._topic):
            self._client.cancel(self._topic)
            self._error_reason = 'Cancelled active action goal.'
            Logger.loginfo(self._error_reason)
| StarcoderdataPython |
11388238 | <reponame>dmontielg/smoking-microbiome
#!/usr/bin/env python
import os
import subprocess
import warnings
from collections import Counter
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import matthews_corrcoef, make_scorer, classification_report, confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from scipy import interp
from biom.table import Table
from biom import load_table
import biom
import joblib
class GridParams:
    """Hyper-parameter search spaces for the classifiers used in this module."""

    # Randomized search value ranges shared by the SVM spaces.
    gamma_range = np.linspace(0.0001, 10, 100)
    c_range = np.linspace(1, 10, 1000)

    def getKNN(self):
        """Search space for k-nearest neighbours."""
        return {
            'n_neighbors': list(range(5, 101)),
            'weights': ["uniform", "distance"],
            'algorithm': ["auto", "kd_tree", "ball_tree", "brute"],
            'leaf_size': list(range(30, 61)),
            'p': [1, 2],
        }

    def getLogistic(self):
        """Search space for L1-penalised logistic regression."""
        return {
            'penalty': ['l1'],
            'C': np.logspace(-4, 4, 20),
            'max_iter': [1000],
            'solver': ['liblinear'],
        }

    def getSVMRadial(self):
        """Search space for an RBF-kernel SVM."""
        return {'C': self.c_range, 'gamma': self.gamma_range, 'kernel': ["rbf"]}

    def getSVMLinear(self):
        """Search space for a linear-kernel SVM."""
        return {'C': self.c_range, 'kernel': ["linear"]}

    def getDT(self):
        """Search space for a decision tree."""
        return {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 10),
            "max_features": ["auto", "sqrt", "log2"],
        }

    def getRF(self):
        """Search space for a random forest."""
        return {
            'n_estimators': range(100, 1001, 50),
            'max_features': ["auto", "sqrt"],
            'max_depth': range(10, 101, 10),
            'min_samples_split': range(2, 11),
            'min_samples_leaf': range(1, 5),
            'bootstrap': [True, False],
        }

    def getXG(self):
        """Search space for XGBoost."""
        return {
            'n_estimators': range(100, 1001, 50),
            'learning_rate': np.linspace(0.05, 3, 10),
            'reg_alpha': np.linspace(0, 1, 10),
            'reg_lambda': np.linspace(0, 1, 10),
            'min_child_weight': range(1, 11),
            'max_depth': range(3, 11),
            'gamma': np.linspace(0, 0.2, 10),
            'subsample': np.linspace(0.5, 1, 10),
            'colsample_bytree': np.linspace(0.5, 1, 10),
            'boosting_type': ['gbdt'],
        }
def get_metrics_classification(y_test, y_pred, y_probs):
    """Return (MCC score, per-class ROC-AUC dict, confusion matrix).

    Parameters
    ----------
    y_test : numpy array
        True class labels, e.g. [0, 1, 0, 2, ...].
    y_pred : numpy array
        Predicted class labels, e.g. [0, 1, 0, 2, ...].
    y_probs : numpy array
        Predicted class-probability matrix (one column per class).
    """
    return (calculate_mcc(y_test, y_pred),
            calculate_roc_auc(y_test, y_probs),
            confusion_matrix(y_test, y_pred))
def get_random_samples(df_metadata, target, id_samples):
    """Pick ~20% of the sample ids for each phenotype class, uniformly
    without replacement, and return them as one flat list."""
    fraction = 0.2
    chosen = []
    np.random.seed()  # re-seed from OS entropy so repeated calls differ
    for phenotype in set(df_metadata[target]):
        ids = df_metadata.loc[df_metadata[target] == phenotype, id_samples].values
        picked = np.random.choice(ids, int(fraction * len(ids)), replace=False)
        chosen.extend(picked)
    return chosen
def get_test_set(filename_pheno, filename_pos, random_samples_test,
                model_path, i, id_samples, target):
    """Load the held-out samples, average the predictions of every saved
    model under ``model_path``, write the ensemble probabilities to disk,
    and return (MCC score, ROC-AUC dict) against labels reconstructed from
    the sample names.

    ``i`` is used only as a prefix for the output file name.
    """
    # Load phenotype metadata (TSV) and abundance table (CSV), then keep
    # only the requested test samples, with both frames in the same order.
    df_test_pheno = pd.read_csv(filename_pheno, index_col = 0, sep="\t")
    df_test_pos = pd.read_csv(filename_pos, index_col=0)
    df_test_pos = df_test_pos.loc[df_test_pos.index.isin(random_samples_test)]
    df_test_pheno = df_test_pheno.loc[df_test_pheno[id_samples].isin(df_test_pos.index)]
    df_test_pos = df_test_pos.loc[df_test_pheno[id_samples]]
    # Normalise each row to relative abundances.
    df_test_pos = total_sum(df_test_pos)
    y_test_labels = df_test_pheno[target].values
    y_test = y_test_labels
    X_test = df_test_pos.values
    # Binarize labels: argmax for 3 classes, flatten for the binary case.
    y_test = LabelBinarizer().fit_transform(y_test_labels)
    if len(set(y_test_labels)) == 3:
        y_test = np.argmax(y_test, axis=1)
    elif len(set(y_test_labels)) == 2:
        y_test = y_test.flatten()
    #random_samples_test = df_test_pos.index + "_"+y_test_labels + "_"+df_test_pheno["study"]
    # Embed the label into each sample name so it can be recovered below.
    random_samples_test = df_test_pos.index + "_" +y_test_labels
    dirpath = model_path
    models_dirpath = model_path
    outpath = model_path
    # Accumulator for the summed class probabilities (0 broadcasts on add).
    y_probs_final = np.array(0)
    models_dirpath = os.walk(models_dirpath)
    count = 0
    # Average the predicted probabilities of every "*.model" file found.
    for dirpath, dirnames, filenames in models_dirpath:
        for filename in [f for f in filenames if f.endswith(".model")]:
            path_model = os.path.join(dirpath, filename)
            count+=1
            model = joblib.load(path_model)
            y_pred = model.predict(X_test)
            y_probs_final = y_probs_final + model.predict_proba(X_test)
    y_probs_final = y_probs_final/count
    y_pred = np.argmax(y_probs_final, axis=1)
    df_probs_final = pd.DataFrame(y_probs_final, index = random_samples_test, columns=["current","never"])
    df_probs_final.to_csv(outpath + str(i) + "probs_test.txt")
    # Recover the ground-truth labels from the suffixed sample names.
    # NOTE(review): the loop below rebinds the parameter ``i`` (already used
    # for the file name above) — harmless here but worth renaming.
    y_real = []
    for i in df_probs_final.index:
        if "current" in i:
            y_real.append(0)
        elif "former" in i:
            y_real.append(1)
        elif "never" in i:
            y_real.append(1)
    y_real = np.array(y_real)
    mcc_score, auc_score, cnf_matrix = get_metrics_classification(y_real, y_pred, y_probs_final)
    return mcc_score, auc_score
def calculate_mcc(y_test, y_pred):
    """Matthews correlation of the predictions; 0 when only one class was predicted."""
    if len(set(y_pred)) < 2:
        return 0
    return matthews_corrcoef(y_test, y_pred)
def total_sum(df_pos):
    """Normalise each row of ``df_pos`` to sum to 1 (relative abundances).

    The frame is modified in place (as before) and also returned.  The
    original row-by-row ``iterrows`` loop is replaced by one vectorised
    division — equivalent results, far faster on wide tables.
    ``skipna=False`` matches the builtin-``sum`` semantics of the original
    (a NaN anywhere in a row makes the whole row NaN).
    """
    df_pos.loc[:] = df_pos.div(df_pos.sum(axis=1, skipna=False), axis=0)
    return df_pos
def set_one_hot_encoded(target):
    """One-hot encode a 1-D integer label array into a dense matrix."""
    encoder = OneHotEncoder(sparse=False, categories='auto')
    column = target.reshape(len(target), 1)
    return encoder.fit_transform(column)
def calculate_roc_auc(y, y_score):
    """Compute one ROC-AUC per class.

    Parameters
    ----------
    y : numpy array
        Assigned classes, e.g. np.array([0, 1, 0, 2, ...]).
    y_score : numpy array
        Matrix of assigned probabilities (one column per class).

    Returns
    -------
    dict mapping class index -> ROC AUC value.
    """
    encoded = set_one_hot_encoded(y)
    fpr, tpr, roc_auc = {}, {}, {}
    for cls in range(encoded.shape[1]):
        fpr[cls], tpr[cls], _ = roc_curve(encoded[:, cls], y_score[:, cls])
        roc_auc[cls] = auc(fpr[cls], tpr[cls])
    return roc_auc
def create_tmp_dirs(folder):
    """Create ``folder``, interactively offering to wipe an existing one.

    Returns True when the directory exists afterwards (created or
    recreated), False when the user declined to remove an existing one.

    Improvement: the original shelled out to ``rm -r``/``mkdir`` with
    ``shell=True`` (quoting-unsafe and silently ignoring failures);
    shutil/os calls avoid the shell entirely and raise on real errors.
    """
    import shutil

    if not os.path.isdir(folder):
        os.mkdir(folder)
        return True
    while True:
        print("WARNING! File "+folder+" already exists, \nWould you like to remove it?")
        choice = input("y/n: ")
        if str(choice).upper() == "Y":
            shutil.rmtree(folder)
            os.mkdir(folder)
            return True
        elif str(choice).upper() == "N":
            return False
        else:
            print("Please type y/Y or n/N")
def tada_augmentation(X_train, y_train, obs_ids, i, tree_newick):
    """Augment a training fold with TADA-generated synthetic samples.

    Writes the fold as a BIOM table plus a labels TSV under ``tmp/``, runs
    the external TADA script against ``tree_newick``, then reads back the
    original and augmented tables and label files.

    Returns (X_train_augmented, y_train_augmented) where the features are
    samples-by-observations (transposed back) and the labels are ints.
    """
    # BIOM tables are observations x samples, so transpose the fold first.
    X_train = np.transpose(X_train)
    samp_ids = ['%d' % j for j in range(X_train.shape[1])]
    otu_table = Table(X_train, obs_ids, samp_ids)
    ## Creates temp forlder to store each fold result
    folder = "tmp/"
    output = folder + "out"
    if not os.path.exists(folder):
        cmd = "mkdir -p {}".format(output)
        subprocess.check_output(cmd, shell = True)
    fold_biom = folder + str(i) + ".biom"
    with biom.util.biom_open(fold_biom, "w") as f:
        otu_table.to_hdf5(f,"example")
    labels_train = folder + "labels.tsv"
    pd.DataFrame(y_train).to_csv(labels_train, header = False, sep="\t")
    # Execute TADA
    cmd = "python TADA/src/utils/python/TADA_microbiom.py -t {} \
    -b {} -o {} -g {}".format(tree_newick, fold_biom, output, labels_train)
    subprocess.check_output(cmd, shell = True)
    # Convert the original fold's BIOM output back to TSV and load it.
    input_biom = output + "/" + str(i) + ".biom"
    biom_tsv = output + "/" + str(i) + ".tsv"
    cmd = "biom convert -i {} -o {} --to-tsv".format(input_biom, biom_tsv)
    subprocess.check_output(cmd, shell = True)
    df_train_biom = pd.read_csv(biom_tsv, skiprows=[0], sep= "\t", index_col=0)
    # Same conversion for the TADA-augmented table.
    input_aug_biom = output + "/augmented_data.biom"
    biom_aug_tsv = output + "/augmented_data.tsv"
    cmd = "biom convert -i {} -o {} --to-tsv".format(input_aug_biom, biom_aug_tsv)
    subprocess.check_output(cmd, shell = True)
    df_aug_biom = pd.read_csv(biom_aug_tsv, skiprows=[0], sep= "\t", index_col=0)
    # Labels: original fold first, then the augmented metadata; the class
    # label is the last tab-separated field on each line.
    labels = []
    tmp = [line.rstrip('\n') for line in open(output + "/labels.tsv")]
    for line in tmp:
        labels.append(int(line.split("\t")[-1]))
    tmp = [line.rstrip('\n') for line in open(output + "/augmented_meta_data.csv")]
    for line in tmp:
        labels.append(int(line.split("\t")[-1]))
    # concatenate
    X_train_tada = pd.concat([df_train_biom, df_aug_biom], ignore_index = True, axis=1).values
    y_train_tada = labels
    return np.transpose(X_train_tada), y_train_tada
def process_metrics_df(metrics_models, type_str):
    """Reshape a ``{"<model>_<metric>": {fold: value}}`` dict into a tidy frame.

    Returns a DataFrame with columns fold, model, mcc, auc, type, one row
    per (fold, model) pair.  ``type_str`` labels every row.
    """
    stacked = pd.DataFrame.from_dict(metrics_models).stack().reset_index()
    # .copy() so the column rewrites below operate on real frames rather
    # than views of ``stacked`` (avoids pandas SettingWithCopyWarning /
    # silently-lost chained assignments).
    mcc_rows = stacked[stacked["level_1"].str.contains("mcc")].copy()
    auc_rows = stacked[stacked["level_1"].str.contains("auc")].copy()
    # Strip the metric suffix, keeping only the model name.
    auc_rows["level_1"] = [name.split("_")[0] for name in auc_rows.level_1]
    mcc_rows["level_1"] = [name.split("_")[0] for name in mcc_rows.level_1]
    result = mcc_rows
    result.columns = ["fold", "model", "mcc"]
    result["auc"] = auc_rows[0].values
    result["type"] = type_str
    return result
def smote_both(X_train, y_train):
    """Rebalance classes via SMOTE oversampling then random undersampling."""
    counts = dict(Counter(y_train))
    minority = min(counts.values())
    majority = max(counts.values())
    # Midpoint target: close the gap between the class sizes.
    gap = round(abs(minority - majority))
    oversampler = SMOTE(sampling_strategy=(abs(gap - minority) / majority))
    undersampler = RandomUnderSampler(sampling_strategy=((majority - gap) / minority))
    pipeline = Pipeline(steps=[('o', oversampler), ('u', undersampler)])
    return pipeline.fit_resample(X_train, y_train)
def adasyn_both(X_train, y_train):
    """Rebalance classes via ADASYN oversampling then random undersampling."""
    counts = dict(Counter(y_train))
    minority = min(counts.values())
    majority = max(counts.values())
    # Midpoint target: close the gap between the class sizes.
    gap = round(abs(minority - majority))
    oversampler = ADASYN(sampling_strategy=(abs(gap - minority) / majority))
    undersampler = RandomUnderSampler(sampling_strategy=((majority - gap) / minority))
    pipeline = Pipeline(steps=[('o', oversampler), ('u', undersampler)])
    return pipeline.fit_resample(X_train, y_train)
| StarcoderdataPython |
8025643 | <gh_stars>0
money = float(input())
sex = input()
years = int(input())
sport = input()

# Price matrix keyed by sex then activity; unknown combinations cost 0.0,
# matching the original fall-through behaviour.
price_table = {
    "m": {"Gym": 42, "Boxing": 41, "Yoga": 45, "Zumba": 34,
          "Dances": 51, "Pilates": 39},
    "f": {"Gym": 35, "Boxing": 37, "Yoga": 42, "Zumba": 31,
          "Dances": 53, "Pilates": 37},
}
price = price_table.get(sex, {}).get(sport, 0.0)

# Customers aged 19 or younger get a 20% discount.
if years <= 19:
    price *= 0.80

if money >= price:
    print(f"You purchased a 1 month pass for {sport}.")
else:
    print(f"You don't have enough money! You need ${abs(money- price):.2f} more.")
| StarcoderdataPython |
3318795 | <reponame>joshmaglione/ZetaFunctionSandbox
#
# Copyright 2019 <NAME>
#
# Distributed under MIT License
#
from sage.all import Matrix as _Matrix
from sage.all import Polyhedron as _Polyhedron
from sage.all import PolynomialRing as _PolynomialRing
from sage.all import QQ as _QQ
from sage.all import var as _var
from sage.rings.integer import Integer as _Sage_int
from Zeta.smurf import SMURF as _Zeta_smurf
from GenFunc import GenFunc as _GenFunc
# Make sure we understand the input to the main functions.
def _input_check(word, leq_char, verbose, variable, sub):
if not isinstance(word, str):
raise TypeError('Expected the word to be a string.')
if len({w for w in word}) > 2:
raise ValueError('Expected word to be a 2-letter alphabet.')
if not isinstance(leq_char, str):
raise TypeError('Expected leq_char to be a string.')
if not isinstance(verbose, bool):
raise TypeError('Expected "verbose" to be either True or False.')
if not isinstance(variable, str):
raise TypeError('Expected "variable" to be a string.')
pass
# Given a word and a leq_char, construct the matrix whose rows give the
# inequalities for the Polyhedron function in Sage.
def _build_ineqs(word, leq_char, Dynkin="A"):
# Initial values.
n = len(word) + 1
relations = []
zero_vec = tuple([0 for i in range(n + 1)])
# Basic function: add k to the i-th component of v.
def add_k_i(v, i, k):
u = list(v)
u[i] += k
return tuple(u)
# nonnegative relations.
relations += [add_k_i(zero_vec, i, 1) for i in range(1, n + 1)]
# word relations.
if n > 1:
for x in zip(word, range(1, n)):
if x[0] == leq_char:
if Dynkin == "D" and x[1] == n-1:
v = add_k_i(zero_vec, x[1] - 1, -1)
u = add_k_i(v, x[1] + 1, 1)
elif Dynkin == "E" and x[1] == n-1:
v = add_k_i(zero_vec, 3, -1)
u = add_k_i(v, x[1] + 1, 1)
else:
v = add_k_i(zero_vec, x[1], -1)
u = add_k_i(v, x[1] + 1, 1)
else:
if Dynkin == "D" and x[1] == n-1:
v = add_k_i(zero_vec, x[1] - 1, 1)
u = add_k_i(v, x[1] + 1, -1)
elif Dynkin == "E" and x[1] == n-1:
v = add_k_i(zero_vec, 3, 1)
u = add_k_i(v, x[1] + 1, -1)
else:
v = add_k_i(zero_vec, x[1], 1)
u = add_k_i(v, x[1] + 1, -1)
relations.append(u)
return relations
def _eval_relations(relations, verbose, variable, sub):
    """Feed the cone described by ``relations`` to Zeta and return the
    resulting rational function, optionally substituting every cone
    variable x_i by the single variable named ``variable``."""
    n = len(relations[0]) - 1
    # In case the user wants to verify the matrix.
    if verbose:
        print("The matrix corresponding to the polyhedral cone:")
        print("%s" % (_Matrix(relations)))
    # Define the polyhedral cone and corresponding polynomial ring.
    P = _Polyhedron(ieqs=relations)
    R = _PolynomialRing(_QQ, 'x', n)
    # Define substitution.
    if sub:
        t = _var(variable)
        # Sage names the generators 'x0'..'x{n-1}' for n > 1, plain 'x' for n == 1.
        if n > 1:
            subs = {_var('x' + str(i)) : t for i in range(n)}
        else:
            subs = {_var('x') : t}
        # Apply Zeta
        sm = _Zeta_smurf.from_polyhedron(P, R)
        Z = sm.evaluate().subs(subs).factor().simplify()
    else:
        # Apply Zeta
        sm = _Zeta_smurf.from_polyhedron(P, R)
        Z = sm.evaluate().factor().simplify()
    return Z
def _solve_and_wrap(rels, verb=False, varb='t', sub=True):
    """Evaluate the cone and wrap the result in a GenFunc.

    When ``sub`` is set, the generating function is re-expressed over the
    standard denominator (1 - t)(1 - t^2)...(1 - t^n).
    """
    Z = _GenFunc(_eval_relations(rels, verb, varb, sub))
    if sub:
        stand_denom = 1
        X = _var(varb)
        # Build prod_{k=1}^{n} (1 - t^k) where n = len(rels[0]) - 1.
        for k in range(1, len(rels[0])):
            stand_denom *= (1 - X**k)
        return Z.format(denominator=stand_denom)
    else:
        return Z
def ThinZeta_An(word, leq_char="0", verbose=False, variable='t', sub=True):
    """Thin zeta function of Dynkin type A for the given 2-letter word."""
    # Integer input: take the binary expansion minus its leading bit, reversed.
    if isinstance(word, (int, _Sage_int)):
        word = str(word.binary()[1:])[::-1]
    if verbose:
        print(word)
    # Make sure we understand the input.
    _input_check(word, leq_char, verbose, variable, sub)
    return _solve_and_wrap(_build_ineqs(word, leq_char),
                           verb=verbose, varb=variable, sub=sub)
def ThinZeta_Dn(word, leq_char="0", verbose=False, variable='t', sub=True):
    """Thin zeta function of Dynkin type D; short words degenerate to type A."""
    if isinstance(word, (int, _Sage_int)):
        word = str(word.binary()[1:])[::-1]
    if verbose:
        print(word)
    # With at most two edges the D diagram coincides with the A diagram.
    if len(word) <= 2:
        return ThinZeta_An(word, leq_char=leq_char, verbose=verbose,
                           variable=variable, sub=sub)
    _input_check(word, leq_char, verbose, variable, sub)
    return _solve_and_wrap(_build_ineqs(word, leq_char, Dynkin="D"),
                           verb=verbose, varb=variable, sub=sub)
def ThinZeta_En(word, leq_char="0", verbose=False, variable='t', sub=True):
    """Thin zeta function of Dynkin type E (at most 7 edges, i.e. up to E8);
    short words degenerate to type A."""
    if isinstance(word, (int, _Sage_int)):
        word = str(word.binary()[1:])[::-1]
    if verbose:
        print(word)
    # With at most four edges the E diagram coincides with the A diagram.
    if len(word) <= 4:
        return ThinZeta_An(word, leq_char=leq_char, verbose=verbose,
                           variable=variable, sub=sub)
    if len(word) > 7:
        raise ValueError("Expected at most 7 edges for type E.")
    _input_check(word, leq_char, verbose, variable, sub)
    return _solve_and_wrap(_build_ineqs(word, leq_char, Dynkin="E"),
                           verb=verbose, varb=variable, sub=sub)
| StarcoderdataPython |
3231065 | from .wrapped_dict import WrappedDict
class WorldState(WrappedDict):
    """A model of the world state"""

    def __init__(self, *args, **kwargs):
        # Turn counter starts at zero and only advances on real ticks.
        self.turn_number = 0
        super().__init__(*args, **kwargs)

    def automatic_tick(self):
        """Hook for effects that apply on every tick; no-op by default."""
        return

    def execution_tick(self):
        """Hook for effects that apply only on real ticks; no-op by default."""
        return

    def simulation_tick(self):
        """Advance a hypothetical turn: automatic effects only, no counter bump."""
        self.automatic_tick()

    def real_tick(self):
        """Advance an actual turn: bump the counter and run both hooks."""
        self.turn_number += 1
        self.automatic_tick()
        self.execution_tick()

    def describe(self):
        """Dump the turn number and the wrapped contents for debugging."""
        print(self.turn_number, self.wrapping())

    def child(self):
        """Clone via the parent class, carrying the turn counter across."""
        clone = super().child()
        clone.turn_number = self.turn_number
        return clone
| StarcoderdataPython |
3349410 | print('1) проверка числа на простоту (простые числа - это те числа у которых делители единица и они сами);')
def prime(n):
    """Classify ``n`` as prime or composite.

    Returns a tuple ``(n, label)`` where the (Russian) label is
    'простое число' for primes and 'непростое число' otherwise.

    Bug fix: the original fell off the end and returned ``None`` for
    ``n < 2``; it also ran trial division all the way to ``n`` instead of
    stopping at ``sqrt(n)``.
    """
    if n < 2:
        # 0, 1 and negatives are not prime by definition.
        return (n, 'непростое число')
    i = 2
    while i * i <= n:
        if n % i == 0:
            return (n, 'непростое число')
        i += 1
    return (n, 'простое число')
print(prime(51))
print(prime(17))
print(prime(17))
print('2) функция выводит список всех делителей числа:', 256)
def denominator(n):
    """Return the proper divisors of ``n`` (every divisor except ``n`` itself).

    Bug fix: the original reassigned ``n = n // 2`` and then tested
    ``n % i`` against the *halved* value, so it returned the divisors of
    ``n // 2`` rather than of ``n`` (e.g. it dropped 4 for n=12).  Proper
    divisors never exceed n // 2, so iterating up to that bound is still
    correct — but the divisibility test must use the original ``n``.
    """
    result = []
    for i in range(1, n // 2 + 1):
        if n % i == 0:
            result.append(i)
    return result
print(denominator(256))
print('3) выводит самый большой простой делитель числа')
from functools import reduce
def max_denominator(n):
    """Return the largest value produced by ``denominator(n)``.

    Uses the built-in ``max`` instead of the original
    ``reduce(lambda a, b: a if a > b else b, ...)``, which is equivalent.

    NOTE(review): the banner above claims "largest *prime* divisor", but this
    returns the largest proper divisor (128 for 256, which is not prime).
    Kept as-is to preserve the demo output — confirm the intended behavior.
    """
    return max(denominator(n))
print(max_denominator(256))
| StarcoderdataPython |
9647745 | #coding:utf-8
# 作为库,下面的导入为 .tt 而不是 tt,
# 但是这样作为主程序启动就会报错,
# "ImportError: attempted relative import with no known parent package"
#
# 如果用.tt 作为主程序不会报错,但作为库使用会报错
# ImportError: No module named 'tt'
# 如果作为库,可以添加 -m
# python -m file_io.print_abspath (要从主文件夹启动)
# 不管从哪里输出,当前文件夹总是主程序文件夹
import os
from .tt import print_dir
print_dir.test()  # side effect at import time: runs the tt.print_dir demo
s = os.path.abspath('.')
print(s)  # the current directory is always the *main* program's start directory
def test():
    """Re-run print_dir.test(); exposed for callers importing this as a library."""
    print_dir.test()
| StarcoderdataPython |
6439368 | import numpy as np
from collections import deque
class CollisionDetector:
    """Collision queries against a boolean 3-D occupancy grid.

    ``env`` is indexed as env[x, y, z]; a truthy cell means "occupied".
    """

    collision_resolution = 1  # sampling step (in cells) along a path

    def __init__(self, env):
        # env: boolean 3-D array; True marks an occupied cell.
        self.env = env

    def isCollidedPoint(self, point):
        """Return the occupancy value at integer grid coordinate ``point``."""
        return self.env[point[0], point[1], point[2]]

    def isCollidedPath(self, start, end):
        """Return True iff the straight segment start -> end crosses an occupied cell.

        Bug fixes vs. the original:
        * the step vector was computed as (start - end) / steps, which walked
          *away* from ``end`` and into negative indices; it now walks from
          ``start`` toward ``end``;
        * removed an unfinished "binary tree search" stub that dereferenced an
          undefined name ``p`` and raised NameError whenever the linear scan
          found no collision;
        * a zero-length path no longer divides by zero.
        """
        delta = end - start
        steps = int(max(abs(delta)))
        if steps == 0:
            # Degenerate segment: just test the single cell.
            return bool(self.isCollidedPoint(np.asarray(start).astype(int)))
        step_vec = delta / steps
        probe = start.astype(float)
        for _ in range(steps + 1):
            if self.isCollidedPoint(np.ceil(probe).astype(int)):
                return True
            probe = probe + step_vec
        return False
if __name__ == "__main__":
    # Ad-hoc smoke test: random 10x10x10 boolean occupancy grid (~50% filled).
    env = np.random.rand(1000).reshape((10,10,10))
    env = env > 0.5
    collision_detector = CollisionDetector(env)
    # Single random point query.
    point = np.random.randint(0, 10, 3)
    clded = collision_detector.isCollidedPoint(point)
    print(clded)
    # Straight-line path query between two fixed corners.
    start = np.array([0, 0, 1])
    end = np.array([8, 3, 5])
    clded = collision_detector.isCollidedPath(start, end)
    print(clded)
3344475 | """An example of training PCL against OpenAI Gym Envs.
This script is an example of training a PCL agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_pcl_gym.py
To solve InvertedPendulum-v1, run:
python train_pcl_gym.py --env InvertedPendulum-v1
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases()
import argparse
import chainer
import gym
gym.undo_logger_setup()
import gym.wrappers
import numpy as np
import chainerrl
from chainerrl import experiments
from chainerrl import misc
from chainerrl.optimizers import rmsprop_async
def exp_return_of_episode(episode):
    """Exponentiated total reward of an episode (a list of step dicts)."""
    total_reward = sum(step['reward'] for step in episode)
    return np.exp(total_reward)
def main():
    """Train or evaluate a PCL agent on an OpenAI Gym environment.

    Behaviour is controlled entirely by command-line flags: synchronous
    single-process training by default, asynchronous multi-process training
    with --train-async, and evaluation of a saved agent with --demo --load.
    """
    import logging
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--processes', type=int, default=8)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--env', type=str, default='CartPole-v0')
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--outdir', type=str, default=None)
    parser.add_argument('--batchsize', type=int, default=10)
    parser.add_argument('--rollout-len', type=int, default=10)
    parser.add_argument('--n-hidden-channels', type=int, default=100)
    parser.add_argument('--n-hidden-layers', type=int, default=2)
    parser.add_argument('--n-times-replay', type=int, default=1)
    parser.add_argument('--replay-start-size', type=int, default=10000)
    parser.add_argument('--t-max', type=int, default=None)
    parser.add_argument('--tau', type=float, default=1e-2)
    parser.add_argument('--profile', action='store_true')
    parser.add_argument('--steps', type=int, default=8 * 10 ** 7)
    parser.add_argument('--eval-interval', type=int, default=10 ** 5)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=7e-4)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--train-async', action='store_true', default=False)
    parser.add_argument('--prioritized-replay', action='store_true',
                        default=False)
    parser.add_argument('--disable-online-update', action='store_true',
                        default=False)
    parser.add_argument('--backprop-future-values', action='store_true',
                        default=True)
    parser.add_argument('--no-backprop-future-values', action='store_false',
                        dest='backprop_future_values')
    args = parser.parse_args()
    logging.basicConfig(level=args.logger_level)
    # Fix all RNG seeds for reproducibility when a seed was requested.
    if args.seed is not None:
        misc.set_random_seed(args.seed)
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    def make_env(process_idx, test):
        # Build one env instance; only process 0 gets monitoring/rendering.
        env = gym.make(args.env)
        if args.monitor and process_idx == 0:
            env = gym.wrappers.Monitor(env, args.outdir)
        # Scale rewards observed by agents
        if not test:
            misc.env_modifiers.make_reward_filtered(
                env, lambda x: x * args.reward_scale_factor)
        if args.render and process_idx == 0 and not test:
            misc.env_modifiers.make_rendered(env)
        return env
    # Probe one env to read its spaces and episode length limit.
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Switch policy types accordingly to action space types
    if isinstance(action_space, gym.spaces.Box):
        # Continuous actions: Gaussian policy.
        model = chainerrl.agents.pcl.PCLSeparateModel(
            pi=chainerrl.policies.FCGaussianPolicy(
                obs_space.low.size, action_space.low.size,
                n_hidden_channels=args.n_hidden_channels,
                n_hidden_layers=args.n_hidden_layers,
                bound_mean=True,
                min_action=action_space.low,
                max_action=action_space.high,
                var_wscale=1e-3,
                var_bias=1,
                var_type='diagonal',
            ),
            v=chainerrl.v_functions.FCVFunction(
                obs_space.low.size,
                n_hidden_channels=args.n_hidden_channels,
                n_hidden_layers=args.n_hidden_layers,
            )
        )
    else:
        # Discrete actions: softmax policy.
        model = chainerrl.agents.pcl.PCLSeparateModel(
            pi=chainerrl.policies.FCSoftmaxPolicy(
                obs_space.low.size, action_space.n,
                n_hidden_channels=args.n_hidden_channels,
                n_hidden_layers=args.n_hidden_layers
            ),
            v=chainerrl.v_functions.FCVFunction(
                obs_space.low.size,
                n_hidden_channels=args.n_hidden_channels,
                n_hidden_layers=args.n_hidden_layers,
            ),
        )
    # GPU execution is only used for synchronous training.
    if not args.train_async and args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu(args.gpu)
    if args.train_async:
        opt = rmsprop_async.RMSpropAsync(lr=args.lr, alpha=0.99)
    else:
        opt = chainer.optimizers.Adam(alpha=args.lr)
    opt.setup(model)
    # Episodic replay buffer, optionally prioritized by exponentiated return.
    if args.prioritized_replay:
        replay_buffer = \
            chainerrl.replay_buffer.PrioritizedEpisodicReplayBuffer(
                capacity=5 * 10 ** 3,
                uniform_ratio=0.1,
                default_priority_func=exp_return_of_episode,
                wait_priority_after_sampling=False,
                return_sample_weights=False)
    else:
        replay_buffer = chainerrl.replay_buffer.EpisodicReplayBuffer(
            capacity=5 * 10 ** 3)
    agent = chainerrl.agents.PCL(
        model, opt, replay_buffer=replay_buffer,
        t_max=args.t_max, gamma=0.99,
        tau=args.tau,
        phi=lambda x: x.astype(np.float32, copy=False),
        rollout_len=args.rollout_len,
        n_times_replay=args.n_times_replay,
        replay_start_size=args.replay_start_size,
        batchsize=args.batchsize,
        train_async=args.train_async,
        disable_online_update=args.disable_online_update,
        backprop_future_values=args.backprop_future_values,
    )
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation only: run eval_n_runs episodes and report statistics.
        env = make_env(0, True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_runs=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        if args.train_async:
            experiments.train_agent_async(
                agent=agent,
                outdir=args.outdir,
                processes=args.processes,
                make_env=make_env,
                profile=args.profile,
                steps=args.steps,
                eval_n_runs=args.eval_n_runs,
                eval_interval=args.eval_interval,
                max_episode_len=timestep_limit)
        else:
            experiments.train_agent_with_evaluation(
                agent=agent,
                env=make_env(0, test=False),
                eval_env=make_env(0, test=True),
                outdir=args.outdir,
                steps=args.steps,
                eval_n_runs=args.eval_n_runs,
                eval_interval=args.eval_interval,
                max_episode_len=timestep_limit)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3387044 | <reponame>PHIKN1GHT/secp_2020<gh_stars>0
from server import app, DBSession
from flask import Blueprint, request, session, send_file, make_response, jsonify
from utils import captcha, cmparePswd, invalid, invalidate
from flask_jwt_extended import jwt_required, jwt_optional, create_access_token, get_jwt_identity, get_raw_jwt
import io
from model import *
import math
bp = Blueprint('mall',__name__)
from flask_sqlalchemy import BaseQuery
@bp.route("/homepage", methods=['POST', 'GET'])
def homepage():
sess = DBSession()
current_page = request.json['page'] if request.is_json and ('page' in request.json.keys()) else 1
per_page = request.json['per_page'] if request.is_json and ('per_page' in request.json.keys()) else 20
idx = (current_page - 1) * per_page
result = sess.query(Product).filter_by(shelved=True,archived=False).all()
total = len(result)
pages = math.ceil(total / per_page)
idx_start = max(min(idx, len(result)), 0)
idx_end = max(min(idx+per_page, len(result)), 0)
result = result[idx_start : idx_end]
roots = Category.all()
cates = []
for k, v in roots.items():
cate = sess.query(Category).filter_by(name=k).first()
for sk in v:
subcates = sess.query(Category).filter_by(parent_id=cate.id).all()
cates += [{"id":sc.id, "name":sc.name, 'image':sc.thumbnail} for sc in subcates]
#cates.append({"id":cate.id, "name":cate.name, "subcate":[{"id":sc.id, "name":sc.name} for sc in subcates]})
prods = [p.brief() for p in result] if result else []
return jsonify(total=total,totalPages=pages,categories=cates,products=prods), 200
@bp.route("/category", methods=['POST', 'GET'])
def category():
sess = DBSession()
category_id = request.json['id'] if request.is_json and ('id' in request.json.keys()) else None
current_page = request.json['page'] if request.is_json and ('page' in request.json.keys()) else 1
per_page = request.json['per_page'] if request.is_json and ('per_page' in request.json.keys()) else 20
category = []
if category_id == None:
roots = Category.all()
for k, v in roots.items():
category += v
else:
category += sess.query(Category).filter_by(id=category_id).first().children()
category = [sess.query(Category).filter_by(name=c).first() for c in category]
category = [c.id for c in category if c != None]
category.append(category_id)
result = []
for cate in category:
result += sess.query(Product).filter_by(shelved=True,archived=False,category_id=cate)
result = sorted(result, key=lambda x: x.id)
idx = (current_page - 1) * per_page
total = len(result)
pages = math.ceil(total / per_page)
idx_start = max(min(idx, len(result)), 0)
idx_end = max(min(idx+per_page, len(result)), 0)
result = result[idx_start : idx_end]
prods = [p.brief() for p in result] if result else []
return jsonify(total=total,totalPages=pages,products=prods), 200
@bp.route("/catalogs")
def catalogs():
sess = DBSession()
roots = Category.all()
cates = []
for k, v in roots.items():
cate = sess.query(Category).filter_by(name=k).first()
for sk in v:
subcates = sess.query(Category).filter_by(parent_id=cate.id).all()
cates += [{"id":sc.id, "name":sc.name, 'image':sc.thumbnail} for sc in subcates]
return jsonify(catalogs=cates), 200
@bp.route("/search", methods=['POST', 'GET'])
def search():
sess = DBSession()
filterstr = "%{}%".format(request.json['filter']) if request.is_json and ('filter' in request.json.keys()) else "%"
current_page = request.json['page'] if request.is_json and ('page' in request.json.keys()) else 1
per_page = request.json['per_page'] if request.is_json and ('per_page' in request.json.keys()) else 20
result = sess.query(Product).filter_by(shelved=True,archived=False).filter(Product.title.like(filterstr)).all()
result = sorted(result, key=lambda x: x.id)
idx = (current_page - 1) * per_page
total = len(result)
pages = math.ceil(total / per_page)
idx_start = max(min(idx, len(result)), 0)
idx_end = max(min(idx+per_page, len(result)), 0)
result = result[idx_start : idx_end]
prods = [p.brief() for p in result] if result else []
return jsonify(total=total,totalPages=pages,products=prods), 200
| StarcoderdataPython |
6626015 | <gh_stars>1-10
# 2019-11-11 16:07:35(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
celebratable = [7, 5, 3]
def main():
    """Read one integer from stdin; print YES if it is 7, 5 or 3, else NO."""
    x = int(sys.stdin.readline().rstrip())
    if x in celebratable:
        print('YES')
    else:
        print('NO')
if __name__ == "__main__":
    main()
| StarcoderdataPython |
390542 | import os
import ogr
from math import ceil
def rasttovecgrid(outputGridfn,xmin,xmax,ymin,ymax,gridHeight,gridWidth):
    """Write a rectangular polygon grid covering the given extent to a shapefile.

    Parameters
    ----------
    outputGridfn : str
        Path of the ESRI Shapefile to create (overwritten if it already exists).
    xmin, xmax, ymin, ymax : numeric
        Extent of the grid in map units.
    gridHeight, gridWidth : numeric
        Cell size; the extent is covered with ceil() cells, so the last
        row/column may extend past ymin/xmax.
    """
    # convert arguments to float (they may arrive as strings)
    xmin = float(xmin)
    xmax = float(xmax)
    ymin = float(ymin)
    ymax = float(ymax)
    gridWidth = float(gridWidth)
    gridHeight = float(gridHeight)
    # get rows
    rows = ceil((ymax-ymin)/gridHeight)
    # get columns
    cols = ceil((xmax-xmin)/gridWidth)
    # start grid cell envelope at the top-left corner
    ringXleftOrigin = xmin
    ringXrightOrigin = xmin + gridWidth
    ringYtopOrigin = ymax
    ringYbottomOrigin = ymax-gridHeight
    # create output file
    outDriver = ogr.GetDriverByName('ESRI Shapefile')
    if os.path.exists(outputGridfn):
        os.remove(outputGridfn)
    outDataSource = outDriver.CreateDataSource(outputGridfn)
    outLayer = outDataSource.CreateLayer(outputGridfn,geom_type=ogr.wkbPolygon )
    featureDefn = outLayer.GetLayerDefn()
    # create grid cells, column by column
    countcols = 0
    while countcols < cols:
        countcols += 1
        # reset envelope for rows
        ringYtop = ringYtopOrigin
        ringYbottom =ringYbottomOrigin
        countrows = 0
        while countrows < rows:
            countrows += 1
            # closed ring tracing the cell boundary
            ring = ogr.Geometry(ogr.wkbLinearRing)
            ring.AddPoint(ringXleftOrigin, ringYtop)
            ring.AddPoint(ringXrightOrigin, ringYtop)
            ring.AddPoint(ringXrightOrigin, ringYbottom)
            ring.AddPoint(ringXleftOrigin, ringYbottom)
            ring.AddPoint(ringXleftOrigin, ringYtop)
            poly = ogr.Geometry(ogr.wkbPolygon)
            poly.AddGeometry(ring)
            # add new geom to layer
            outFeature = ogr.Feature(featureDefn)
            outFeature.SetGeometry(poly)
            outLayer.CreateFeature(outFeature)
            # Bug fix: the original wrote ``outFeature.Destroy`` without
            # parentheses, which never called the method and leaked features.
            outFeature.Destroy()
            # new envelope for next poly
            ringYtop = ringYtop - gridHeight
            ringYbottom = ringYbottom - gridHeight
        # new envelope for next poly
        ringXleftOrigin = ringXleftOrigin + gridWidth
        ringXrightOrigin = ringXrightOrigin + gridWidth
    # Close DataSources (flushes the shapefile to disk)
    outDataSource.Destroy()
| StarcoderdataPython |
5167299 | <filename>setup.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup
# Long description is README plus the changelog (with the marker stripped).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
# Only require pytest-runner when a test-related command is actually invoked.
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    'six',
    'mock',
    'pytest',
    'django<2.0.0',
    'requests',
]
setup(
    name='curlit',
    version='0.2.1',
    description="Generate curl commands from various python libraries request objects",
    long_description=readme + '\n\n' + history,
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/NorthIsUp/curlit',
    packages=[
        'curlit',
    ],
    package_dir={
        'curlit': 'curlit'
    },
    include_package_data=True,
    install_requires=requirements,
    # test_suite='tests',
    tests_require=test_requirements,
    setup_requires=pytest_runner,
)
| StarcoderdataPython |
11283648 | import torch as t
import torch.nn as nn
import torch.nn.functional as F
class AttentionLayer(nn.Module):
    """Single-head layer for attention-based convolution over sub-graphs."""
    def __init__(self, opt, in_channel, out_channel, alpha, concat=True):
        # opt must provide ``drop_out``; alpha is the LeakyReLU negative slope;
        # when ``concat`` is set the output passes through ELU (interior layer).
        super(AttentionLayer, self).__init__()
        self.drop_out = opt.drop_out
        self.in_dim = in_channel
        self.out_dim = out_channel
        self.concat = concat
        self.alpha = alpha
        # Shared linear projection W: p -> p'
        self.W = nn.Parameter(t.zeros(size=(in_channel, out_channel)))
        # nn.init.kaiming_uniform(self.W.data, gain=1.414)
        nn.init.xavier_uniform_(self.W.data, gain=1.414)  # could try kaiming_uniform_ instead
        # Attention vector a, applied to concatenated [root, child] features.
        self.a = nn.Parameter(t.zeros(size=(2 * out_channel, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
    def forward(self, sub_graph):
        """
        apply attention-based convolution
        :param sub_graph: previous sub_graph, shape of (batch x (m+1) x new_tree_nodes x p)
        :return: [new sub_graph, shape of (batch x new_tree_nodes x p'),
                attention coefficients, shape of (batch x new_tree_nodes x 1 x (m+1))]
        """
        h = sub_graph.matmul(self.W) # shape of (batch x (m+1) x new_tree_nodes x p')
        batch, m, tree_nodes, features = h.shape
        m -= 1
        # Index 0 along dim 1 holds the root; the remaining m entries are children.
        roots = h[:, 0, :, :] # shape of (batch x new_tree_nodes x p')
        childs = t.cat([h[:, :, i, :] for i in range(tree_nodes)], dim=1) # batch x new_tree_nodes*(m+1) x p'
        a_input = t.cat([roots.repeat(1, 1, m+1).view(batch, -1, features), childs], dim=-1) # map the roots to childs
        e = self.leakyrelu(t.matmul(a_input, self.a).squeeze(-1)) # shape of (batch x ((m+1)*new_tree_nodes))
        e = t.cat([e[:, i*(m+1):(i+1)*(m+1), None] for i in range(tree_nodes)], dim=-1) # batchx (m+1) x new_tree_nodes
        # Normalize attention over the (m+1) root/children entries per node.
        attention = F.softmax(e, dim=-2)
        attention = F.dropout(attention, self.drop_out, training=self.training)
        attention = attention.permute((0, 2, 1))
        attention = attention.unsqueeze(2) # shape of (batch x new_tree_nodes x 1 x (m+1))
        h = h.permute((0, 2, 1, 3)) # shape of (batch x new_tree_nodes x (m+1) x p')
        # Weighted aggregation of each node's root+children features.
        new_sub_graph = t.matmul(attention, h).squeeze(2) # shape of (batch x new_tree_nodes x p')
        if self.concat:
            new_sub_graph = F.elu(new_sub_graph)
        return new_sub_graph, attention
class AttentionSCN(nn.Module):
    """Multi-head attention-based convolution: nheads AttentionLayers whose
    outputs are concatenated and projected back to out_channel features."""
    def __init__(self, opt, nheads, in_channel, out_channel, alpha=0.2, concat=True):
        super(AttentionSCN, self).__init__()
        self.drop_out = opt.drop_out
        self.attentions = [AttentionLayer(opt, in_channel, out_channel, alpha, concat) for _ in range(nheads)]
        # Register each head so its parameters are tracked by the module.
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)
        # Output projection: (nheads * p') -> p'
        self.W = nn.Parameter(t.zeros(size=(out_channel*nheads, out_channel)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
    def forward(self, sub_graph):
        """
        apply multihead attention-based convolution to sub_graph
        :param sub_graph: previous sub_graph, shape of (batch x (m+1) x new_tree_nodes x p)
        :return: [new sub_graph, shape of (batch x new_tree_nodes x p'),
                attention coefficients, concatenated across heads]
        """
        # sub_graph = F.dropout(sub_graph, self.drop_out, training=self.training)  # while debugging, consider re-enabling this line to see whether it helps
        new_sub_graph, attentions = [], []
        # Run every head on the same input and collect outputs/attention maps.
        for att in self.attentions:
            h, attention = att(sub_graph)
            new_sub_graph.append(h)
            attentions.append(attention)
        new_sub_graph = t.cat(new_sub_graph, dim=-1) # shape of (batch x new_tree_nodes x (nheads*p'))
        attentions = t.cat(attentions, dim=-2) # shape of (batch x new_tree_nodes x nheads x (m+1))
        new_sub_graph = F.dropout(new_sub_graph, self.drop_out, training=self.training)
        new_sub_graph = t.matmul(new_sub_graph, self.W) # shape of (batch x new_tree_nodes x p')
        new_sub_graph = F.elu(new_sub_graph)
        return new_sub_graph, attentions
| StarcoderdataPython |
3498111 | #!flask/bin/python
from flask import Flask, request, logging
from KNX import *
import logging
app = Flask(__name__)
@app.route('/store/<int:floor_id>/<int:store_id>', methods=['POST'], strict_slashes=False)
def setStore(floor_id, store_id):
# Example values: 200 2 2 3/4/1
content = request.get_json()
value = int(int(content['value']) * 255 / 100)
size = '2'
acpi = '2'
group = '3/' + str(floor_id) + '/' + str(store_id)
res = process(value, size, acpi, group)
return res
@app.route('/radiator/<int:floor_id>/<int:radiator_id>', methods=['POST'], strict_slashes=False)
def setRadiator(floor_id, radiator_id):
# 200 2 2 3/4/1
content = request.get_json()
value = int(int(content['value']) * 255 / 100)
size = '2'
acpi = '2'
group = '0/' + str(floor_id) + '/' + str(radiator_id)
res = process(value, size, acpi, group)
return res
if __name__ == '__main__':
    # Note: ``logging`` here is the stdlib module (the later ``import logging``
    # shadows the name imported from flask above).
    logging.basicConfig(level=logging.INFO)
    logging.info('hello')
    app.run(host='::', debug=True, port=5500)  # listen on IPv6 (and v4-mapped) interfaces
| StarcoderdataPython |
3394554 | from animation import Animation
import time
import numpy as np
import colorsys
from numpy import pi
import travellers
import standers
from utils import add_params
class Brendo(Animation):
    """Animation that cuts the strip into alternating travelling-wave and
    standing-wave sections, optionally rotating the whole pattern over time."""
    layout_type = "Layout"
    def __init__(self, layout,
                 shift_rate=0,
                 num_sections=10,
                 **kwargs
                 ):
        """
        Shifts pixel colors along the strip in the order the LED strips would be laid in.

        shift_rate is how fast (pixels per second) the whole pattern rotates
        around the layout; num_sections is how many alternating
        traveller/stander segments the strip is cut into.
        """
        super(Brendo, self).__init__()
        self.layout = layout
        self.add_param("num_sections", num_sections, 1, 10)
        self.add_param("shift_rate", shift_rate, 0, 30)
        # Travellers
        # Re-expose the traveller/stander sub-animation parameters under
        # "tr_"/"st_" prefixes so both can be tuned independently.
        traveller_params = {"tr_"+key: value for key,value in travellers.params.items()}
        standers_params = {"st_"+key: value for key,value in standers.params.items()}
        add_params(self, traveller_params, strict=False, **kwargs)
        add_params(self, standers_params, strict=False, **kwargs)
    def update(self):
        """Advance one frame: rotate, slice into sections, update alternately."""
        # Get Shwifty: rotate the pixel list by a time-dependent offset.
        shift_rate = self.params["shift_rate"].value
        n = int(time.time()*shift_rate)%len(self.layout.pixels)
        pixels = self.layout.pixels[n:] + self.layout.pixels[:n]
        # Slice them and dice them into num_sections roughly equal strips.
        num_sections = int(self.params["num_sections"].value)
        indices = np.linspace(0, len(pixels), num_sections+1).astype(int)
        strips = []
        for i in range(len(indices)-1):
            strips.append(pixels[indices[i]:indices[i+1]])
        # Update: alternate stander / traveller treatment across strips.
        curr_pattern = True
        for strip in strips:
            curr_pattern = not curr_pattern
            if curr_pattern:
                self.update_stander(strip)
            else:
                self.update_traveller(strip)
    def update_traveller(self, pixels):
        """Apply the travelling-wave effect to one slice of pixels."""
        w = self.params["tr_width"].value
        a = self.params["tr_amplitude"].value
        v = self.params["tr_speed"].value
        spacing = self.params["tr_spacing"].value
        hue = self.params["tr_hue"].value
        hue_range = self.params["tr_hue_range"].value
        sat = self.params["tr_saturation"].value
        mod_intensity = self.params["tr_mod_intensity"].value
        delta = self.params["tr_delta"].value
        travellers.update_pixels(pixels, w, a, v, spacing, hue, hue_range, sat, self.fft, mod_intensity, delta)
    def update_stander(self, pixels):
        """Apply the standing-wave effect to one slice of pixels."""
        w = self.params["st_frequency"].value
        A = self.params["st_amplitude"].value
        l = self.params["st_wavelength"].value
        sat = self.params["st_saturation"].value
        hue = self.params["st_hue"].value
        # One FFT bin modulates the standing wave.
        fft_index = int(self.params["st_fft_channel"].value)
        mod = self.fft[fft_index]
        standers.update_pixels(pixels, w, A, l, sat, hue, mod)
| StarcoderdataPython |
8140480 | <filename>mailer/engine.py<gh_stars>0
from time import sleep
from models import Message, DontSendEntry, MessageLog
from django.core.mail import *
## configuration settings
# @@@ eventually move to settings.py
# when queue is empty, how long to wait (in seconds) before checking again
EMPTY_QUEUE_SLEEP = 300
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent.

    High-priority messages are drained (in when_added order) before anything
    else; medium-priority messages are yielded one at a time, re-checking for
    newly arrived high-priority mail between each; low-priority mail only goes
    out when both other queues are empty.  The generator returns once the
    queue is completely empty.

    NOTE(review): callers are expected to delete each message after handling
    it (as send_all() does); otherwise this loop never terminates.
    """
    while True:
        while Message.objects.high_priority().count() or Message.objects.medium_priority().count():
            while Message.objects.high_priority().count():
                for message in Message.objects.high_priority().order_by('when_added'):
                    yield message
            while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count():
                yield Message.objects.medium_priority().order_by('when_added')[0]
        while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count() == 0 and Message.objects.low_priority().count():
            yield Message.objects.low_priority().order_by('when_added')[0]
        if Message.objects.all().count() == 0:
            break
def send_all():
"""
Send all eligible messages in the queue.
"""
for message in prioritize():
if DontSendEntry.objects.has_address(message.to_address):
print "skipping email to %s as on don't send list " % message.to_address
MessageLog.objects.log(message, 2) # @@@ avoid using literal result code
message.delete()
else:
print "sending message '%s' to %s" % (message.subject, message.to_address)
MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
em = EmailMessage(subject=message.subject, body=message.message_body, from_email= message.from_address, to=message.to_address)
em.send()
message.delete()
def send_loop():
    """
    Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
    sending messages if any are on queue.
    """
    while True:
        # Sleep-poll until at least one message is queued.
        while not Message.objects.all():
            print 'sleeping for %s seconds before checking queue again' % EMPTY_QUEUE_SLEEP
            sleep(EMPTY_QUEUE_SLEEP)
        send_all()
| StarcoderdataPython |
237642 | current_cycle = '2021-3'
pilatus_data_dir = "/exp_path/hdf"  # directory for detector HDF data (presumably Pilatus frames) -- confirm
| StarcoderdataPython |
143245 | <gh_stars>1-10
from typing import Any, List, Dict, Optional, Sequence
from scipy import sparse
import os
import sklearn
import numpy # NOTE numpy is used only to set types and nan values. FIXME with correct types from AF
# import arrayfire as af # FIXME uncommnet when line 300 is resolved
from numpy import ndarray # FIXME to arrayfire ndarray typing
import pandas
# Custom import commands if any
from .afSimpleImputer import afSimpleImputer as SimpleImputer
# FIXME uncommnet when line 300 is resolved
# from .SKImputer_base_af import _get_mask
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m import utils
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from d3m.primitive_interfaces.base import CallResult, DockerContainer
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
Inputs = d3m_dataframe
Outputs = d3m_dataframe
class Params(params.Params):
    """Serializable fitted state of SKImputer, captured/restored by get_params()/set_params()."""
    statistics_: Optional[ndarray]  # per-column fill statistic learned by the wrapped imputer
    indicator_: Optional[sklearn.base.BaseEstimator]  # missing-value indicator estimator, if add_indicator was used
    input_column_names: Optional[pandas.core.indexes.base.Index]
    target_names_: Optional[Sequence[Any]]
    training_indices_: Optional[Sequence[int]]  # indices of the columns the imputer was fitted on
    target_column_indices_: Optional[Sequence[int]]
    target_columns_metadata_: Optional[List[Dict]]
class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameters for SKImputer.

    The first group mirrors sklearn SimpleImputer's constructor arguments
    (missing_values, strategy, add_indicator, fill_value); the remainder are
    the standard D3M column-selection and output-handling controls.
    """
    missing_values = hyperparams.Union(
        configuration={
            'int': hyperparams.Hyperparameter[int](
                default=0,
                semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
            ),
            'float': hyperparams.Hyperparameter[float](
                default=numpy.nan,
                semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
            )
        },
        default='float',
        description='The placeholder for the missing values. All occurrences of `missing_values` will be imputed.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
    )
    strategy = hyperparams.Enumeration[str](
        default='mean',
        values=['median', 'most_frequent', 'mean', 'constant'],
        description='The imputation strategy. - If "mean", then replace missing values using the mean along each column. Can only be used with numeric data. - If "median", then replace missing values using the median along each column. Can only be used with numeric data. - If "most_frequent", then replace missing using the most frequent value along each column. Can be used with strings or numeric data. - If "constant", then replace missing values with fill_value. Can be used with strings or numeric data. .. versionadded:: 0.20 strategy="constant" for fixed value imputation.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    add_indicator = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
    )
    fill_value = hyperparams.Union(
        configuration={
            'int': hyperparams.Hyperparameter[int](
                default=0,
                semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
            ),
            'none': hyperparams.Constant(
                default=None,
                semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
            )
        },
        default='none',
        description='When strategy == "constant", fill_value is used to replace all occurrences of missing_values. If left to the default, fill_value will be 0 when imputing numerical data and "missing_value" for strings or object data types.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
    # ---- standard D3M column-selection / output-handling controls ----
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
    exclude_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
    )
    return_result = hyperparams.Enumeration(
        values=['append', 'replace', 'new'],
        default='new',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
    )
    use_semantic_types = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
    )
    add_index_columns = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
    )
    error_on_no_input = hyperparams.UniformBool(
        default=True,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.",
    )
    return_semantic_type = hyperparams.Enumeration[str](
        values=['https://metadata.datadrivendiscovery.org/types/Attribute',
                'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
        default='https://metadata.datadrivendiscovery.org/types/Attribute',
        description='Decides what semantic type to attach to generated attributes',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
    )
class SKImputer(UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
    """
    Primitive for ArrayFire accelerated variant of sklearn SimpleImputer
    `sklearn documentation <https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html>`_
    """
    __author__ = "ArrayFire"
    # D3M primitive metadata: identity, taxonomy and pip install source.
    metadata = metadata_base.PrimitiveMetadata({
        "algorithm_types": [metadata_base.PrimitiveAlgorithmType.IMPUTATION, ],
        "name": "ArrayFire.impute.SimpleImputer",
        "primitive_family": metadata_base.PrimitiveFamily.DATA_CLEANING,
        "python_path": "d3m.primitives.data_cleaning.imputer.ArrayFire",
        "source": {'name': 'ArrayFire', 'contact': 'mailto:<EMAIL>', 'uris': ['https://github.com/arrayfire/d3m-arrayfire-primitives.git']},
        "version": "0.1.0",
        "id": "21709973-f877-4700-8675-92ac10a208d3",
        "hyperparams_to_tune": ['strategy'],
        'installation': [
            {'type': metadata_base.PrimitiveInstallationType.PIP,
             'package_uri': 'git+https://github.com/arrayfire/d3m-arrayfire-primitives.git@{git_commit}#egg=af_primitives'.format(
                 git_commit=utils.current_git_commit(os.path.dirname(__file__)),
             ),
             }]
    })
    def __init__(self, *,
                 hyperparams: Hyperparams,
                 random_seed: int = 0,
                 docker_containers: Dict[str, DockerContainer] = None,
                 _verbose: int = 0) -> None:
        """Build the wrapped SimpleImputer from the hyperparams; no fitting happens here."""
        super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
        self._clf = SimpleImputer(
            missing_values=self.hyperparams['missing_values'],
            strategy=self.hyperparams['strategy'],
            add_indicator=self.hyperparams['add_indicator'],
            fill_value=self.hyperparams['fill_value'],
            verbose=_verbose
        )
        # Training state; populated by set_training_data()/fit().
        self._inputs = None
        self._outputs = None
        self._training_inputs = None
        self._training_outputs = None
        self._target_names = None
        self._training_indices = None
        self._target_column_indices = None
        self._target_columns_metadata: List[Dict] = None
        self._input_column_names = None
        self._fitted = False
    def set_training_data(self, *, inputs: Inputs) -> None:
        """Store *inputs* for a later fit() and reset the fitted flag."""
        self._inputs = inputs
        self._fitted = False
    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """Fit the imputer on the columns selected by the hyperparams."""
        if self._fitted:
            return CallResult(None)
        self._training_inputs, self._training_indices, _ = self._get_columns_to_fit(self._inputs, self.hyperparams)
        if self._training_inputs is None:
            return CallResult(None)
        # Moved below the None guard: previously this attribute access ran
        # first and would raise on a None training frame before the guard.
        self._input_column_names = self._training_inputs.columns.astype(str)
        if len(self._training_indices) > 0:
            self._clf.fit(self._training_inputs)
            self._fitted = True
        else:
            if self.hyperparams['error_on_no_input']:
                raise RuntimeError("No input columns were selected")
            self.logger.warning("No input columns were selected")
        return CallResult(None)
    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
        """Impute the selected columns of *inputs* and merge the result per `return_result`."""
        sk_inputs, columns_to_use, _ = self._get_columns_to_fit(inputs, self.hyperparams)
        output = []
        if len(sk_inputs.columns):
            try:
                sk_output = self._clf.transform(sk_inputs)
            except sklearn.exceptions.NotFittedError as error:
                raise PrimitiveNotFittedError("Primitive not fitted.") from error
            # Densify sparse output so it can be wrapped in a DataFrame.
            if sparse.issparse(sk_output):
                sk_output = pandas.DataFrame.sparse.from_spmatrix(sk_output)
            target_columns_metadata = self._copy_columns_metadata(
                inputs.metadata, self._training_indices, self.hyperparams)
            output = self._wrap_predictions(inputs, sk_output, target_columns_metadata)
            # Restore the original names of the imputed columns.
            output.columns = [
                inputs.columns[idx] for idx in range(len(inputs.columns)) if idx in self._training_indices]
            output = [output]
        else:
            if self.hyperparams['error_on_no_input']:
                raise RuntimeError("No input columns were selected")
            self.logger.warning("No input columns were selected")
        _, _, dropped_cols = self._get_columns_to_fit(inputs, self.hyperparams)
        outputs = base_utils.combine_columns(
            return_result=self.hyperparams['return_result'], add_index_columns=self.hyperparams['add_index_columns'],
            inputs=inputs, column_indices=self._training_indices + dropped_cols, columns_list=output)
        return CallResult(outputs)
    def get_params(self) -> Params:
        """Export learned state (all-None learned attributes when not fitted)."""
        if not self._fitted:
            return Params(
                statistics_=None,
                indicator_=None,
                input_column_names=self._input_column_names,
                training_indices_=self._training_indices,
                target_names_=self._target_names,
                target_column_indices_=self._target_column_indices,
                target_columns_metadata_=self._target_columns_metadata
            )
        return Params(
            statistics_=getattr(self._clf, 'statistics_', None),
            indicator_=getattr(self._clf, 'indicator_', None),
            input_column_names=self._input_column_names,
            training_indices_=self._training_indices,
            target_names_=self._target_names,
            target_column_indices_=self._target_column_indices,
            target_columns_metadata_=self._target_columns_metadata
        )
    def set_params(self, *, params: Params) -> None:
        """Restore state exported by get_params(); marks fitted when learned attributes exist."""
        self._clf.statistics_ = params['statistics_']
        self._clf.indicator_ = params['indicator_']
        self._input_column_names = params['input_column_names']
        self._training_indices = params['training_indices_']
        self._target_names = params['target_names_']
        self._target_column_indices = params['target_column_indices_']
        self._target_columns_metadata = params['target_columns_metadata_']
        if params['statistics_'] is not None:
            self._fitted = True
        if params['indicator_'] is not None:
            self._fitted = True
    @classmethod
    def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams):
        """Select column indices to operate on; returns (subframe, used indices, dropped indices)."""
        if not hyperparams['use_semantic_types']:
            columns_to_produce = list(range(len(inputs.columns)))
        else:
            inputs_metadata = inputs.metadata
            def can_produce_column(column_index: int) -> bool:
                return cls._can_produce_column(inputs_metadata, column_index, hyperparams)
            columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(
                inputs_metadata, use_columns=hyperparams['use_columns'],
                exclude_columns=hyperparams['exclude_columns'], can_use_column=can_produce_column)
        columns_to_drop = cls._get_columns_to_drop(inputs, columns_to_produce, hyperparams)
        for col in columns_to_drop:
            columns_to_produce.remove(col)
        return inputs.iloc[:, columns_to_produce], columns_to_produce, columns_to_drop
    @classmethod
    def _get_columns_to_drop(cls, inputs: Inputs, column_indices: List[int], hyperparams: Hyperparams):
        """
        Check for columns that contain missing_values that need to be imputed
        If strategy is constant and missing_values is nan, then all nan columns will not be dropped
        :param inputs:
        :param column_indices:
        :return:
        """
        columns_to_remove = []
        if hyperparams['strategy'] != "constant":
            for _, col in enumerate(column_indices):
                # BUG
                # FIXME with uncomment below when resolved
                # inp = inputs.iloc[:, [col]].values
                # mask = _get_mask(inp, hyperparams['missing_values'])
                # if af.all_true(mask):
                #     columns_to_remove.append(col)
                # FIXME remove pass when bug is fixed
                pass
        return columns_to_remove
    @classmethod
    def _can_produce_column(
            cls, inputs_metadata: metadata_base.DataMetadata, column_index: int, hyperparams: Hyperparams) -> bool:
        """Return True when the column is numeric and carries all accepted semantic types."""
        column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))
        accepted_structural_types = (int, float, numpy.integer, numpy.float64)
        accepted_semantic_types = set()
        accepted_semantic_types.add("https://metadata.datadrivendiscovery.org/types/Attribute")
        if not issubclass(column_metadata['structural_type'], accepted_structural_types):
            return False
        semantic_types = set(column_metadata.get('semantic_types', []))
        if len(semantic_types) == 0:
            cls.logger.warning("No semantic types found in column metadata")
            return False
        # Making sure all accepted_semantic_types are available in semantic_types
        if len(accepted_semantic_types - semantic_types) == 0:
            return True
        return False
    @classmethod
    def _get_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams) -> List[Dict]:
        """Build per-column metadata for outputs, adding the configured return semantic type."""
        outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
        target_columns_metadata: List[Dict] = []
        for column_index in range(outputs_length):
            column_metadata = dict(outputs_metadata.query_column(column_index))
            # Update semantic types and prepare it for predicted targets.
            semantic_types = set(column_metadata.get('semantic_types', []))
            semantic_types_to_remove = set([])
            # Fixed: was a list, but .add() is a set method (AttributeError);
            # now consistent with _copy_columns_metadata below.
            add_semantic_types = set()
            add_semantic_types.add(hyperparams["return_semantic_type"])
            semantic_types = semantic_types - semantic_types_to_remove
            semantic_types = semantic_types.union(add_semantic_types)
            column_metadata['semantic_types'] = list(semantic_types)
            target_columns_metadata.append(column_metadata)
        return target_columns_metadata
    @classmethod
    def _update_predictions_metadata(
            cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
            target_columns_metadata: List[Dict]) -> metadata_base.DataMetadata:
        """Generate fresh metadata for *outputs* and overlay the prepared per-column metadata."""
        outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)
        for column_index, column_metadata in enumerate(target_columns_metadata):
            column_metadata.pop("structural_type", None)
            outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)
        return outputs_metadata
    def _wrap_predictions(self, inputs: Inputs, predictions: ndarray, target_columns_metadata) -> Outputs:
        """Wrap raw predictions in a d3m DataFrame with updated metadata."""
        outputs = d3m_dataframe(predictions, generate_metadata=False)
        outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
        return outputs
    @classmethod
    def _copy_columns_metadata(
            cls, inputs_metadata: metadata_base.DataMetadata, column_indices, hyperparams) -> List[Dict]:
        """Copy metadata for the given input columns, adding the configured return semantic type."""
        target_columns_metadata: List[Dict] = []
        for column_index in column_indices:
            column_name = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index)).get("name")
            column_metadata = dict(inputs_metadata.query_column(column_index))
            semantic_types = set(column_metadata.get('semantic_types', []))
            semantic_types_to_remove = set([])
            add_semantic_types = set()
            add_semantic_types.add(hyperparams["return_semantic_type"])
            semantic_types = semantic_types - semantic_types_to_remove
            semantic_types = semantic_types.union(add_semantic_types)
            column_metadata['semantic_types'] = list(semantic_types)
            column_metadata["name"] = str(column_name)
            target_columns_metadata.append(column_metadata)
        return target_columns_metadata
# Reuse the sklearn docstring on the primitive class. The original line
# referenced an undefined name "afSKImputer"; the class above is SKImputer.
SKImputer.__doc__ = SimpleImputer.__doc__
| StarcoderdataPython |
12840405 | <reponame>cffbots/ESMValTool<filename>esmvaltool/cmorizers/obs/cmorize_obs_aphro_ma.py<gh_stars>100-1000
"""ESMValTool CMORizer for APHRODITE Monsoon Asia (APHRO-MA) data.
Tier
Tier 3: restricted dataset.
Source
http://aphrodite.st.hirosaki-u.ac.jp/download/
Last access
20200306
Download and processing instructions
Register at
http://aphrodite.st.hirosaki-u.ac.jp/download/create/
Download the following files from
http://aphrodite.st.hirosaki-u.ac.jp/product/:
APHRO_V1808_TEMP/APHRO_MA
025deg_nc/APHRO_MA_TAVE_025deg_V1808.nc.tgz
050deg_nc/APHRO_MA_TAVE_050deg_V1808.nc.tgz
APHRO_V1101/APHRO_MA
025deg_nc/APHRO_MA_025deg_V1101.1951-2007.nc.gz.tar
050deg_nc/APHRO_MA_050deg_V1101.1951-2007.nc.gz.tar
APHRO_V1101EX_R1/APHRO_MA
025deg_nc/APHRO_MA_025deg_V1101_EXR1.nc.tgz
050deg_nc/APHRO_MA_050deg_V1101_EXR1.nc.tgz
Please untar / unzip all *.tar *.tgz *.gz files in the same directory
(no subdirectories!) prior to running the cmorizer!
Issues:
In input file APHRO_MA_TAVE_050deg_V1808.2015.nc the input variable is
called ta instead of tave as in the other files.
Currently resolved using raw_fallback: ta in case of thrown
iris.exceptions.ConstraintMismatchError
Refs:
APHRO_V1101 and APHRO_V1101EX_R1
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>, and
<NAME>, 2012: APHRODITE: Constructing a Long-Term Daily Gridded
Precipitation Dataset for Asia Based on a Dense Network of Rain Gauges.
Bull. Amer. Meteor. Soc., 93, 1401–1415
https://doi.org/10.1175/BAMS-D-11-00122.1
APHRO_V1808_TEMP
<NAME>., <NAME>., <NAME>. (2011) Development of a long-term
daily gridded temperature dataset and its application to rain/snow
discrimination of daily precipitation,
Global Environmental Research 15 (2), 165-172
"""
import logging
from warnings import catch_warnings, filterwarnings
from pathlib import Path
import iris
from esmvalcore.preprocessor import monthly_statistics
from . import utilities as utils
logger = logging.getLogger(__name__)
def _extract_variable(short_name, var, cfg, filepath, out_dir, version):
    """CMORize one variable from *filepath* and save it to *out_dir*.

    Loads the raw cube (falling back to ``var['raw_fallback']`` when the
    primary raw name is absent, e.g. the 2015 TAVE file uses ``ta``),
    fixes units/coordinates/metadata, saves daily data, and optionally
    also builds and saves monthly means when ``var['add_mon']`` is set.
    """
    logger.info("CMORizing variable '%s' from input file '%s'", short_name,
                filepath)
    with catch_warnings():
        # The APHRO files carry a non-CF 'calendar' global attribute that
        # iris warns about; silence just that warning.
        filterwarnings(
            action='ignore',
            message="Skipping global attribute 'calendar': 'calendar' is .*",
            category=UserWarning,
            module='iris',
        )
        try:
            cube = iris.load_cube(
                str(filepath),
                constraint=utils.var_name_constraint(var['raw']),
            )
        except iris.exceptions.ConstraintMismatchError:
            # See module docstring: one input file names the variable 'ta'.
            cube = iris.load_cube(
                str(filepath),
                constraint=utils.var_name_constraint(var['raw_fallback']),
            )
    # Fix var units
    cmor_info = cfg['cmor_table'].get_variable(var['mip'], short_name)
    cube.units = var.get('raw_units', short_name)
    cube.convert_units(cmor_info.units)
    utils.fix_var_metadata(cube, cmor_info)
    # fix coordinates
    if 'height2m' in cmor_info.dimensions:
        utils.add_height2m(cube)
    utils.fix_coords(cube)
    # Fix metadata
    attrs = cfg['attributes'].copy()
    attrs['mip'] = var['mip']
    attrs['version'] = version.replace('_', '-')
    attrs['reference'] = var['reference']
    # NOTE(review): self-assignment below is a no-op -- confirm whether a
    # format()/override of the source attribute was intended here.
    attrs['source'] = attrs['source']
    utils.set_global_atts(cube, attrs)
    # Save variable
    utils.save_variable(cube,
                        short_name,
                        out_dir,
                        attrs,
                        unlimited_dimensions=['time'])
    if 'add_mon' in var.keys():
        if var['add_mon']:
            logger.info("Building monthly means")
            # Calc monthly
            cube = monthly_statistics(cube)
            cube.remove_coord('month_number')
            cube.remove_coord('year')
            # Fix metadata
            attrs['mip'] = 'Amon'
            # Fix coordinates
            utils.fix_coords(cube)
            # Save variable
            utils.save_variable(cube,
                                short_name,
                                out_dir,
                                attrs,
                                unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, _):
    """Run the CMORization for every configured variable, version and file."""
    filename_template = cfg['filename']
    for short_name, var in cfg['variables'].items():
        for version in var['version'].values():
            logger.info("CMORizing variable '%s'", short_name)
            # Expand the glob pattern for this variable/version pair.
            pattern = filename_template.format(
                raw_file_var=var['raw_file_var'], version=version)
            matching_files = sorted(Path(in_dir).glob(pattern))
            for filepath in matching_files:
                _extract_variable(
                    short_name, var, cfg, filepath, out_dir, version)
| StarcoderdataPython |
5149056 | <filename>python_system.bzl
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generates a local repository that points at the system's Python installation."""
# BUILD file written into the generated repository: exposes defs.bzl and
# the system Python headers as a cc_library.
_BUILD_FILE = '''# Description:
#   Build rule for Python
exports_files(["defs.bzl"])
cc_library(
    name = "python_headers",
    hdrs = glob(["python3/**/*.h"]),
    includes = ["python3"],
    visibility = ["//visibility:public"],
)
'''
# Python snippet executed with the host interpreter to locate its include dir.
_GET_PYTHON_INCLUDE_DIR = """
import sys
from distutils.sysconfig import get_python_inc
sys.stdout.write(get_python_inc())
""".strip()
# Python snippet that prints a Starlark struct with the wheel tag triple
# (interpreter, abi, platform); its output becomes defs.bzl.
_GET_PYTHON_SOABI = """
from packaging import tags
tag = next(iter(tags.sys_tags()))
print(f'PY_TAGS = struct(interpreter = "{tag.interpreter}", abi = "{tag.abi}", platform = "{tag.platform}")')
""".strip()
def _python_repo_impl(repository_ctx):
    """Creates external/<reponame>/BUILD, a python3 symlink, and other files."""
    repository_ctx.file("BUILD", _BUILD_FILE)
    # Locate the system interpreter's include directory and symlink it in.
    result = repository_ctx.execute(["python3", "-c", _GET_PYTHON_INCLUDE_DIR])
    if result.return_code:
        fail("Failed to run local Python interpreter: %s" % result.stderr)
    repository_ctx.symlink(result.stdout, "python3")
    # Record the interpreter's wheel tags (interpreter/abi/platform) in defs.bzl.
    result = repository_ctx.execute(["python3", "-c", _GET_PYTHON_SOABI])
    if result.return_code:
        fail("Failed to run local Python interpreter: %s" % result.stderr)
    repository_ctx.file("defs.bzl", result.stdout)
# Repository rule pointing at the system Python installation; 'configure'
# and 'local' force re-evaluation when the local machine changes.
python_repo = repository_rule(
    implementation = _python_repo_impl,
    configure = True,
    local = True,
)
| StarcoderdataPython |
5045047 | # Idea from
#<EMAIL>
def fiboSum(n):
    """Express a non-negative *n* as a sum of distinct Fibonacci numbers.

    Returns the chosen Fibonacci terms in ascending order, ``[1]`` for
    ``n == 1`` and ``[-1]`` for ``n == 0``.
    """
    if n == 0:
        return [-1]
    if n == 1:
        return [1]
    # Distinct Fibonacci numbers 1, 2, 3, 5, ... not exceeding n.
    candidates = [1, 2]
    while candidates[-1] + candidates[-2] <= n:
        candidates.append(candidates[-1] + candidates[-2])
    # Walk from the largest candidate down, discarding any term the sum can
    # do without; stop as soon as the remaining terms add up to n exactly.
    idx = len(candidates) - 1
    while sum(candidates) != n:
        without = sum(candidates) - candidates[idx]
        if without == n:
            candidates.pop(idx)
            return candidates
        if without <= n:
            idx -= 1               # this term is needed, keep it
        else:
            candidates.pop(idx)    # sum still too big without it, drop it
            idx -= 1
    return [-1]
46059 | <gh_stars>1-10
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from StringIO import StringIO
from robot.output import LOGGER
class OutputCapturer:
    """Captures Python and Java (Jython) stdout/stderr and forwards the
    captured text to Robot Framework's LOGGER when released."""
    def __init__(self, library_import=False):
        # When capturing around a library import, route messages through the
        # dedicated library-import logging mode for the capture's lifetime.
        if library_import:
            LOGGER.enable_library_import_logging()
        self._library_import = library_import
        self._python_out = _PythonCapturer(stdout=True)
        self._python_err = _PythonCapturer(stdout=False)
        self._java_out = _JavaCapturer(stdout=True)
        self._java_err = _JavaCapturer(stdout=False)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_trace):
        # Always release, even when the body raised; never suppress errors.
        self.release_and_log()
        return False
    def release_and_log(self):
        """Restore the original streams and log whatever was captured."""
        stdout, stderr = self._release()
        if stdout:
            LOGGER.log_output(stdout)
        if stderr:
            LOGGER.log_output(stderr)
            # Echo captured stderr to the real stderr as well.
            sys.__stderr__.write(stderr+'\n')
        if self._library_import:
            LOGGER.disable_library_import_logging()
    def _release(self):
        """Release all four capturers and return an (out, err) pair."""
        py_out = self._python_out.release()
        py_err = self._python_err.release()
        java_out = self._java_out.release()
        java_err = self._java_err.release()
        # This should return both Python and Java stdout/stderr.
        # It is unfortunately not possible to do py_out+java_out here, because
        # java_out is always Unicode and py_out is bytes (=str). When py_out
        # contains non-ASCII bytes catenation fails with UnicodeError.
        # Unfortunately utils.unic(py_out) doesn't work either, because later
        # splitting the output to levels and messages fails. Should investigate
        # why that happens. It also seems that the byte message are never
        # converted to Unicode - at least Message class doesn't do that.
        # It's probably safe to leave this code like it is in RF 2.5, because
        # a) the earlier versions worked the same way, and b) this code is
        # used so that there should never be output both from Python and Java.
        return (py_out, py_err) if (py_out or py_err) else (java_out, java_err)
class _PythonCapturer(object):
def __init__(self, stdout=True):
if stdout:
self._original = sys.stdout
self._set_stream = self._set_stdout
else:
self._original = sys.stderr
self._set_stream = self._set_stderr
self._stream = StringIO()
self._set_stream(self._stream)
def _set_stdout(self, stream):
sys.stdout = stream
def _set_stderr(self, stream):
sys.stderr = stream
def release(self):
# Original stream must be restored before closing the current
self._set_stream(self._original)
self._stream.flush()
output = self._stream.getvalue()
self._stream.close()
return output
# On CPython there is no Java stdout/stderr, so _JavaCapturer is a no-op
# stand-in; on Jython it redirects System.out/System.err into a byte buffer.
if not sys.platform.startswith('java'):
    class _JavaCapturer(object):
        """No-op capturer used when not running on Jython."""
        def __init__(self, stdout):
            pass
        def release(self):
            return ''
else:
    from java.io import PrintStream, ByteArrayOutputStream
    from java.lang import System
    class _JavaCapturer(object):
        """Captures Java-level System.out/System.err output on Jython."""
        def __init__(self, stdout=True):
            if stdout:
                self._original = System.out
                self._set_stream = System.setOut
            else:
                self._original = System.err
                self._set_stream = System.setErr
            self._bytes = ByteArrayOutputStream()
            # autoFlush=False; release() closes the stream to flush it.
            self._stream = PrintStream(self._bytes, False, 'UTF-8')
            self._set_stream(self._stream)
        def release(self):
            # Original stream must be restored before closing the current
            self._set_stream(self._original)
            self._stream.close()
            output = self._bytes.toString('UTF-8')
            self._bytes.reset()
            return output
| StarcoderdataPython |
8048029 | <filename>model/efficientnet.py
from efficientnet_pytorch import EfficientNet
import torch.nn.functional as F
import torch.nn as nn
class efficientnet(nn.Module):
    """Pretrained EfficientNet-B1 backbone followed by global average pooling
    and a linear classifier head."""
    def __init__(self, num_classes=1000):
        super(efficientnet, self).__init__()
        self.encoder = EfficientNet.from_pretrained('efficientnet-b1', num_classes=num_classes)
        self.GAP = nn.AdaptiveAvgPool2d((1, 1))
        # Linear head mapping the 1280-dim pooled feature to class logits.
        self.classifier = nn.Linear(1280, num_classes)
    def forward(self, x):
        """Return class logits for a batch of images."""
        return self.classifier(self.extract_feature(x))
    def extract_feature(self, x):
        """Return the pooled, flattened per-image feature vector."""
        pooled = self.GAP(self.encoder.extract_features(x))
        return pooled.view(pooled.size(0), -1)
1678723 | import os
from flask import Flask, render_template, request, make_response, session, url_for, redirect, flash, abort
from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = 'wohohoho'
# Upload configuration: permitted file extensions and destination directory.
ALLOWED_EXTENSION = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] ='uploads'
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSION."""
    _, dot, extension = filename.rpartition('.')
    return dot == '.' and extension.lower() in ALLOWED_EXTENSION
@app.route('/uploadfile', methods=['GET', 'POST'])
def uploadFile():
    """Render the upload form (GET) or save a permitted uploaded file (POST).

    Redirects back to the form when the file part is missing or the filename
    is empty; silently falls through to the form for disallowed extensions.
    """
    if request.method == 'POST':
        # Guard must run *before* indexing request.files: the original code
        # did request.files['file'] first, which raises a 400
        # BadRequestKeyError when the 'file' part is absent, so the
        # redirect below was unreachable in that case.
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path separators and unsafe characters.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return 'file berhasil disave di...' + filename
    return render_template('upload.html')
# Simple console calculator: reads two grades and prints their average.
print('='*21)
print('Calculadora de média')
print('='*21)
# float() raises ValueError on non-numeric input, matching original behavior.
n1 = float(input('Insira uma nota: '))
n2 = float(input('Insira a segunda nota: '))  # fixed prompt typo: "Insita" -> "Insira"
print(f'A média de notas é igual a {(n1+n2)/2:.1f}.')
8072258 | import dash_html_components as html
from app import app
# Dash page layout for this module: a single placeholder Div.
layout = html.Div(
    'animations'
)
11362659 | <reponame>curiousleo/wren
import re
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from pygments.lexer import RegexLexer
from pygments.token import *
class WrenLexer(RegexLexer):
    """Pygments lexer for the Wren scripting language.

    The extra 'parenthesized' and 'interpolation' states disambiguate a
    closing ')' that ends a plain parenthesized expression from one that
    ends a %( ... ) string interpolation.
    """
    name = 'Wren'
    aliases = ['wren']
    filenames = ['*.wren']
    # DOTALL so '.' rules inside comment/string states cross newlines.
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            # Whitespace.
            (r'\s+', Text),
            (r'[,\\\[\]{}]', Punctuation),
            # Push a parenthesized state so that we know the corresponding ')'
            # is for a parenthesized expression and not interpolation.
            (r'\(', Punctuation, ('parenthesized', 'root')),
            # In this state, we don't know whether a closing ')' is for a
            # parenthesized expression or the end of an interpolation. So, do
            # a non-consuming match and let the parent state (either
            # 'parenthesized' or 'interpolation' decide.
            (r'(?=\))', Text, '#pop'),
            # Keywords.
            (r'(break|class|construct|else|for|foreign|if|import|in|is|'
             r'return|static|super|var|while)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'this\b', Name.Builtin),
            # Comments.
            (r'/\*', Comment.Multiline, 'comment'),
            (r'//.*?$', Comment.Single),
            # Names and operators.
            (r'[~!$%^&*\-=+\\|/?<>\.:]+', Operator),
            (r'[A-Z][a-zA-Z_0-9]+', Name.Variable.Global),
            (r'__[a-zA-Z_0-9]+', Name.Variable.Class),
            (r'_[a-zA-Z_0-9]+', Name.Variable.Instance),
            (r'[a-z][a-zA-Z_0-9]+', Name),
            # Numbers.
            (r'\d+\.\d+([eE]-?\d+)?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            # Strings.
            (r'L?"', String, 'string'),
        ],
        'comment': [
            # Wren block comments nest, hence the #push on '/*'.
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'.', Comment.Multiline),  # All other characters.
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\[\\%0abfnrtv"\']', String.Escape),  # Escape.
            (r'\\x[a-fA-F0-9]{2}', String.Escape),  # Byte escape.
            (r'\\u[a-fA-F0-9]{4}', String.Escape),  # Unicode escape.
            (r'\\U[a-fA-F0-9]{8}', String.Escape),  # Long Unicode escape.
            (r'%\(', String.Interpol, ('interpolation', 'root')),
            (r'.', String),  # All other characters.
        ],
        'parenthesized': [
            # We only get to this state when we're at a ')'.
            (r'\)', Punctuation, '#pop'),
        ],
        'interpolation': [
            # We only get to this state when we're at a ')'.
            (r'\)', String.Interpol, '#pop'),
        ],
    }
| StarcoderdataPython |
1889385 | """
Server related classes (Daemon etc)
Pyro - Python Remote Objects. Copyright by <NAME> (<EMAIL>).
"""
import os
import sys
import uuid
import time
import socket
import collections
import threading
import logging
import inspect
import warnings
import serpent
from . import config, core, errors, serializers, socketutil, protocol, client
__all__ = ["Daemon", "DaemonObject", "callback", "expose", "behavior", "oneway"]
log = logging.getLogger("Pyro5.server")
# Dunder names that must stay private even though they match the regular
# __name__ pattern that is otherwise considered public below.
_private_dunder_methods = frozenset([
    "__init__", "__init_subclass__", "__class__", "__module__", "__weakref__",
    "__call__", "__new__", "__del__", "__repr__",
    "__str__", "__format__", "__nonzero__", "__bool__", "__coerce__",
    "__cmp__", "__eq__", "__ne__", "__hash__", "__ge__", "__gt__", "__le__", "__lt__",
    "__dir__", "__enter__", "__exit__", "__copy__", "__deepcopy__", "__sizeof__",
    "__getattr__", "__setattr__", "__hasattr__", "__getattribute__", "__delattr__",
    "__instancecheck__", "__subclasscheck__", "__getinitargs__", "__getnewargs__",
    "__getstate__", "__setstate__", "__reduce__", "__reduce_ex__", "__subclasshook__"
])
def is_private_attribute(attr_name):
    """returns if the attribute name is to be considered private or not."""
    if attr_name in _private_dunder_methods:
        return True
    if not attr_name.startswith('_'):
        return False
    # Regular dunder names (user-defined __magic__) remain public; every
    # other underscore-prefixed name is private.
    looks_like_dunder = (len(attr_name) > 4
                         and attr_name.startswith("__")
                         and attr_name.endswith("__"))
    return not looks_like_dunder
# decorators
def callback(method):
    """
    decorator to mark a method to be a 'callback'. This will make Pyro
    raise any errors also on the callback side, and not only on the side
    that does the callback call.
    """
    setattr(method, "_pyroCallback", True)
    return method
def oneway(method):
    """
    decorator to mark a method to be oneway (client won't wait for a response)
    """
    setattr(method, "_pyroOneway", True)
    return method
def expose(method_or_class):
    """
    Decorator to mark a method or class to be exposed for remote calls.
    You can apply it to a method or a class as a whole.
    If you need to change the default instance mode or instance creator, also use a @behavior decorator.

    Raises AttributeError when applied to a private name, or when stacked
    in the wrong order with @classmethod/@staticmethod.
    """
    # Properties: mark the underlying getter/setter/deleter function.
    if inspect.isdatadescriptor(method_or_class):
        func = method_or_class.fget or method_or_class.fset or method_or_class.fdel
        if is_private_attribute(func.__name__):
            raise AttributeError("exposing private names (starting with _) is not allowed")
        func._pyroExposed = True
        return method_or_class
    attrname = getattr(method_or_class, "__name__", None)
    if not attrname:
        # we could be dealing with a descriptor (classmethod/staticmethod), this means the order of the decorators is wrong
        if inspect.ismethoddescriptor(method_or_class):
            attrname = method_or_class.__get__(None, dict).__name__
            # Fixed typo in the error message: was "@taticmethod".
            raise AttributeError("using @expose on a classmethod/staticmethod must be done "
                                 "after @classmethod/@staticmethod. Method: " + attrname)
        else:
            raise AttributeError("@expose cannot determine what this is: "+repr(method_or_class))
    if is_private_attribute(attrname):
        raise AttributeError("exposing private names (starting with _) is not allowed")
    if inspect.isclass(method_or_class):
        # Applied to a class: expose every public member (methods, properties).
        clazz = method_or_class
        log.debug("exposing all members of %r", clazz)
        for name in clazz.__dict__:
            if is_private_attribute(name):
                continue
            thing = getattr(clazz, name)
            if inspect.isfunction(thing):
                thing._pyroExposed = True
            elif inspect.ismethod(thing):
                thing.__func__._pyroExposed = True
            elif inspect.isdatadescriptor(thing):
                if getattr(thing, "fset", None):
                    thing.fset._pyroExposed = True
                if getattr(thing, "fget", None):
                    thing.fget._pyroExposed = True
                if getattr(thing, "fdel", None):
                    thing.fdel._pyroExposed = True
        clazz._pyroExposed = True
        return clazz
    method_or_class._pyroExposed = True
    return method_or_class
def behavior(instance_mode="session", instance_creator=None):
    """
    Decorator to specify the server behavior of your Pyro class.
    """
    if not isinstance(instance_mode, str):
        # Catches @behavior applied without parentheses (class passed directly).
        raise SyntaxError("behavior decorator is missing argument(s)")
    def _behavior(clazz):
        if not inspect.isclass(clazz):
            raise TypeError("behavior decorator can only be used on a class")
        if instance_mode not in ("single", "session", "percall"):
            raise ValueError("invalid instance mode: " + instance_mode)
        if instance_creator and not callable(instance_creator):
            raise TypeError("instance_creator must be a callable")
        clazz._pyroInstancing = (instance_mode, instance_creator)
        return clazz
    return _behavior
@expose
class DaemonObject(object):
    """The part of the daemon that is exposed as a Pyro object."""
    def __init__(self, daemon):
        self.daemon = daemon
    def registered(self):
        """returns a list of all object names registered in this daemon"""
        return list(self.daemon.objectsById.keys())
    def ping(self):
        """a simple do-nothing method for testing purposes"""
        pass
    def info(self):
        """return some descriptive information about the daemon"""
        return "%s bound on %s, NAT %s, %d objects registered. Servertype: %s" % (
            core.DAEMON_NAME, self.daemon.locationStr, self.daemon.natLocationStr,
            len(self.daemon.objectsById), self.daemon.transportServer)
    def get_metadata(self, objectId, as_lists=False):
        """
        Get metadata for the given object (exposed methods, oneways, attributes).
        Raises DaemonError for unknown object ids.
        """
        obj = self.daemon.objectsById.get(objectId)
        if obj is not None:
            metadata = get_exposed_members(obj, as_lists=as_lists)
            if not metadata["methods"] and not metadata["attrs"]:
                # Something seems wrong: nothing is remotely exposed.
                warnings.warn("Class %r doesn't expose any methods or attributes. Did you forget setting @expose on them?" % type(obj))
            return metadata
        else:
            log.debug("unknown object requested: %s", objectId)
            raise errors.DaemonError("unknown object")
    def get_next_stream_item(self, streamId):
        """Return the next item of a server-side item stream.

        Raises PyroError when the stream id is unknown/terminated; any error
        from the underlying iterator (including StopIteration) removes the
        stream before being re-raised to the client.
        """
        if streamId not in self.daemon.streaming_responses:
            raise errors.PyroError("item stream terminated")
        client, timestamp, linger_timestamp, stream = self.daemon.streaming_responses[streamId]
        if client is None:
            # reset client connection association (can be None if proxy disconnected)
            self.daemon.streaming_responses[streamId] = (core.current_context.client, timestamp, 0, stream)
        try:
            return next(stream)
        except Exception:
            # in case of error (or StopIteration!) the stream is removed
            del self.daemon.streaming_responses[streamId]
            raise
    def close_stream(self, streamId):
        """Discard the given item stream, if it still exists (idempotent)."""
        if streamId in self.daemon.streaming_responses:
            del self.daemon.streaming_responses[streamId]
class Daemon(object):
    """
    Pyro daemon. Contains server side logic and dispatches incoming remote method calls
    to the appropriate objects.
    """
    def __init__(self, host=None, port=0, unixsocket=None, nathost=None, natport=None, interface=DaemonObject, connected_socket=None):
        """
        Create a daemon and bind its transport server.
        host/port: address to listen on (host defaults to config.HOST); unixsocket: path of a
        unix domain socket to serve on instead of a TCP address.
        nathost/natport: externally visible address, used when building URIs behind NAT.
        interface: class providing the daemon's own remote control object (registered
        under core.DAEMON_NAME).
        connected_socket: serve a single already-connected socket instead of opening
        a listening server socket (NAT settings are ignored in that case).
        """
        if connected_socket:
            nathost = natport = None
        else:
            if host is None:
                host = config.HOST
            elif not isinstance(host, str):
                host = str(host) # take care of the occasion where host is an ipaddress.IpAddress
            if nathost is None:
                nathost = config.NATHOST
            elif not isinstance(nathost, str):
                nathost = str(nathost) # take care of the occasion where host is an ipaddress.IpAddress
            if natport is None:
                natport = config.NATPORT or None
            if nathost and unixsocket:
                raise ValueError("cannot use nathost together with unixsocket")
            # XOR: either both or neither of nathost/natport must be given
            if (nathost is None) ^ (natport is None):
                raise ValueError("must provide natport with nathost")
        # both events start "set" (daemon not running) so shutdown() on a daemon
        # that never entered the request loop won't block; cleared at the end of init
        self.__mustshutdown = threading.Event()
        self.__mustshutdown.set()
        self.__loopstopped = threading.Event()
        self.__loopstopped.set()
        if connected_socket:
            from .svr_existingconn import SocketServer_ExistingConnection
            self.transportServer = SocketServer_ExistingConnection()
            self.transportServer.init(self, connected_socket)
        else:
            # pick the transport implementation according to the configured server type
            if config.SERVERTYPE == "thread":
                from .svr_threads import SocketServer_Threadpool
                self.transportServer = SocketServer_Threadpool()
            elif config.SERVERTYPE == "multiplex":
                from .svr_multiplex import SocketServer_Multiplex
                self.transportServer = SocketServer_Multiplex()
            else:
                raise errors.PyroError("invalid server type '%s'" % config.SERVERTYPE)
            self.transportServer.init(self, host, port, unixsocket)
        #: The location (str of the form ``host:portnumber``) on which the Daemon is listening
        self.locationStr = self.transportServer.locationStr
        log.debug("daemon created on %s - %s (pid %d)", self.locationStr, socketutil.family_str(self.transportServer.sock), os.getpid())
        natport_for_loc = natport
        if natport == 0:
            # expose internal port number as NAT port as well. (don't use port because it could be 0 and will be chosen by the OS)
            natport_for_loc = int(self.locationStr.split(":")[1])
        #: The NAT-location (str of the form ``nathost:natportnumber``) on which the Daemon is exposed for use with NAT-routing
        self.natLocationStr = "%s:%d" % (nathost, natport_for_loc) if nathost else None
        if self.natLocationStr:
            log.debug("NAT address is %s", self.natLocationStr)
        # register the daemon's own control object under the well-known daemon name
        pyroObject = interface(self)
        pyroObject._pyroId = core.DAEMON_NAME
        #: Dictionary from Pyro object id to the actual Pyro object registered by this id
        self.objectsById = {pyroObject._pyroId: pyroObject}
        log.debug("pyro protocol version: %d" % protocol.PROTOCOL_VERSION)
        self._pyroInstances = {} # pyro objects for instance_mode=single (singletons, just one per daemon)
        self.streaming_responses = {} # stream_id -> (client, creation_timestamp, linger_timestamp, stream)
        self.housekeeper_lock = threading.Lock()
        self.__mustshutdown.clear()
    # thin read-only accessors that delegate to the underlying transport server
    @property
    def sock(self):
        """the server socket used by the daemon"""
        return self.transportServer.sock
    @property
    def sockets(self):
        """list of all sockets used by the daemon (server socket and all active client sockets)"""
        return self.transportServer.sockets
    @property
    def selector(self):
        """the multiplexing selector used, if using the multiplex server type"""
        return self.transportServer.selector
    @staticmethod
    def serveSimple(objects, host=None, port=0, daemon=None, ns=True, verbose=True):
        """
        Basic method to fire up a daemon (or supply one yourself).
        objects is a dict containing objects to register as keys, and
        their names (or None) as values. If ns is true they will be registered
        in the naming server as well, otherwise they just stay local.
        If you need to publish on a unix domain socket you can't use this shortcut method.
        See the documentation on 'publishing objects' (in chapter: Servers) for more details.
        Note: this call blocks in the daemon's requestLoop and does not return.
        """
        if daemon is None:
            daemon = Daemon(host, port)
        with daemon:
            if ns:
                # locate the name server once, reuse it for all registrations below
                ns = core.locate_ns()
            for obj, name in objects.items():
                if ns:
                    localname = None # name is used for the name server
                else:
                    localname = name # no name server, use name in daemon
                uri = daemon.register(obj, localname)
                if verbose:
                    print("Object {0}:\n uri = {1}".format(repr(obj), uri))
                if name and ns:
                    ns.register(name, uri)
                    if verbose:
                        print(" name = {0}".format(name))
            if verbose:
                print("Pyro daemon running.")
            daemon.requestLoop()
def requestLoop(self, loopCondition=lambda: True):
"""
Goes in a loop to service incoming requests, until someone breaks this
or calls shutdown from another thread.
"""
self.__mustshutdown.clear()
log.info("daemon %s entering requestloop", self.locationStr)
try:
self.__loopstopped.clear()
self.transportServer.loop(loopCondition=lambda: not self.__mustshutdown.isSet() and loopCondition())
finally:
self.__loopstopped.set()
log.debug("daemon exits requestloop")
    def events(self, eventsockets):
        """for use in an external event loop: handle any requests that are pending for this daemon"""
        return self.transportServer.events(eventsockets)
    def shutdown(self):
        """Cleanly terminate a daemon that is running in the requestloop."""
        log.debug("daemon shutting down")
        self.streaming_responses = {}
        # NOTE(review): the short sleeps presumably give in-flight requests a moment to
        # finish before/after signalling shutdown — confirm against transport behavior
        time.sleep(0.02)
        self.__mustshutdown.set()
        if self.transportServer:
            self.transportServer.shutdown()
            time.sleep(0.02)
        self.close()
        self.__loopstopped.wait(timeout=5) # use timeout to avoid deadlock situations
    @property
    def _shutting_down(self):
        """True once shutdown has been requested (request loop will exit)."""
        return self.__mustshutdown.is_set()
    def _handshake(self, conn, denied_reason=None):
        """
        Perform connection handshake with new clients.
        Client sends a MSG_CONNECT message with a serialized data payload.
        If all is well, return with a CONNECT_OK message.
        The reason we're not doing this with a MSG_INVOKE method call on the daemon
        (like when retrieving the metadata) is because we need to force the clients
        to get past an initial connect handshake before letting them invoke any method.
        Return True for successful handshake, False if something was wrong.
        If a denied_reason is given, the handshake will fail with the given reason.
        """
        # defaults used if the failure happens before the client's message could be parsed
        serializer_id = serializers.MarshalSerializer.serializer_id
        msg_seq = 0
        try:
            msg = protocol.recv_stub(conn, [protocol.MSG_CONNECT])
            msg_seq = msg.seq
            if denied_reason:
                raise Exception(denied_reason)
            if config.LOGWIRE:
                protocol.log_wiredata(log, "daemon handshake received", msg)
            # adopt the client's correlation id if it sent one, otherwise create a fresh one
            if msg.flags & protocol.FLAGS_CORR_ID:
                core.current_context.correlation_id = uuid.UUID(bytes=msg.corr_id)
            else:
                core.current_context.correlation_id = uuid.uuid4()
            serializer_id = msg.serializer_id
            serializer = serializers.serializers_by_id[serializer_id]
            data = serializer.loads(msg.data)
            # user-overridable validation hook; raising here denies the connection
            handshake_response = self.validateHandshake(conn, data["handshake"])
            # piggyback the metadata of the requested object on the handshake response
            handshake_response = {
                "handshake": handshake_response,
                "meta": self.objectsById[core.DAEMON_NAME].get_metadata(data["object"], as_lists=True)
            }
            data = serializer.dumps(handshake_response)
            msgtype = protocol.MSG_CONNECTOK
        except errors.ConnectionClosedError:
            log.debug("handshake failed, connection closed early")
            return False
        except Exception as x:
            # any other failure is reported back to the client as a CONNECTFAIL message
            log.debug("handshake failed, reason:", exc_info=True)
            serializer = serializers.serializers_by_id[serializer_id]
            data = serializer.dumps(str(x))
            msgtype = protocol.MSG_CONNECTFAIL
        # We need a minimal amount of response data or the socket will remain blocked
        # on some systems... (messages smaller than 40 bytes)
        msg = protocol.SendingMessage(msgtype, 0, msg_seq, serializer_id, data, annotations=self.__annotations())
        if config.LOGWIRE:
            protocol.log_wiredata(log, "daemon handshake response", msg)
        conn.send(msg.data)
        return msg.type == protocol.MSG_CONNECTOK
    def validateHandshake(self, conn, data):
        """
        Override this to create a connection validator for new client connections.
        It should return a response data object normally if the connection is okay,
        or should raise an exception if the connection should be denied.
        The default implementation accepts every connection.
        """
        return "hello"
    def clientDisconnect(self, conn):
        """
        Override this to handle a client disconnect.
        Conn is the SocketConnection object that was disconnected.
        """
        pass
    def handleRequest(self, conn):
        """
        Handle incoming Pyro request. Catches any exception that may occur and
        wraps it in a reply to the calling side, as to not make this server side loop
        terminate due to exceptions caused by remote invocations.
        """
        # defaults used for the error response if parsing the request itself fails
        request_flags = 0
        request_seq = 0
        request_serializer_id = serializers.MarshalSerializer.serializer_id
        wasBatched = False
        isCallback = False
        try:
            msg = protocol.recv_stub(conn, [protocol.MSG_INVOKE, protocol.MSG_PING])
        except errors.CommunicationError as x:
            # we couldn't even get data from the client, this is an immediate error
            # log.info("error receiving data from client %s: %s", conn.sock.getpeername(), x)
            raise x
        try:
            request_flags = msg.flags
            request_seq = msg.seq
            request_serializer_id = msg.serializer_id
            # adopt the client's correlation id if it sent one, otherwise create a fresh one
            if msg.flags & protocol.FLAGS_CORR_ID:
                core.current_context.correlation_id = uuid.UUID(bytes=msg.corr_id)
            else:
                core.current_context.correlation_id = uuid.uuid4()
            if config.LOGWIRE:
                protocol.log_wiredata(log, "daemon wiredata received", msg)
            if msg.type == protocol.MSG_PING:
                # return same seq, but ignore any data (it's a ping, not an echo). Nothing is deserialized.
                msg = protocol.SendingMessage(protocol.MSG_PING, 0, msg.seq, msg.serializer_id, b"pong", annotations=self.__annotations())
                if config.LOGWIRE:
                    protocol.log_wiredata(log, "daemon wiredata sending", msg)
                conn.send(msg.data)
                return
            serializer = serializers.serializers_by_id[msg.serializer_id]
            if request_flags & protocol.FLAGS_KEEPSERIALIZED:
                # pass on the wire protocol message blob unchanged
                objId, method, vargs, kwargs = self.__deserializeBlobArgs(msg)
            else:
                # normal deserialization of remote call arguments
                objId, method, vargs, kwargs = serializer.loadsCall(msg.data)
            # populate the per-call context so user code can inspect the caller
            core.current_context.client = conn
            try:
                # store, because on oneway calls, socket will be disconnected:
                core.current_context.client_sock_addr = conn.sock.getpeername()
            except socket.error:
                core.current_context.client_sock_addr = None # sometimes getpeername() doesn't work...
            core.current_context.seq = msg.seq
            core.current_context.annotations = msg.annotations
            core.current_context.msg_flags = msg.flags
            core.current_context.serializer_id = msg.serializer_id
            del msg # invite GC to collect the object, don't wait for out-of-scope
            obj = self.objectsById.get(objId)
            if obj is not None:
                if inspect.isclass(obj):
                    # a class was registered; obtain the instance per its instance mode
                    obj = self._getInstance(obj, conn)
                if request_flags & protocol.FLAGS_BATCH:
                    # batched method calls, loop over them all and collect all results
                    data = []
                    for method, vargs, kwargs in vargs:
                        method = get_attribute(obj, method)
                        try:
                            result = method(*vargs, **kwargs) # this is the actual method call to the Pyro object
                        except Exception as xv:
                            log.debug("Exception occurred while handling batched request: %s", xv)
                            xv._pyroTraceback = errors.format_traceback(detailed=config.DETAILED_TRACEBACK)
                            data.append(core._ExceptionWrapper(xv))
                            break # stop processing the rest of the batch
                        else:
                            data.append(result) # note that we don't support streaming results in batch mode
                    wasBatched = True
                else:
                    # normal single method call
                    if method == "__getattr__":
                        # special case for direct attribute access (only exposed @properties are accessible)
                        data = get_exposed_property_value(obj, vargs[0])
                    elif method == "__setattr__":
                        # special case for direct attribute access (only exposed @properties are accessible)
                        data = set_exposed_property_value(obj, vargs[0], vargs[1])
                    else:
                        method = get_attribute(obj, method)
                        if request_flags & protocol.FLAGS_ONEWAY:
                            # oneway call to be run inside its own thread, otherwise client blocking can still occur
                            # on the next call on the same proxy
                            _OnewayCallThread(target=method, args=vargs, kwargs=kwargs).start()
                        else:
                            isCallback = getattr(method, "_pyroCallback", False)
                            data = method(*vargs, **kwargs) # this is the actual method call to the Pyro object
                if not request_flags & protocol.FLAGS_ONEWAY:
                    isStream, data = self._streamResponse(data, conn)
                    if isStream:
                        # throw an exception as well as setting message flags
                        # this way, it is backwards compatible with older pyro versions.
                        exc = errors.ProtocolError("result of call is an iterator")
                        ann = {"STRM": data.encode()} if data else {}
                        self._sendExceptionResponse(conn, request_seq, serializer.serializer_id, exc, None,
                                                    annotations=ann, flags=protocol.FLAGS_ITEMSTREAMRESULT)
                        return
            else:
                log.debug("unknown object requested: %s", objId)
                raise errors.DaemonError("unknown object")
            if request_flags & protocol.FLAGS_ONEWAY:
                return # oneway call, don't send a response
            else:
                data = serializer.dumps(data)
                response_flags = 0
                if wasBatched:
                    response_flags |= protocol.FLAGS_BATCH
                msg = protocol.SendingMessage(protocol.MSG_RESULT, response_flags, request_seq, serializer.serializer_id, data,
                                              annotations=self.__annotations())
                # response annotations are per-call; reset them once consumed
                core.current_context.response_annotations = {}
                if config.LOGWIRE:
                    protocol.log_wiredata(log, "daemon wiredata sending", msg)
                conn.send(msg.data)
        except Exception as xv:
            msg = getattr(xv, "pyroMsg", None)
            if msg:
                request_seq = msg.seq
                request_serializer_id = msg.serializer_id
            if not isinstance(xv, errors.ConnectionClosedError):
                if not isinstance(xv, (StopIteration, GeneratorExit)):
                    log.debug("Exception occurred while handling request: %r", xv)
                if not request_flags & protocol.FLAGS_ONEWAY:
                    if isinstance(xv, errors.SerializeError) or not isinstance(xv, errors.CommunicationError):
                        # only return the error to the client if it wasn't a oneway call, and not a communication error
                        # (in these cases, it makes no sense to try to report the error back to the client...)
                        tblines = errors.format_traceback(detailed=config.DETAILED_TRACEBACK)
                        self._sendExceptionResponse(conn, request_seq, request_serializer_id, xv, tblines)
            if isCallback or isinstance(xv, (errors.CommunicationError, errors.SecurityError)):
                raise # re-raise if flagged as callback, communication or security error.
def _clientDisconnect(self, conn):
if config.ITER_STREAM_LINGER > 0:
# client goes away, keep streams around for a bit longer (allow reconnect)
for streamId in list(self.streaming_responses):
info = self.streaming_responses.get(streamId, None)
if info and info[0] is conn:
_, timestamp, _, stream = info
self.streaming_responses[streamId] = (None, timestamp, time.time(), stream)
else:
# client goes away, close any streams it had open as well
for streamId in list(self.streaming_responses):
info = self.streaming_responses.get(streamId, None)
if info and info[0] is conn:
del self.streaming_responses[streamId]
self.clientDisconnect(conn) # user overridable hook
    def _housekeeping(self):
        """
        Perform periodical housekeeping actions (cleanups etc)
        Entry layout in streaming_responses: (client, creation_timestamp, linger_timestamp, stream);
        index 1 is used for lifetime expiry, index 2 for linger expiry.
        """
        if self._shutting_down:
            return
        with self.housekeeper_lock:
            if self.streaming_responses:
                if config.ITER_STREAM_LIFETIME > 0:
                    # cleanup iter streams that are past their lifetime
                    for streamId in list(self.streaming_responses.keys()):
                        info = self.streaming_responses.get(streamId, None)
                        if info:
                            last_use_period = time.time() - info[1]
                            if 0 < config.ITER_STREAM_LIFETIME < last_use_period:
                                del self.streaming_responses[streamId]
                if config.ITER_STREAM_LINGER > 0:
                    # cleanup iter streams that are past their linger time
                    for streamId in list(self.streaming_responses.keys()):
                        info = self.streaming_responses.get(streamId, None)
                        # info[2] is nonzero only for streams whose client disconnected
                        if info and info[2]:
                            linger_period = time.time() - info[2]
                            if linger_period > config.ITER_STREAM_LINGER:
                                del self.streaming_responses[streamId]
            self.housekeeping()
    def housekeeping(self):
        """
        Override this to add custom periodic housekeeping (cleanup) logic.
        This will be called every few seconds by the running daemon's request loop.
        The default implementation does nothing.
        """
        pass
def _getInstance(self, clazz, conn):
"""
Find or create a new instance of the class
"""
def createInstance(clazz, creator):
try:
if creator:
obj = creator(clazz)
if isinstance(obj, clazz):
return obj
raise TypeError("instance creator returned object of different type")
return clazz()
except Exception:
log.exception("could not create pyro object instance")
raise
instance_mode, instance_creator = clazz._pyroInstancing
if instance_mode == "single":
# create and use one singleton instance of this class (not a global singleton, just exactly one per daemon)
instance = self._pyroInstances.get(clazz)
if not instance:
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
instance = createInstance(clazz, instance_creator)
self._pyroInstances[clazz] = instance
return instance
elif instance_mode == "session":
# Create and use one instance for this proxy connection
# the instances are kept on the connection object.
# (this is the default instance mode when using new style @expose)
instance = conn.pyroInstances.get(clazz)
if not instance:
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
instance = createInstance(clazz, instance_creator)
conn.pyroInstances[clazz] = instance
return instance
elif instance_mode == "percall":
# create and use a new instance just for this call
log.debug("instancemode %s: creating new pyro object for %s", instance_mode, clazz)
return createInstance(clazz, instance_creator)
else:
raise errors.DaemonError("invalid instancemode in registered class")
def _sendExceptionResponse(self, connection, seq, serializer_id, exc_value, tbinfo, flags=0, annotations=None):
"""send an exception back including the local traceback info"""
exc_value._pyroTraceback = tbinfo
serializer = serializers.serializers_by_id[serializer_id]
try:
data = serializer.dumps(exc_value)
except:
# the exception object couldn't be serialized, use a generic PyroError instead
xt, xv, tb = sys.exc_info()
msg = "Error serializing exception: %s. Original exception: %s: %s" % (str(xv), type(exc_value), str(exc_value))
exc_value = errors.PyroError(msg)
exc_value._pyroTraceback = tbinfo
data = serializer.dumps(exc_value)
flags |= protocol.FLAGS_EXCEPTION
annotations = dict(annotations or {})
annotations.update(self.annotations())
msg = protocol.SendingMessage(protocol.MSG_RESULT, flags, seq, serializer.serializer_id, data, annotations=annotations)
if config.LOGWIRE:
protocol.log_wiredata(log, "daemon wiredata sending (error response)", msg)
connection.send(msg.data)
    def register(self, obj_or_class, objectId=None, force=False):
        """
        Register a Pyro object under the given id. Note that this object is now only
        known inside this daemon, it is not automatically available in a name server.
        This method returns a URI for the registered object.
        Pyro checks if an object is already registered, unless you set force=True.
        You can register a class or an object (instance) directly.
        For a class, Pyro will create instances of it to handle the remote calls according
        to the instance_mode (set via @expose on the class). The default there is one object
        per session (=proxy connection). If you register an object directly, Pyro will use
        that single object for *all* remote calls.
        """
        if objectId:
            if not isinstance(objectId, str):
                raise TypeError("objectId must be a string or None")
        else:
            objectId = "obj_" + uuid.uuid4().hex # generate a new objectId
        if inspect.isclass(obj_or_class):
            # classes registered without explicit instancing info default to per-session instances
            if not hasattr(obj_or_class, "_pyroInstancing"):
                obj_or_class._pyroInstancing = ("session", None)
        if not force:
            if hasattr(obj_or_class, "_pyroId") and obj_or_class._pyroId != "": # check for empty string is needed for Cython
                raise errors.DaemonError("object or class already has a Pyro id")
            if objectId in self.objectsById:
                raise errors.DaemonError("an object or class is already registered with that id")
        # set some pyro attributes
        obj_or_class._pyroId = objectId
        obj_or_class._pyroDaemon = self
        # register a custom serializer for the type to automatically return proxies
        # we need to do this for all known serializers
        for ser in serializers.serializers.values():
            if inspect.isclass(obj_or_class):
                ser.register_type_replacement(obj_or_class, pyro_obj_to_auto_proxy)
            else:
                ser.register_type_replacement(type(obj_or_class), pyro_obj_to_auto_proxy)
        # register the object/class in the mapping
        self.objectsById[obj_or_class._pyroId] = obj_or_class
        return self.uriFor(objectId)
    def unregister(self, objectOrId):
        """
        Remove a class or object from the known objects inside this daemon.
        You can unregister the class/object directly, or with its id.
        The daemon's own control object (DAEMON_NAME) can never be unregistered.
        """
        if objectOrId is None:
            raise ValueError("object or objectid argument expected")
        if not isinstance(objectOrId, str):
            # an object/class was passed in; look up its registered id
            objectId = getattr(objectOrId, "_pyroId", None)
            if objectId is None:
                raise errors.DaemonError("object isn't registered")
        else:
            objectId = objectOrId
            objectOrId = None
        if objectId == core.DAEMON_NAME:
            # silently refuse to unregister the daemon's own object
            return
        if objectId in self.objectsById:
            del self.objectsById[objectId]
            if objectOrId is not None:
                # strip the pyro attributes that register() added
                del objectOrId._pyroId
                del objectOrId._pyroDaemon
        # Don't remove the custom type serializer because there may be
        # other registered objects of the same type still depending on it.
def uriFor(self, objectOrId, nat=True):
"""
Get a URI for the given object (or object id) from this daemon.
Only a daemon can hand out proper uris because the access location is
contained in them.
Note that unregistered objects cannot be given an uri, but unregistered
object names can (it's just a string we're creating in that case).
If nat is set to False, the configured NAT address (if any) is ignored and it will
return an URI for the internal address.
"""
if not isinstance(objectOrId, str):
objectOrId = getattr(objectOrId, "_pyroId", None)
if objectOrId is None or objectOrId not in self.objectsById:
raise errors.DaemonError("object isn't registered in this daemon")
if nat:
loc = self.natLocationStr or self.locationStr
else:
loc = self.locationStr
return core.URI("PYRO:%s@%s" % (objectOrId, loc))
    def resetMetadataCache(self, objectOrId, nat=True):
        """Reset cache of metadata when a Daemon has available methods/attributes
        dynamically updated. Clients will have to get a new proxy to see changes"""
        uri = self.uriFor(objectOrId, nat)
        # can only be cached if registered, else no-op
        if uri.object in self.objectsById:
            registered_object = self.objectsById[uri.object]
            # Clear cache regardless of how it is accessed
            # (the cache key includes the as_lists flag, so both variants must be dropped)
            reset_exposed_members(registered_object, as_lists=True)
            reset_exposed_members(registered_object, as_lists=False)
    def proxyFor(self, objectOrId, nat=True):
        """
        Get a fully initialized Pyro Proxy for the given object (or object id) for this daemon.
        If nat is False, the configured NAT address (if any) is ignored.
        The object or id must be registered in this daemon, or you'll get an exception.
        (you can't get a proxy for an unknown object)
        """
        uri = self.uriFor(objectOrId, nat)
        proxy = client.Proxy(uri)
        try:
            registered_object = self.objectsById[uri.object]
        except KeyError:
            raise errors.DaemonError("object isn't registered in this daemon")
        # prime the proxy with the object's metadata so it doesn't need a remote round trip
        meta = get_exposed_members(registered_object)
        proxy._pyroGetMetadata(known_metadata=meta)
        return proxy
    def close(self):
        """Close down the server and release resources"""
        self.__mustshutdown.set()
        self.streaming_responses = {}
        if self.transportServer:
            log.debug("daemon closing")
            self.transportServer.close()
            # setting it to None marks this daemon as unusable (see __enter__)
            self.transportServer = None
    def annotations(self):
        """Override to return a dict with custom user annotations to be sent with each response message."""
        return {}
    def combine(self, daemon):
        """
        Combines the event loop of the other daemon in the current daemon's loop.
        You can then simply run the current daemon's requestLoop to serve both daemons.
        This works fine on the multiplex server type, but doesn't work with the threaded server type.
        """
        log.debug("combining event loop with other daemon")
        self.transportServer.combine_loop(daemon.transportServer)
    def __annotations(self):
        """Merge the daemon's own annotations into the per-call response annotations.
        Note: this updates the context's response_annotations dict in place; handleRequest
        resets that dict after each response has been sent."""
        annotations = core.current_context.response_annotations
        annotations.update(self.annotations())
        return annotations
    def __repr__(self):
        # a daemon restored from serialized form has no locationStr (see __getstate__)
        if hasattr(self, "locationStr"):
            family = socketutil.family_str(self.sock)
            return "<%s.%s at 0x%x; %s - %s; %d objects>" % (self.__class__.__module__, self.__class__.__name__,
                                                             id(self), self.locationStr, family, len(self.objectsById))
        else:
            # daemon objects may come back from serialized form without being properly initialized (by design)
            return "<%s.%s at 0x%x; unusable>" % (self.__class__.__module__, self.__class__.__name__, id(self))
    def __enter__(self):
        # a closed daemon has transportServer set to None (see close()) and cannot be reused
        if not self.transportServer:
            raise errors.PyroError("cannot reuse this object")
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __getstate__(self):
        # A little hack to make it possible to serialize Pyro objects, because they can reference a daemon,
        # but it is not meant to be able to properly serialize/deserialize Daemon objects.
        return tuple()
    def __setstate__(self, state):
        # matching counterpart of __getstate__: nothing to restore
        assert len(state) == 0
__lazy_dict_iterator_types = (type({}.keys()), type({}.values()), type({}.items()))
def _streamResponse(self, data, client):
if isinstance(data, collections.Iterator) or inspect.isgenerator(data):
if config.ITER_STREAMING:
if type(data) in self.__lazy_dict_iterator_types:
raise errors.PyroError("won't serialize or stream lazy dict iterators, convert to list yourself")
stream_id = str(uuid.uuid4())
self.streaming_responses[stream_id] = (client, time.time(), 0, data)
return True, stream_id
return True, None
return False, data
    def __deserializeBlobArgs(self, protocolmsg):
        """Extract the object id, method name and a single SerializedBlob argument from a
        message whose payload must stay serialized (FLAGS_KEEPSERIALIZED)."""
        import marshal
        # NOTE(review): the "BLBI" annotation comes off the wire from the client;
        # marshal is not hardened against malicious input — confirm this is acceptable here
        blobinfo = protocolmsg.annotations["BLBI"]
        blobinfo, objId, method = marshal.loads(blobinfo)
        blob = client.SerializedBlob(blobinfo, protocolmsg, is_blob=True)
        return objId, method, (blob,), {} # object, method, vargs, kwargs
# register the special serializers for the pyro objects
# (a serialized Daemon is reduced to a plain dict; see Daemon.__getstate__ for the rationale)
serpent.register_class(Daemon, serializers.pyro_class_serpent_serializer)
serializers.SerializerBase.register_class_to_dict(Daemon, serializers.serialize_pyro_object_to_dict, serpent_too=False)
def pyro_obj_to_auto_proxy(obj):
    """reduce function that automatically replaces Pyro objects by a Proxy"""
    daemon = getattr(obj, "_pyroDaemon", None)
    # only registered pyro objects (they carry a _pyroDaemon) are turned into a proxy
    return daemon.proxyFor(obj) if daemon else obj
def get_attribute(obj, attr):
    """
    Resolves an attribute name to an object. Raises
    an AttributeError if any attribute in the chain starts with a '``_``'.
    Doesn't resolve a dotted name, because that is a security vulnerability.
    It treats it as a single attribute name (and the lookup will likely fail).
    """
    if is_private_attribute(attr):
        raise AttributeError("attempt to access private attribute '%s'" % attr)
    resolved = getattr(obj, attr)
    if getattr(resolved, "_pyroExposed", False):
        return resolved
    raise AttributeError("attempt to access unexposed attribute '%s'" % attr)
# cache of computed member metadata: (class, only_exposed, as_lists) -> metadata dict
__exposed_member_cache = {}
def reset_exposed_members(obj, only_exposed=True, as_lists=False):
    """Delete any cached exposed members forcing recalculation on next request"""
    if not inspect.isclass(obj):
        obj = obj.__class__
    cache_key = (obj, only_exposed, as_lists)
    __exposed_member_cache.pop(cache_key, None)
def get_exposed_members(obj, only_exposed=True, as_lists=False, use_cache=True):
    """
    Return public and exposed members of the given object's class.
    You can also provide a class directly.
    Private members are ignored no matter what (names starting with underscore).
    If only_exposed is True, only members tagged with the @expose decorator are
    returned. If it is False, all public members are returned.
    The return value consists of the exposed methods, exposed attributes, and methods
    tagged as @oneway.
    (All this is used as meta data that Pyro sends to the proxy if it asks for it)
    as_lists is meant for python 2 compatibility.
    """
    if not inspect.isclass(obj):
        obj = obj.__class__
    cache_key = (obj, only_exposed, as_lists)
    if use_cache and cache_key in __exposed_member_cache:
        return __exposed_member_cache[cache_key]
    methods = set() # all methods
    oneway = set() # oneway methods
    attrs = set() # attributes
    for m in dir(obj): # also lists names inherited from super classes
        if is_private_attribute(m):
            continue
        v = getattr(obj, m)
        if inspect.ismethod(v) or inspect.isfunction(v):
            if getattr(v, "_pyroExposed", not only_exposed):
                methods.add(m)
                # check if the method is marked with the 'oneway' decorator:
                if getattr(v, "_pyroOneway", False):
                    oneway.add(m)
        elif inspect.isdatadescriptor(v):
            # a property counts as exposed if any of its accessor functions is tagged
            func = getattr(v, "fget", None) or getattr(v, "fset", None) or getattr(v, "fdel", None)
            if func is not None and getattr(func, "_pyroExposed", not only_exposed):
                attrs.add(m)
        # Note that we don't expose plain class attributes no matter what.
        # it is a syntax error to add a decorator on them, and it is not possible
        # to give them a _pyroExposed tag either.
        # The way to expose attributes is by using properties for them.
        # This automatically solves the protection/security issue: you have to
        # explicitly decide to make an attribute into a @property (and to @expose it)
        # before it becomes remotely accessible.
    if as_lists:
        methods = list(methods)
        oneway = list(oneway)
        attrs = list(attrs)
    result = {
        "methods": methods,
        "oneway": oneway,
        "attrs": attrs
    }
    # memoize; see reset_exposed_members for invalidation
    __exposed_member_cache[cache_key] = result
    return result
def get_exposed_property_value(obj, propname, only_exposed=True):
    """
    Return the value of an @exposed @property.
    If the requested property is not a @property or not exposed,
    an AttributeError is raised instead.
    """
    descriptor = getattr(obj.__class__, propname)
    if inspect.isdatadescriptor(descriptor):
        getter = descriptor.fget
        if getter and getattr(getter, "_pyroExposed", not only_exposed):
            return getter(obj)
    raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
def set_exposed_property_value(obj, propname, value, only_exposed=True):
    """
    Sets the value of an @exposed @property.
    If the requested property is not a @property or not exposed,
    an AttributeError is raised instead.
    """
    descriptor = getattr(obj.__class__, propname)
    if inspect.isdatadescriptor(descriptor):
        # the exposure tag lives on whichever accessor exists first (normally fget)
        marker_func = descriptor.fget or descriptor.fset or descriptor.fdel
        if descriptor.fset and getattr(marker_func, "_pyroExposed", not only_exposed):
            return descriptor.fset(obj, value)
    raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
class _OnewayCallThread(threading.Thread):
    """Worker thread that runs a oneway method call in the background,
    carrying over the calling thread's Pyro call context."""
    def __init__(self, target, args, kwargs):
        super(_OnewayCallThread, self).__init__(target=target, args=args, kwargs=kwargs, name="oneway-call")
        # daemon thread: must not keep the interpreter alive on exit
        self.daemon = True
        # snapshot the per-call context so the worker sees the same call info
        self.parent_context = core.current_context.to_global()
    def run(self):
        core.current_context.from_global(self.parent_context)
        super(_OnewayCallThread, self).run()
| StarcoderdataPython |
3215090 | <gh_stars>0
from graphviz import Digraph, Source, render
from datetime import datetime
import sys
# input edge-list file: one "tail head weight" triple per line
file = sys.argv[1]
# timestamp used to build unique output filenames
system_time = datetime.now().strftime("%Y-%m-%d %H-%M-%S")

if __name__ == "__main__":
    # read the edge list once instead of re-reading the file for every engine,
    # and use a context manager so the file handle is always closed
    with open(file, "r") as f:
        edges = [line.split() for line in f]
    for eng in ("dot", "neato", "twopi", "circo", "fdp", "sfdp"):
        dot = Digraph(comment=system_time + '_' + eng)
        for edge in edges:
            dot.edge(edge[0], edge[1], weight=edge[2])
        src = Source(dot.source)
        filename = system_time + '_' + eng + '.gv'
        src.render(filename)
        # re-render the saved .gv source to SVG with the requested layout engine
        render(eng, 'svg', filename)
8098497 | <gh_stars>1-10
import os
import sys
import argparse
from core.config import conf
# command line interface for the server launcher
parser = argparse.ArgumentParser()
parser.add_argument("--develop", action="store_true", help="Run app on development mode.")
parser.add_argument("--log", type=int, help="Set log level", default=None)
parser.add_argument("-d", "--daemon", action="store_true", help="Set daemon")
parser.add_argument("-r", "--reload", action="store_true", help="restart server(Daemon)")
if __name__ == "__main__":
    args = parser.parse_args(sys.argv[1:])
    if args.reload:
        # stop a previously daemonized server via the pid it recorded
        if os.path.isfile("server.pid"):
            with open("server.pid", "r") as f:
                os.kill(int(f.read()), 15) # SIGTERM
        else:
            print("no pid file, pass")
    if args.develop:
        # development mode: foreground process, verbose loop, no log files, no rewrite-only
        conf.set("server", "daemon", False)
        conf.set("server", "loop_debug", True)
        conf.set("logger", "save_log", False)
        conf.set("http", "rewrite_only", False)
    if args.log is not None:
        conf.set("logger", "level", args.log)
    # NOTE(review): these imports are deliberately deferred until after the conf.set
    # calls above — presumably core.route/core.server read the config at import time; confirm
    from core.route import get_handler
    from core.server import FullAsyncServer
    if conf.get("server", "daemon") or args.daemon:
        # daemonize before the server binds, recording our pid for --reload
        from core.ext import daemon
        daemon.daemon("server.pid")
    try:
        FullAsyncServer(
            handler=get_handler()
        ).run()
    except OSError as e:
        # typically the listen address/port is unavailable
        print(e)
1940213 | from django.conf.urls import patterns, url
# URL routes for the dc template API.
# NOTE(review): the string-view `patterns()` syntax was removed in Django 1.10;
# fine for the Django version this project pins — confirm before upgrading.
urlpatterns = patterns(
    'api.dc.template.views',
    # /template - get
    url(r'^$', 'dc_template_list', name='api_dc_template_list'),
    # /template/<name> - get, create, delete
    url(r'^(?P<name>[A-Za-z0-9\._-]+)/$', 'dc_template', name='api_dc_template'),
)
| StarcoderdataPython |
132267 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# @Time : 2017/8/8 12:33
# @Author : play4fun
# @File : knn-find_nearest.py
# @Software: PyCharm
"""
knn-find_nearest.py:
http://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Machine_Learning_Classification_K-nearest_neighbors_k-NN.php
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Feature set containing (x,y) values of 25 known/training data
trainData = np.random.randint(0, 100, (25, 2)).astype(np.float32)
# Labels each one either Red or Blue with numbers 0 and 1
responses = np.random.randint(0, 2, (25, 1)).astype(np.float32)
# plot Reds
red = trainData[responses.ravel() == 0]
plt.scatter(red[:, 0], red[:, 1], 80, 'r', '^')
# plot Blues
blue = trainData[responses.ravel() == 1]
plt.scatter(blue[:, 0], blue[:, 1], 80, 'b', 's')
# KNearest instance (OpenCV 3+ cv2.ml API)
knn = cv2.ml.KNearest_create()
# trains the model; the cv2.ml API requires the sample-layout flag between the
# samples and the responses (omitting it caused the previous TypeError)
knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)
# New sample : (x,y)
newcomer = np.random.randint(0, 100, (1, 2)).astype(np.float32)
plt.scatter(newcomer[:, 0], newcomer[:, 1], 80, 'g', 'o')
# Finds the 3 nearest neighbors and predicts responses for input vectors
# (cv2.ml.KNearest names this findNearest, not find_nearest)
ret, results, neighbours, dist = knn.findNearest(newcomer, 3)
print("result: ", results, "\n")
print("neighbours: ", neighbours, "\n")
print("distance: ", dist)
plt.show()
| StarcoderdataPython |
8059611 | <filename>reg.py
import re
import os
import os.path as osp
import gc
import argparse
from datetime import datetime
from typing import List
import numpy as np
from skimage.transform import AffineTransform, warp
import pandas as pd
import cv2 as cv
import tifffile as tif
import dask
from metadata_handling import generate_new_metadata, get_dataset_structure
from tile_registration import get_features, register_img_pair
Image = np.ndarray
def alphaNumOrder(string):
    """Return *string* with every digit run zero-padded to five digits.

    Makes plain lexicographic sorting behave like natural numeric ordering,
    e.g. alphaNumOrder("a6b12.125") == "a00006b00012.00125".
    """
    pieces = []
    for token in re.split(r'(\d+)', string):
        if token.isdigit():
            pieces.append(format(int(token), '05d'))
        else:
            pieces.append(token)
    return ''.join(pieces)
def save_param(img_paths, out_dir, transform_matrices_flat, padding, image_shape):
    """Persist the estimated registration parameters as a CSV table.

    One row per input image: the flattened affine matrix values, a human
    readable dataset name, the per-side padding and the common canvas size.
    """
    table = pd.DataFrame(transform_matrices_flat)
    for row in table.index:
        base = os.path.basename(img_paths[row])
        table.loc[row, 'name'] = 'dataset_{id}_{name}'.format(id=row + 1, name=base)
    # move the freshly appended 'name' column to the front
    columns = table.columns.to_list()
    table = table[columns[-1:] + columns[:-1]]
    for row in range(len(padding)):
        left, right, top, bottom = padding[row]
        table.loc[row, 'left'] = left
        table.loc[row, 'right'] = right
        table.loc[row, 'top'] = top
        table.loc[row, 'bottom'] = bottom
        table.loc[row, 'width'] = image_shape[1]
        table.loc[row, 'height'] = image_shape[0]
    try:
        table.to_csv(out_dir + 'registration_parameters.csv', index=False)
    except PermissionError:
        # the target file may be locked (e.g. open in another program)
        table.to_csv(out_dir + 'registration_parameters_1.csv', index=False)
def calculate_padding_size(bigger_shape, smaller_shape):
    """Split the size difference of one image dimension into two pads.

    Returns a ``(dim1, dim2)`` pair whose sum equals
    ``bigger_shape - smaller_shape``.  An even difference is split equally;
    an odd difference puts the extra pixel in ``dim2`` — except a difference
    of exactly 1, which is assigned entirely to ``dim1``.
    """
    diff = bigger_shape - smaller_shape
    if diff == 1:
        return 1, 0
    half = int(diff // 2)
    if diff % 2 != 0:
        return half, half + 1
    return half, half
def pad_to_size(target_shape, img):
    """Zero-pad *img* (centred) up to *target_shape*.

    Returns the padded image together with the ``(left, right, top, bottom)``
    padding that was applied — all zeros when the image already matches.
    """
    if img.shape == target_shape:
        return img, (0, 0, 0, 0)
    left, right = calculate_padding_size(target_shape[1], img.shape[1])
    top, bottom = calculate_padding_size(target_shape[0], img.shape[0])
    padded = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, None, 0)
    return padded, (left, right, top, bottom)
def read_and_max_project_pages(img_path: str, tiff_pages: List[int]) -> Image:
    """Read the given TIFF pages and collapse them into an 8-bit max projection.

    :param img_path: path of the TIFF file to read from.
    :param tiff_pages: page indices to project; must contain at least one.
    :return: uint8 image, min-max normalized to the 0-255 range.
    """
    max_proj = tif.imread(img_path, key=tiff_pages[0])
    # iterate over a slice instead of ``del tiff_pages[0]`` so the caller's
    # list is left untouched (the original mutated its argument in place)
    for page in tiff_pages[1:]:
        max_proj = np.maximum(max_proj, tif.imread(img_path, key=page))
    return cv.normalize(max_proj, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
def estimate_registration_parameters(dataset_structure, ref_cycle_id, tile_size):
    """Estimate one affine transform per imaging cycle against a reference.

    Every image is padded to the largest (y, x) footprint across all cycles,
    features are extracted from a max projection of the reference channel,
    and each non-reference cycle is registered against the reference.

    :param dataset_structure: per-cycle dict with 'img_path', 'img_structure'
        (channel -> zplane -> tiff page) and 'ref_channel_id'.
    :param ref_cycle_id: key of the cycle used as registration reference.
    :param tile_size: side length of the square tiles used by get_features().
    :return: (transform_matrices, target_shape, padding) — one 2x3 matrix and
        one (left, right, top, bottom) tuple per cycle, plus the canvas shape.
    """
    padding = []
    transform_matrices = []
    img_shapes = []
    # 2x3 identity affine, used for the reference cycle itself
    identity_matrix = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    img_paths = [dataset_structure[cyc]['img_path'] for cyc in dataset_structure]
    # collect the (y, x) footprint of every cycle to size the common canvas
    for i in range(0, len(img_paths)):
        with tif.TiffFile(img_paths[i]) as TF:
            img_shapes.append(TF.series[0].shape[-2:])
    max_size_x = max([s[1] for s in img_shapes])
    max_size_y = max([s[0] for s in img_shapes])
    target_shape = (max_size_y, max_size_x)
    # build the reference: max projection of the reference channel's z planes
    ref_img_structure = dataset_structure[ref_cycle_id]['img_structure']
    ref_img_ref_channel_id = dataset_structure[ref_cycle_id]['ref_channel_id']
    ref_img_path = dataset_structure[ref_cycle_id]['img_path']
    ref_img_tiff_pages = list(ref_img_structure[ref_img_ref_channel_id].values())
    ref_img = read_and_max_project_pages(ref_img_path, ref_img_tiff_pages)
    gc.collect()
    ref_img, pad = pad_to_size(target_shape, ref_img)
    gc.collect()
    # NOTE(review): the reference pad is appended first, while the loop below
    # appends pads in cycle order — if ref_cycle_id != 0 the padding list is
    # out of step with the cycle/matrix order consumed by save_param; confirm.
    padding.append(pad)
    ref_features = get_features(ref_img, tile_size)
    gc.collect()
    ncycles = len(dataset_structure.keys())
    for cycle in dataset_structure:
        print('image {0}/{1}'.format(cycle + 1, ncycles))
        img_structure = dataset_structure[cycle]['img_structure']
        ref_channel_id = dataset_structure[cycle]['ref_channel_id']
        img_path = dataset_structure[cycle]['img_path']
        if cycle == ref_cycle_id:
            # the reference maps onto itself
            transform_matrices.append(identity_matrix)
        else:
            mov_img_tiff_pages = list(img_structure[ref_channel_id].values())
            mov_img = read_and_max_project_pages(img_path, mov_img_tiff_pages)
            gc.collect()
            mov_img, pad = pad_to_size(target_shape, mov_img)
            padding.append(pad)
            transform_matrix = register_img_pair(ref_features, get_features(mov_img, tile_size))
            transform_matrices.append(transform_matrix)
            gc.collect()
    return transform_matrices, target_shape, padding
def transform_imgs(dataset_structure, out_dir, target_shape, transform_matrices, is_stack):
    """Warp every page of every cycle into a single registered BigTIFF stack.

    Pages are padded to *target_shape*, warped with the inverse of the
    cycle's affine transform, and written to ``out_dir/out.tif``.  Cycles
    with fewer z planes than the maximum are topped up with empty pages so
    the output stack stays rectangular.
    """
    print('transforming images')
    identity_matrix = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    output_path = osp.join(out_dir, 'out.tif')
    input_img_paths = [dataset_structure[cyc]['img_path'] for cyc in dataset_structure]
    # for stacks, reuse the existing OME metadata; otherwise synthesize it
    if is_stack:
        with tif.TiffFile(input_img_paths[0]) as TF:
            old_meta = TF.ome_metadata
            new_meta = old_meta
    else:
        new_meta = generate_new_metadata(input_img_paths, target_shape)
    ncycles = len(dataset_structure.keys())
    # number of z planes per cycle, taken from channel 0
    nzplanes = {cyc: len(dataset_structure[cyc]['img_structure'][0].keys()) for cyc in dataset_structure}
    max_zplanes = max(nzplanes.values())
    TW = tif.TiffWriter(output_path, bigtiff=True)
    for cyc in dataset_structure:
        print('image {0}/{1}'.format(cyc + 1, ncycles))
        img_path = dataset_structure[cyc]['img_path']
        TF = tif.TiffFile(img_path)
        transform_matrix = transform_matrices[cyc]
        img_structure = dataset_structure[cyc]['img_structure']
        for channel in img_structure:
            for zplane in img_structure[channel]:
                page = img_structure[channel][zplane]
                img = TF.asarray(key=page)
                original_dtype = img.dtype
                img, _ = pad_to_size(target_shape, img)
                gc.collect()
                # skip the warp entirely when the transform is the identity
                if not np.array_equal(transform_matrix, identity_matrix):
                    # lift the 2x3 affine to 3x3 so it can be inverted
                    homogenous_transform_matrix = np.append(transform_matrix, [[0, 0, 1]], axis=0)
                    inv_matrix = np.linalg.pinv(homogenous_transform_matrix) # Using partial inverse to handle singular matrices
                    AT = AffineTransform(inv_matrix)
                    img = warp(img, AT, output_shape=img.shape, preserve_range=True).astype(original_dtype)
                    gc.collect()
                TW.write(img, photometric='minisblack', description=new_meta)
                # NOTE(review): this increment is dead code — ``page`` is
                # reassigned from img_structure on the next iteration
                page += 1
                gc.collect()
        # pad this cycle with blank pages up to the common z-plane count
        if nzplanes[cyc] < max_zplanes:
            diff = max_zplanes - nzplanes[cyc]
            empty_page = np.zeros_like(img)
            for a in range(0, diff):
                TW.write(empty_page, photometric='minisblack', description=new_meta)
            del empty_page
            gc.collect()
        del img
        gc.collect()
        TF.close()
    TW.close()
def check_input_size(img_paths: List[str], is_stack: bool):
    """Validate the number of input images against the --stack flag.

    :raises ValueError: when a multi-image registration gets fewer than two
        images, or when --stack is combined with more than one input file.
    """
    n_inputs = len(img_paths)
    if n_inputs == 0:
        raise ValueError('You need to provide at least two images to do a registration.')
    if n_inputs == 1 and not is_stack:
        raise ValueError('You need to provide at least two images to do a registration.')
    if n_inputs > 1 and is_stack:
        raise ValueError('Too many input images. ' +
                         'When flag --stack enabled only one image can be used')
def main(img_paths: list, ref_img_id: int, ref_channel: str,
         out_dir: str, n_workers: int, tile_size: int, stack: bool, estimate_only: bool, load_param: str):
    """Run the full registration pipeline.

    Estimates (or loads from CSV) one affine transform per cycle, optionally
    warps all images into a registered output stack, and always saves the
    parameters next to the output.

    :param img_paths: input image paths (exactly one when *stack* is True).
    :param ref_img_id: index of the reference image/cycle.
    :param ref_channel: name of the reference channel (case-insensitive).
    :param out_dir: output directory, created if missing.
    :param n_workers: dask worker count; 1 switches to the synchronous scheduler.
    :param tile_size: side of the square tiles used for feature extraction.
    :param stack: True when the single input is a multi-cycle stack.
    :param estimate_only: skip image warping, only write the parameter CSV.
    :param load_param: path to a parameter CSV, or the string 'none' to estimate.
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # downstream code concatenates paths, so ensure a trailing slash
    if not out_dir.endswith('/'):
        out_dir = out_dir + '/'
    st = datetime.now()
    print('\nstarted', st)
    # configure dask: synchronous for a single worker, processes otherwise
    if n_workers == 1:
        dask.config.set({'scheduler': 'synchronous'})
    else:
        dask.config.set({'num_workers': n_workers, 'scheduler': 'processes'})
    is_stack = stack
    ref_channel = ref_channel.lower()
    check_input_size(img_paths, is_stack)
    dataset_structure = get_dataset_structure(img_paths, ref_channel, is_stack)
    if load_param == 'none':
        transform_matrices, target_shape, padding = estimate_registration_parameters(dataset_structure, ref_img_id, tile_size)
    else:
        # reuse previously saved parameters instead of re-estimating
        reg_param = pd.read_csv(load_param)
        target_shape = (reg_param.loc[0, 'height'], reg_param.loc[0, 'width'])
        transform_matrices = []
        padding = []
        for i in reg_param.index:
            # columns '0'..'5' hold the flattened 2x3 affine matrix
            matrix = reg_param.loc[i, ['0', '1', '2', '3', '4', '5']].to_numpy().reshape(2, 3).astype(np.float32)
            pad = reg_param.loc[i, ['left', 'right', 'top', 'bottom']].to_list()
            transform_matrices.append(matrix)
            padding.append(pad)
    if not estimate_only:
        transform_imgs(dataset_structure, out_dir, target_shape, transform_matrices, is_stack)
    # always persist the parameters so the run can be reproduced/reused
    transform_matrices_flat = [M.flatten() for M in transform_matrices]
    img_paths2 = [dataset_structure[cyc]['img_path'] for cyc in dataset_structure]
    save_param(img_paths2, out_dir, transform_matrices_flat, padding, target_shape)
    fin = datetime.now()
    print('\nelapsed time', fin - st)
# CLI wrapper: collect the registration options and delegate to main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Image registration')
    parser.add_argument('-i', type=str, nargs='+', required=True,
                        help='paths to images you want to register separated by space.')
    parser.add_argument('-r', type=int, required=True,
                        help='reference image id, e.g. if -i 1.tif 2.tif 3.tif, and you ref image is 1.tif, then -r 0 (starting from 0)')
    parser.add_argument('-c', type=str, required=True,
                        help='reference channel name, e.g. DAPI. Enclose in double quotes if name consist of several words e.g. "Atto 490LS".')
    parser.add_argument('-o', type=str, required=True,
                        help='directory to output registered image.')
    parser.add_argument('-n', type=int, default=1,
                        help='multiprocessing: number of processes, default 1')
    parser.add_argument('--tile_size', type=int, default=1000, help='size of a side of a square tile, ' +
                                                                    'e.g. --tile_size 1000 = tile with dims 1000x1000px')
    parser.add_argument('--stack', action='store_true',
                        help='add this flag if input is image stack instead of image list')
    parser.add_argument('--estimate_only', action='store_true',
                        help='add this flag if you want to get only registration parameters and do not want to process images.')
    parser.add_argument('--load_param', type=str, default='none',
                        help='specify path to csv file with registration parameters')
    args = parser.parse_args()
    main(args.i, args.r, args.c, args.o, args.n, args.tile_size, args.stack, args.estimate_only, args.load_param)
| StarcoderdataPython |
8002225 | """Test Hue migration logic."""
from unittest.mock import patch
from homeassistant.components import hue
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import MockConfigEntry
async def test_migrate_api_key(hass):
    """Test if username gets migrated to api_key."""
    entry = MockConfigEntry(
        domain=hue.DOMAIN,
        data={"host": "0.0.0.0", "api_version": 2, "username": "abcdefgh"},
    )
    await hue.migration.check_migration(hass, entry)
    # after migration the legacy "username" key is replaced by "api_key"
    expected = {
        "host": "0.0.0.0",
        "api_version": 2,
        "api_key": "<KEY>",
    }
    assert entry.data == expected
async def test_auto_switchover(hass):
    """Test if config entry from v1 automatically switches to v2."""
    config_entry = MockConfigEntry(
        domain=hue.DOMAIN,
        data={"host": "0.0.0.0", "api_version": 1, "username": "abcdefgh"},
    )
    # fix: the kwarg was misspelled ``retun_value`` — that only set a stray
    # attribute on the MagicMock instead of configuring its return value
    with patch.object(hue.migration, "is_v2_bridge", return_value=True), patch.object(
        hue.migration, "handle_v2_migration"
    ) as mock_mig:
        await hue.migration.check_migration(hass, config_entry)
        # the v2 migration handler must be invoked exactly once
        assert len(mock_mig.mock_calls) == 1
    # the api version should now be version 2
    assert config_entry.data == {
        "host": "0.0.0.0",
        "api_version": 2,
        "api_key": "<KEY>",
    }
async def test_light_entity_migration(
    hass, mock_bridge_v2, mock_config_entry_v2, v2_resources_test_data
):
    """Test if entity schema for lights migrates from v1 to v2."""
    config_entry = mock_bridge_v2.config_entry = mock_config_entry_v2
    ent_reg = er.async_get(hass)
    dev_reg = dr.async_get(hass)
    # create device/entity with V1 schema in registry
    # (v1 used the light's IPv6-looking unique id as the identifier)
    device = dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        identifiers={(hue.DOMAIN, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")},
    )
    ent_reg.async_get_or_create(
        "light",
        hue.DOMAIN,
        "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
        suggested_object_id="migrated_light_1",
        device_id=device.id,
    )
    # now run the migration and check results
    await mock_bridge_v2.api.load_test_data(v2_resources_test_data)
    await hass.async_block_till_done()
    # patch the bridge class so the migration talks to the mocked v2 api
    with patch(
        "homeassistant.components.hue.migration.HueBridgeV2",
        return_value=mock_bridge_v2.api,
    ):
        await hue.migration.handle_v2_migration(hass, config_entry)
    # migrated device should have new identifier (guid) and old style (mac)
    migrated_device = dev_reg.async_get(device.id)
    assert migrated_device is not None
    assert migrated_device.identifiers == {
        (hue.DOMAIN, "0b216218-d811-4c95-8c55-bbcda50f9d50"),
        (hue.DOMAIN, "00:17:88:01:09:aa:bb:65"),
    }
    # the entity should have the new identifier (guid)
    migrated_entity = ent_reg.async_get("light.migrated_light_1")
    assert migrated_entity is not None
    assert migrated_entity.unique_id == "02cba059-9c2c-4d45-97e4-4f79b1bfbaa1"
async def test_sensor_entity_migration(
    hass, mock_bridge_v2, mock_config_entry_v2, v2_resources_test_data
):
    """Test if entity schema for sensors migrates from v1 to v2."""
    config_entry = mock_bridge_v2.config_entry = mock_config_entry_v2
    ent_reg = er.async_get(hass)
    dev_reg = dr.async_get(hass)
    # create device with V1 schema in registry for Hue motion sensor
    device_mac = "00:17:aa:bb:cc:09:ac:c3"
    device = dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id, identifiers={(hue.DOMAIN, device_mac)}
    )
    # mapping of device_class to new id;
    # each tuple is (device_class, platform, expected v2 guid)
    sensor_mappings = {
        ("temperature", "sensor", "66466e14-d2fa-4b96-b2a0-e10de9cd8b8b"),
        ("illuminance", "sensor", "d504e7a4-9a18-4854-90fd-c5b6ac102c40"),
        ("battery", "sensor", "669f609d-4860-4f1c-bc25-7a9cec1c3b6c"),
        ("motion", "binary_sensor", "b6896534-016d-4052-8cb4-ef04454df62c"),
    }
    # create entities with V1 schema in registry for Hue motion sensor
    # (v1 unique ids were "<mac>-<device_class>")
    for dev_class, platform, new_id in sensor_mappings:
        ent_reg.async_get_or_create(
            platform,
            hue.DOMAIN,
            f"{device_mac}-{dev_class}",
            suggested_object_id=f"hue_migrated_{dev_class}_sensor",
            device_id=device.id,
            device_class=dev_class,
        )
    # now run the migration and check results
    await mock_bridge_v2.api.load_test_data(v2_resources_test_data)
    await hass.async_block_till_done()
    # patch the bridge class so the migration talks to the mocked v2 api
    with patch(
        "homeassistant.components.hue.migration.HueBridgeV2",
        return_value=mock_bridge_v2.api,
    ):
        await hue.migration.handle_v2_migration(hass, config_entry)
    # migrated device should have new identifier (guid) and old style (mac)
    migrated_device = dev_reg.async_get(device.id)
    assert migrated_device is not None
    assert migrated_device.identifiers == {
        (hue.DOMAIN, "2330b45d-6079-4c6e-bba6-1b68afb1a0d6"),
        (hue.DOMAIN, device_mac),
    }
    # the entities should have the correct V2 identifier (guid)
    for dev_class, platform, new_id in sensor_mappings:
        migrated_entity = ent_reg.async_get(
            f"{platform}.hue_migrated_{dev_class}_sensor"
        )
        assert migrated_entity is not None
        assert migrated_entity.unique_id == new_id
async def test_group_entity_migration(
    hass, mock_bridge_v2, mock_config_entry_v2, v2_resources_test_data
):
    """Test if entity schema for grouped_lights migrates from v1 to v2."""
    config_entry = mock_bridge_v2.config_entry = mock_config_entry_v2
    entity_registry = er.async_get(hass)
    # register a device-less grouped light under the legacy (v1) unique id "3"
    entity_registry.async_get_or_create(
        "light",
        hue.DOMAIN,
        "3",
        suggested_object_id="hue_migrated_grouped_light",
        config_entry=config_entry,
    )
    # run the migration against the mocked v2 bridge and verify the result
    await mock_bridge_v2.api.load_test_data(v2_resources_test_data)
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.hue.migration.HueBridgeV2",
        return_value=mock_bridge_v2.api,
    ):
        await hue.migration.handle_v2_migration(hass, config_entry)
    # after migration the entity must be addressed by its v2 guid
    migrated = entity_registry.async_get("light.hue_migrated_grouped_light")
    assert migrated is not None
    assert migrated.unique_id == "e937f8db-2f0e-49a0-936e-027e60e15b34"
| StarcoderdataPython |
8004290 | <gh_stars>0
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of linked list.
    @param n: An integer
    @return: The head of linked list.
    """
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end using a two-pointer sweep."""
        if not head or n < 0:
            return
        sentinel = ListNode(0)
        sentinel.next = head
        # advance ``lead`` n nodes ahead of ``trail``
        lead = sentinel
        trail = sentinel
        steps = 0
        while steps < n and lead:
            lead = lead.next
            steps += 1
        # n exceeds the number of nodes: nothing to remove
        if steps != n:
            return head
        # move both pointers until ``lead`` reaches the last node; ``trail``
        # then sits just before the n-th node from the end
        while trail.next and lead.next:
            trail = trail.next
            lead = lead.next
        # unlink the target node
        trail.next = trail.next.next
        return sentinel.next
3384581 | <filename>tests/backup_test.py
import io
from fixtures import *
import tarfile
import gzip
def test_backing_up_config_dir():
    """The backup blob must be a compressed tar covering the whole config dir."""
    blob = manager.backup.config_dir()
    archive = tarfile.open(fileobj=io.BytesIO(blob), mode='r:*')
    # The configuration directory has 9 files in it, plus the config directory
    assert len(archive.getmembers()) == 9 + 1
| StarcoderdataPython |
6664316 | """Test the variable meanings dictionary."""
import os
import os.path as op
import json
import sp_experiment
from sp_experiment.define_variable_meanings import (make_description_task_json,
make_events_json_dict,
make_data_dir)
def test_make_events_json_dict():
    """Test the variable meanings."""
    meanings = make_events_json_dict()
    assert isinstance(meanings, dict)
    # every key under "value" -> "Levels" must be an int in trigger-byte range
    for level_key in meanings['value']['Levels']:
        assert isinstance(level_key, int)
        assert 0 <= level_key <= 255
    # Also test descriptions task json
    descriptions = make_description_task_json()
    assert isinstance(descriptions, dict)
def test_json():
    """Test json file.

    Ensures the events sidecar exists in the data directory (writing it from
    the generated dict when missing) and that it parses as valid JSON.
    """
    _, data_dir = make_data_dir()
    fname = 'task-sp_events.json'
    fpath = op.join(data_dir, fname)
    if not op.exists(data_dir):
        os.makedirs(data_dir)
    # If the json does not exist yet, write it from the generated dict.
    if not op.exists(fpath):
        events_json_dict = make_events_json_dict()
        with open(fpath, 'w') as fout:
            json.dump(obj=events_json_dict, fp=fout,
                      sort_keys=False, indent=4)
    # Load json and check for integrity (json.load raises on malformed input)
    with open(fpath, 'r') as fin:
        try:
            json.load(fin)
        except ValueError as e:
            print('invalid json: {}'.format(e))
            raise
raise
def test_make_data_dir():
    """Test making of datadir and copying over of relevant files."""
    init_dir, data_dir = make_data_dir()
    assert init_dir == op.dirname(sp_experiment.__file__)
    assert op.exists(data_dir)
    # the events sidecar and the template events file must have been copied
    for fname in ('task-sp_events.json', 'sub-999_task-spactive_events.tsv'):
        assert op.exists(op.join(data_dir, fname))
| StarcoderdataPython |
3488669 | <reponame>bharshbarger/python_toolbox
import subprocess
def tmux_execute(session_name, command):
    """Run *command* inside a new detached tmux session named *session_name*.

    :param session_name: name passed to ``tmux new-session -s``.
    :param command: shell command the session executes.
    :return: ``(stdout, stderr)`` bytes from the tmux client (the original
        read the output but discarded it; returning it is backward
        compatible since previous callers received ``None``).
    """
    proc = subprocess.Popen(
        ['tmux', 'new-session', '-d', '-s', str(session_name), str(command)],
        stdout=subprocess.PIPE,
    )
    out, err = proc.communicate()
    return out, err
| StarcoderdataPython |
206763 | # Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from ..base import BaseNumericEncoder
from ...helper import batching, train_required
class VladEncoder(BaseNumericEncoder):
    """VLAD (Vector of Locally Aggregated Descriptors) encoder.

    Clusters frame-level vectors with faiss k-means, then encodes each chunk
    as the per-centroid sum of residuals between its frames and their nearest
    centroid, flattened into a single vector.

    NOTE(review): input appears to be shaped (num_chunks, num_frames, dim)
    based on the reshape/concatenate calls below — confirm with callers.
    """
    # number of rows processed per call by the @batching decorator
    batch_size = 2048

    def __init__(self, num_clusters: int, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # number of k-means centroids; set before training
        self.num_clusters = num_clusters
        # centroid matrix filled in by kmeans_train(); None until trained
        self.centroids = None

    def kmeans_train(self, vecs):
        """Fit faiss k-means on *vecs* and store the resulting centroids."""
        import faiss
        kmeans = faiss.Kmeans(vecs.shape[1], self.num_clusters, niter=5, verbose=False)
        kmeans.train(vecs)
        self.centroids = kmeans.centroids

    def kmeans_pred(self, vecs):
        """Return the index of the nearest centroid for every row of *vecs*."""
        # broadcast against centroids: squared L2 distance per (row, centroid)
        vecs = np.reshape(vecs, [vecs.shape[0], 1, 1, vecs.shape[1]])
        dist = np.sum(np.square(vecs - self.centroids), -1)
        # argmax of the negated distance == argmin of the distance
        return np.argmax(-dist, axis=-1).astype(np.int64)

    @batching
    def train(self, vecs: np.ndarray, *args, **kwargs):
        """Train the centroids on all frames of all chunks."""
        assert len(vecs) > self.num_clusters, 'number of data should be larger than number of clusters'
        # flatten chunks into one (total_frames, dim) matrix for k-means
        vecs_ = copy.deepcopy(vecs)
        vecs_ = np.concatenate((list(vecs_[i] for i in range(len(vecs_)))), axis=0)
        self.kmeans_train(vecs_)

    @train_required
    @batching
    def encode(self, vecs: np.ndarray, *args, **kwargs) -> np.ndarray:
        """Encode each chunk of *vecs* into a flattened VLAD descriptor."""
        vecs_ = copy.deepcopy(vecs)
        vecs_ = np.concatenate((list(vecs_[i] for i in range(len(vecs_)))), axis=0)
        knn_output = self.kmeans_pred(vecs_)
        # regroup the flat centroid assignments back into per-chunk lists
        knn_output = [knn_output[i:i + vecs.shape[1]] for i in range(0, len(knn_output), vecs.shape[1])]
        output = []
        for chunk_count, chunk in enumerate(vecs):
            # accumulate residuals (frame - centroid) per assigned centroid
            res = np.zeros((self.centroids.shape[0], self.centroids.shape[1]))
            for frame_count, frame in enumerate(chunk):
                center_index = knn_output[chunk_count][frame_count][0]
                res[center_index] += (frame - self.centroids[center_index])
            output.append(res)
        # flatten each (num_clusters, dim) residual matrix into one row
        output = np.array(list(map(lambda x: x.reshape(1, -1), output)), dtype=np.float32)
        output = np.squeeze(output, axis=1)
        return output

    def _copy_from(self, x: 'VladEncoder') -> None:
        """Copy trained state from another VladEncoder instance."""
        self.num_clusters = x.num_clusters
        self.centroids = x.centroids
| StarcoderdataPython |
3354120 | <reponame>Satanepigone/osr2mp4-app<gh_stars>100-1000
from PyQt5.QtWidgets import QLabel
class ParentTitle(QLabel):
    """Label whose bold font size rescales with the widget's fixed height.

    Subclasses are expected to assign ``self.text`` and the ``default_*``
    metrics.  NOTE(review): ``self.text`` shadows ``QLabel.text()`` —
    presumably intentional, confirm before refactoring.
    """
    def __init__(self, default_fontsize=1, color="white"):
        super().__init__()
        # baseline metrics; subclasses overwrite these after setting the text
        self.default_width = 1
        self.default_height = 1
        self.default_fontsize = default_fontsize
        self.color = color
        self.setStyleSheet("font: bold %ipx;color:%s;" % (self.default_fontsize, color))

    def width(self):
        # doubled text bounding-rect width;
        # NOTE(review): the factor of 2 is unexplained — confirm its purpose
        return self.fontMetrics().boundingRect(self.text).width() * 2

    def height(self):
        # doubled text bounding-rect height (see note on width above)
        return self.fontMetrics().boundingRect(self.text).height() * 2

    def setFixedHeight(self, p_int):
        # rescale the font proportionally to the new height before fixing it
        scale = p_int / self.default_height
        self.setStyleSheet("font-size: %ipx; font-weight: bold; color: %s;" % (int(scale * self.default_fontsize), self.color))
        super().setFixedHeight(p_int)

    def updatevalue(self):
        # hook for subclasses; intentionally a no-op here
        pass
class Titles(ParentTitle):
    """Large (24px) title label; *pixmap* and *parent* are currently unused."""
    def __init__(self, title, pixmap=None, parent=None, color="white"):
        self.default_fontsize = 24
        super().__init__(self.default_fontsize, color=color)
        # trailing space widens the measured bounding rect slightly
        self.text = title + " "
        self.setText(title)
        font = self.font()
        font.setPointSize(100)
        self.setFont(font)
        # measure defaults only after text and font are in place
        self.default_width = super().width()
        self.default_height = super().height()
class SmallTitles(ParentTitle):
    """Small (14px) title label; *parent* is currently unused."""
    def __init__(self, title, parent=None, color="white"):
        self.default_fontsize = 14
        super().__init__(self.default_fontsize, color=color)
        # trailing space widens the measured bounding rect slightly
        self.text = title + " "
        self.setText(title)
        # measure defaults only after the text is in place
        self.default_width = super().width()
        self.default_height = super().height()
| StarcoderdataPython |
5004514 | <gh_stars>1-10
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../../")
import soundscapy.ssid.parameters as par
from pytest import raises
def test_surveyvar_dict():
    """Every known survey variable must expose the expected level range."""
    # sources, PAQ scales and outcome items all use 1-5 Likert levels,
    # while the WHO-5 wellbeing items are coded 0-5
    likert_keys = {
        "Traffic", "Other", "Human", "Natural",
        "pleasant", "chaotic", "vibrant", "uneventful",
        "calm", "annoying", "eventful", "monotonous",
        "overall", "appropriateness", "loudness", "visit_again",
    }
    who_keys = {"who01", "who02", "who03", "who04", "who05"}
    for key, val in par.SURVEY_VARS.items():
        level_keys = val["levels"].keys()
        if key in likert_keys:
            assert min(level_keys) == 1
            assert max(level_keys) == 5
        elif key in who_keys:
            assert min(level_keys) == 0
            assert max(level_keys) == 5


if __name__ == "__main__":
    test_surveyvar_dict()
| StarcoderdataPython |
5046682 | <filename>src/py42/settings/debug.py
import logging
import sys
class _DebugSettings(object):
    """Module stand-in exposing the py42 debug log level as an attribute.

    An instance of this class replaces the module in ``sys.modules`` (see
    below) so client code can read/assign ``debug.level`` directly.
    """
    # convenience aliases for stdlib logging levels; TRACE maps onto DEBUG
    INFO = logging.INFO
    DEBUG = logging.DEBUG
    TRACE = logging.DEBUG
    NONE = logging.NOTSET

    def __init__(self):
        # all py42 output goes through this logger, mirrored to stderr
        self.logger = logging.getLogger("py42")
        self.logger.addHandler(logging.StreamHandler(sys.stderr))

    @property
    def level(self):
        """Current level of the underlying ``py42`` logger."""
        return self.logger.level

    @level.setter
    def level(self, level):
        self.logger.setLevel(level)


# replace this module object with a settings instance so that
# ``import debug; debug.level = debug.DEBUG`` works as attribute access
sys.modules[__name__] = _DebugSettings()
| StarcoderdataPython |
1659916 | #import helper
#import helper as h
from helper import validate_and_execute, user_input_message
# Read "days:unit" requests from the user until they type "exit".
while True:
    user_input = input(user_input_message)
    # bail out before parsing: the original split "exit" on ":" and then
    # crashed with IndexError on the missing unit part
    if user_input == "exit":
        break
    days_and_unit = user_input.split(":")
    days_and_unit_dict = {"days": days_and_unit[0], "unit": days_and_unit[1]}
    validate_and_execute(days_and_unit_dict)
| StarcoderdataPython |
5195958 | <filename>custom_components/ha-pyamdgpuinfo/const.py
""" The AMD GPU component."""
from homeassistant.components.sensor import (
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
)
from homeassistant.const import (
CONF_RESOURCES,
DATA_MEBIBYTES,
PERCENTAGE,
TEMP_CELSIUS,
POWER_WATT,
ATTR_VOLTAGE,
)
# Static identity of the integration and its (single) GPU device.
DEVICE_ID = "0"
DOMAIN = "ha_pyamdgpuinfo"
MANUFACTURER = "AMD"
MODEL = "Unknown"
NAME = "AMD GPU"
PLATFORMS = ["sensor"]
# Schema: [name, unit of measurement, icon, device class, flag if mandatory arg]
# NOTE(review): ATTR_VOLTAGE is HA's attribute key "voltage", not a unit
# string — confirm whether the voltage sensors should use "V" instead.
SENSOR_TYPES = {
    "gpu_temperature": ["GPU temperature", TEMP_CELSIUS, "mdi:thermometer", DEVICE_CLASS_TEMPERATURE, False, ],
    "vram_size": ["VRAM Size", DATA_MEBIBYTES, "mdi:memory", None, False],
    "vram_usage": ["VRAM Usage", DATA_MEBIBYTES, "mdi:memory", None, False],
    "vram_percent": ["VRAM Usage (percent)", PERCENTAGE, "mdi:memory", None, False],
    "gtt_size": ["GTT Size", DATA_MEBIBYTES, "mdi:memory", None, False],
    "gtt_usage": ["GTT Usage", DATA_MEBIBYTES, "mdi:memory", None, False],
    "gtt_percent": ["GTT Usage (percent)", PERCENTAGE, "mdi:memory", None, False],
    "sclk_max": ["Shader Clock Max", "GHz", "mdi:chip", None, False],
    "sclk_usage": ["Shader Clock Usage", "GHz", "mdi:chip", None, False],
    "sclk_percent": ["Shader Clock Usage (percent)", PERCENTAGE, "mdi:chip", None, False],
    "mclk_max": ["Memory Clock Max", "GHz", "mdi:chip", None, False],
    "mclk_usage": ["Memory Clock Usage", "GHz", "mdi:chip", None, False],
    "mclk_percent": ["Memory Clock Usage (percent)", PERCENTAGE, "mdi:chip", None, False],
    "query_load": ["Overall GPU load", PERCENTAGE, "mdi:chip", None, False],
    "query_power": ["Power consumption", POWER_WATT, "mdi:lightning-bolt", DEVICE_CLASS_POWER, False],
    "query_northbridge_voltage": ["Northbrige Voltage", ATTR_VOLTAGE, "mdi:lightning-bolt", DEVICE_CLASS_VOLTAGE, False],
    "query_graphics_voltage": ["Graphics Voltage", ATTR_VOLTAGE, "mdi:lightning-bolt", DEVICE_CLASS_VOLTAGE, False],
    "texture_addresser": ["Texture Addresser (percent)", PERCENTAGE, "mdi:chip", None, False],
    "shader_export": ["Shader Export (percent)", PERCENTAGE, "mdi:chip", None, False],
    "shader_interpolator": ["Shader Interpolator (percent)", PERCENTAGE, "mdi:chip", None, False],
    "primitive_assembly": ["Primitive Assembly (percent)", PERCENTAGE, "mdi:chip", None, False],
    "depth_block": ["Depth Block (percent)", PERCENTAGE, "mdi:chip", None, False],
    "colour_block": ["Colour Block (percent)", PERCENTAGE, "mdi:chip", None, False],
    "graphics_pipe": ["GPU use (percent)", PERCENTAGE, "mdi:chip", None, False]
}
| StarcoderdataPython |
8137713 | <filename>python/package/open_waters/document/__init__.py<gh_stars>1-10
class Document:
    """Thin wrapper around the ``documents`` API endpoints of a client."""

    __baseEndpoint = 'documents'
    __client = None

    def __init__(self, __client):
        self.__client = __client

    def upload(self, file_path: str) -> dict:
        """ Upload document

        :rtype: dict
        :param file_path: path of the file to upload
        :return: Uploaded document
        """
        # open inside a context manager so the handle is always closed
        # (the original leaked the open file object)
        with open(file_path, 'rb') as file_handle:
            files = {'file': file_handle}
            return self.__client.upload(self.__baseEndpoint + "/upload", files)
| StarcoderdataPython |
8198317 | # 763. Partition Labels
# Runtime: 40 ms, faster than 75.24% of Python3 online submissions for Partition Labels.
# Memory Usage: 14.4 MB, less than 24.03% of Python3 online submissions for Partition Labels.
class Solution:
    # Greedy: grow the current partition until every character seen so far
    # has its last occurrence inside it, then cut.
    def partitionLabels(self, s: str) -> list[int]:
        """Return partition sizes so each letter appears in at most one part."""
        last_index = {}
        for pos, ch in enumerate(s):
            last_index[ch] = pos
        sizes = []
        start = 0
        end = 0
        for pos, ch in enumerate(s):
            end = max(end, last_index[ch])
            if pos == end:
                sizes.append(end - start + 1)
                start = pos + 1
        return sizes
377194 | #!/usr/bin/env python
from django.apps import AppConfig
class DjangoMakeSuperUserConfig(AppConfig):
    """Django AppConfig for the ``django_makesuperuser`` application."""
    # dotted module path this app config belongs to
    name = 'django_makesuperuser'
| StarcoderdataPython |
4915353 | import io
import json
import os
import copy
import os.path
from datetime import datetime
from io import BytesIO
import yaml
from PyPDF2 import PdfFileReader
from PyPDF2.generic import IndirectObject
from lxml import etree
from .flavors import xml_flavor
from .logger import logger
from .pdfwriter import FacturXPDFWriter
# Python 2 and 3 compat
try:
    # Python 2: the ``file`` builtin exists alongside io.IOBase
    file_types = (file, io.IOBase)
except NameError:
    # Python 3: only io.IOBase; alias unicode so str checks keep working
    file_types = (io.IOBase,)
    unicode = str
# public API of this module
__all__ = ['FacturX']
class FacturX(object):
"""Represents an electronic PDF invoice with embedded XML metadata following the
Factur-X standard.
The source of truth is always the underlying XML tree. No copy of field
data is kept. Manipulation of the XML tree is either done via Python-style
dict access (available for the most common fields) or by directly accessing
the XML data on `FacturX.xml`.
Attributes:
- xml: xml tree of machine-readable representation.
- pdf: underlying graphical PDF representation.
- flavor: which flavor (Factur-x) to use.
"""
    def __init__(self, pdf_invoice, flavor='factur-x', level='minimum'):
        """Load a PDF invoice and its embedded XML (or create it from template).

        :param pdf_invoice: path to an existing ``.pdf`` file, or a file-like
            object containing the PDF bytes.
        :param flavor: flavor used when no XML is embedded (default 'factur-x').
        :param level: profile level for the template (default 'minimum').
        :raises TypeError: when *pdf_invoice* is neither a path string nor a
            file-like object.
        """
        # Read PDF from path, pointer or string
        if isinstance(pdf_invoice, str) and pdf_invoice.endswith('.pdf') and os.path.isfile(pdf_invoice):
            with open(pdf_invoice, 'rb') as f:
                pdf_file = BytesIO(f.read())
        elif isinstance(pdf_invoice, file_types):
            pdf_file = pdf_invoice
        else:
            raise TypeError(
                "The first argument of the method get_facturx_xml_from_pdf must "
                "be either a string or a file (it is a %s)." % type(pdf_invoice))
        xml = self._xml_from_file(pdf_file)
        self.pdf = pdf_file
        # PDF has metadata embedded
        if xml is not None:
            # Read existing XML from PDF
            self.xml = xml
            self.flavor = xml_flavor.XMLFlavor(xml)
        else:
            # No metadata embedded. Create from template.
            # 'PDF does not have XML embedded. Adding from template.'
            self.flavor, self.xml = xml_flavor.XMLFlavor.from_template(flavor, level)
        # validate the (loaded or templated) tree against the flavor's XSD
        self.flavor.check_xsd(self.xml)
        self._namespaces = self.xml.nsmap
        # registry used by __setitem__ to track 1..n cardinality nodes
        self.already_added_field = {}
    def read_xml(self):
        """Use XML data from external file. Replaces existing XML or template.

        NOTE(review): not implemented yet — this method is a stub.
        """
        pass
    def _xml_from_file(self, pdf_file):
        """Return the embedded Factur-X XML tree of *pdf_file*, or None.

        Scans the PDF's embedded files for one of the filenames the flavors
        recognise and parses its payload with lxml.
        """
        pdf = PdfFileReader(pdf_file)
        pdf_root = pdf.trailer['/Root']
        if '/Names' not in pdf_root or '/EmbeddedFiles' not in pdf_root['/Names']:
            # No embedded files at all -> no XML metadata
            return None
        for file in pdf_root['/Names']['/EmbeddedFiles']['/Names']:
            if isinstance(file, IndirectObject):
                obj = file.getObject()
                # '/F' is the attachment filename, '/EF'/'/F' its payload
                if obj['/F'] in xml_flavor.valid_xmp_filenames():
                    xml_root = etree.fromstring(obj['/EF']['/F'].getData())
                    xml_content = xml_root
                    return xml_content
def __getitem__(self, field_name):
path = self.flavor.get_xml_path(field_name)
value = self.xml.xpath(path, namespaces=self._namespaces)
if value:
value = value[0].text
if 'date' in field_name:
value = datetime.strptime(value, '%Y%m%d')
return value
    def __setitem__(self, field_name, value):
        """Write *value* into the XML node mapped to *field_name*.

        Logs an error (instead of raising) when the flavor's XPath does not
        match any node in the current tree.
        """
        path = self.flavor.get_xml_path(field_name)
        res = self.xml.xpath(path, namespaces=self._namespaces)
        if not res:
            # The node is not defined at all in the parsed xml
            logger.error("{} is not defined in {}".format(path, self.flavor.name))
            return
        # always write into the last matching node
        current_el = res[-1]
        parent_tag = current_el.getparent().tag
        # duplicate the parent first when this node was already written once
        self._handle_duplicated_node(current_el, parent_tag)
        self._write_element(current_el, field_name, value)
        self._save_to_registry(current_el, parent_tag)
    def _handle_duplicated_node(self, current_el, parent_tag):
        # method meant to handle cardinality 1.n (ApplicableTradeTax or IncludedSupplyChainTradeLineItem)
        # we get the sibling and duplicate it
        # NOTE(review): this copies the *parent* element and inserts the copy
        # right after it, so writing an already-written field yields a fresh
        # sibling subtree -- verify together with _save_to_registry.
        if parent_tag in self.already_added_field and current_el in self.already_added_field[parent_tag]:
            parent_el = current_el.getparent()
            parent_el.addnext(copy.copy(parent_el))
def _write_element(self, current_el, field_name, value):
# if we have type cast worries, it must be handled here
if 'date' in field_name:
assert isinstance(value, datetime), 'Please pass date values as DateTime() object.'
value = value.strftime('%Y%m%d')
current_el.attrib['format'] = '102'
current_el.text = value
else:
current_el.text = str(value)
    def _save_to_registry(self, current_el, parent_tag):
        # Bookkeeping for _handle_duplicated_node: tracks which elements under
        # a given parent tag have already been written to.
        if parent_tag not in self.already_added_field:
            # First write under this parent: just create the bucket.
            self.already_added_field[parent_tag] = []
        elif current_el in self.already_added_field[parent_tag]:
            # Element was registered before: drop it from the bucket.
            # NOTE(review): removal after duplication looks intentional but is
            # subtle -- verify together with _handle_duplicated_node.
            self.already_added_field[parent_tag] = [el for el in self.already_added_field[parent_tag] if
                                                    el != current_el]
        else:
            self.already_added_field[parent_tag].append(current_el)
    def is_valid(self):
        """Make every effort to validate the current XML.
        Checks:
        - all required fields are present and have values.
        - XML is valid
        - ...
        Returns: true/false (validation passed/failed)
        """
        # validate against XSD
        try:
            self.flavor.check_xsd(self.xml)
        except Exception:
            # Any XSD failure counts as invalid.
            return False
        # Check for required fields
        fields_data = xml_flavor.FIELDS
        for field in fields_data.keys():
            if fields_data[field]['_required']:
                r = self.xml.xpath(fields_data[field]['_path'][self.flavor.name], namespaces=self._namespaces)
                if not len(r) or r[0].text is None:
                    # Missing required field: fall back to its default, if any.
                    if '_default' in fields_data[field].keys():
                        self[field] = fields_data[field]['_default']
                    else:
                        logger.error("Required field '%s' is not present", field)
                        return False
        # Check for codes (ISO:3166, ISO:4217)
        codes_to_check = [
            ('currency', 'currency'),
            ('country', 'seller_country'),
            ('country', 'buyer_country'),
            ('country', 'shipping_country')
        ]
        for code_type, field_name in codes_to_check:
            # Only validate codes for fields that actually carry a value.
            if self[field_name] and not self.flavor.valid_code(code_type, self[field_name]):
                logger.error("Field %s is not a valid %s code." % (field_name, code_type))
                return False
        return True
    def write_pdf(self, path):
        """Write the PDF (with the current XML attached) to *path*; returns True."""
        pdfwriter = FacturXPDFWriter(self)
        with open(path, 'wb') as output_f:
            pdfwriter.write(output_f)
        return True
    @property
    def xml_str(self):
        """Pretty-printed serialization of the XML tree as bytes.

        (Used when embedding the XML as a PDF attachment; the previous
        docstring incorrectly described this as an MD5 checksum.)
        """
        return etree.tostring(self.xml, pretty_print=True)
    def write_xml(self, path):
        """Dump the embedded XML to *path* as bytes."""
        with open(path, 'wb') as f:
            f.write(self.xml_str)
    def to_dict(self):
        """Get all available fields as dict."""
        fields_data = xml_flavor.FIELDS
        flavor = self.flavor.name
        output_dict = {}
        for field in fields_data.keys():
            try:
                # Fields whose XPath matches nothing come back as None
                # (the IndexError from r[0] is caught below).
                if fields_data[field]['_path'][flavor] is not None:
                    r = self.xml.xpath(fields_data[field]['_path'][flavor],
                                       namespaces=self._namespaces)
                    output_dict[field] = r[0].text
            except IndexError:
                output_dict[field] = None
        return output_dict
    def write_json(self, json_file_path='output.json'):
        """Export all fields as JSON -- only when the document validates."""
        json_output = self.to_dict()
        # NOTE(review): silently skips the export when validation fails.
        if self.is_valid():
            with open(json_file_path, 'w') as json_file:
                logger.info("Exporting JSON to %s", json_file_path)
                json.dump(json_output, json_file, indent=4, sort_keys=True)
    def write_yaml(self, yml_file_path='output.yml'):
        """Export all fields as YAML -- only when the document validates."""
        yml_output = self.to_dict()
        # NOTE(review): silently skips the export when validation fails.
        if self.is_valid():
            with open(yml_file_path, 'w') as yml_file:
                logger.info("Exporting YAML to %s", yml_file_path)
                yaml.dump(yml_output, yml_file, default_flow_style=False)
| StarcoderdataPython |
8102263 | import numpy
from scipy.ndimage import convolve
from skimage.data import camera
from skimage.util import random_noise
from dexp.processing.filters.fft_convolve import fft_convolve
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
def demo_fft_convolve_numpy():
    """Run the FFT-convolution demo on the NumPy (CPU) backend."""
    with NumpyBackend():
        _demo_fft_convolve()
def demo_fft_convolve_cupy():
    """Run the FFT-convolution demo on the CuPy (GPU) backend.

    Degrades gracefully when CuPy is not installed.
    """
    try:
        with CupyBackend():
            _demo_fft_convolve()
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def _demo_fft_convolve():
    # Build a noisy test image, convolve it with a small kernel via FFT,
    # and compare visually against scipy.ndimage.convolve in a napari viewer.
    image = camera().astype(numpy.float32) / 255
    noisy = random_noise(image, mode="gaussian", var=0.005, seed=0, clip=False)
    noisy = random_noise(noisy, mode="s&p", amount=0.03, seed=0, clip=False).astype(numpy.float32)
    # 3x3 ring kernel (zero center).
    psf = numpy.asarray([[1, 1, 1], [1, 0, 1], [1, 1, 1]]).astype(numpy.float32)
    result = fft_convolve(noisy, psf)
    # Direct spatial convolution as the reference implementation.
    reference_result = convolve(noisy, psf)
    from napari import Viewer, gui_qt
    with gui_qt():
        def _c(array):
            # Move backend arrays to host memory for display.
            return Backend.to_numpy(array)
        viewer = Viewer()
        viewer.add_image(_c(image), name="image")
        viewer.add_image(_c(noisy), name="noisy")
        viewer.add_image(_c(psf), name="psf")
        viewer.add_image(_c(reference_result), name="reference_result")
        viewer.add_image(_c(result), name="result")
if __name__ == "__main__":
    # Try the CUDA backend first, then the CPU fallback.
    demo_fft_convolve_cupy()
    demo_fft_convolve_numpy()
| StarcoderdataPython |
4932465 | from opensfm import dataset
from numpy import ndarray
from typing import Dict, Tuple
def get_all_track_observations(gcp_database, track_id: str) -> Dict[str, ndarray]:
    """Return {shot_id: observation point} for every observation of *track_id*."""
    print(f"Getting all observations of track {track_id}")
    data = dataset.DataSet(gcp_database.path)
    tracks_manager = data.load_tracks_manager()
    track_obs = tracks_manager.get_track_observations(track_id)
    return {shot_id: obs.point for shot_id, obs in track_obs.items()}
def get_tracks_visible_in_image(gcp_database, image_key, min_len: int=5) -> Dict[str, Tuple[ndarray, int]]:
    """Return {track_id: (observation point, track length)} for tracks seen in *image_key*.

    Only tracks with at least *min_len* observations are reported.
    """
    print(f"Getting track observations visible in {image_key}")
    data = dataset.DataSet(gcp_database.path)
    tracks_manager = data.load_tracks_manager()
    reconstructions = data.load_reconstruction()
    # Pick the reconstruction that contains the requested shot.
    # NOTE(review): if no reconstruction contains image_key this silently uses
    # the last one (or raises NameError when the list is empty) -- confirm.
    for reconstruction in reconstructions:
        if image_key in reconstruction.shots:
            break
    out = {}
    for track_id in reconstruction.points:
        track_obs = tracks_manager.get_track_observations(track_id)
        if len(track_obs) < min_len:
            continue
        # Keep only the observation made from the requested image.
        for shot_id, obs in track_obs.items():
            if shot_id == image_key:
                out[track_id] = obs.point, len(track_obs)
    return out
| StarcoderdataPython |
3420082 | from flask import Flask, jsonify
app = Flask(__name__)
@app.route("/matchcoaches", methods=['POST'])
def hello():
    """POST /matchcoaches: return a fixed list of coach ids and no error."""
    # NOTE(review): stub handler -- it ignores the request body and always
    # returns the same payload; the function name does not match the route.
    return jsonify(coaches=[1,2,3], error=False)
| StarcoderdataPython |
8139808 | <gh_stars>0
import PIL
import torch
import numpy as np
from PIL import Image
class Transformer(object):
    """"Transform"""
    # Abstract base for the image/bbox transforms below; subclasses implement
    # __call__(img, bbox=None) and return img, or (img, bbox) when a bbox is given.
    def __init__(self,):
        pass
    def __call__(self, img, bbox=None):
        pass
class Compose(Transformer):
    """Compose transforms: apply them in order, feeding each output forward."""
    def __init__(self, transforms=None):
        """
        :param: transforms (default: list=[]) - transforms to apply, in order
        """
        super().__init__()
        # Bug fix: avoid the shared mutable default argument (transforms=[]).
        self.transforms = [] if transforms is None else transforms
    def __call__(self, img, bbox=None):
        if bbox is None:
            # Image-only pipeline: every transform returns just the image.
            for transform in self.transforms:
                img = transform(img, bbox)
            return img
        for transform in self.transforms:
            img, bbox = transform(img, bbox)
        return img, bbox
class Resize(Transformer):
    """Resize image and bbox"""
    def __init__(self, size=(320, 320)):
        """
        :param: size (default: tuple=(320, 320)) - target size
        """
        super().__init__()
        self.size=size
    def __call__(self, img, bbox=None):
        # PIL reports size as (width, height).
        W, H = img.size
        img = img.resize(self.size)
        # Per-axis scale factors applied to box corner coordinates.
        sW = self.size[0]/W
        sH = self.size[1]/H
        if bbox is None:
            return img
        # Boxes are [x0, y0, x1, y1, label]; the label (b[4]) is untouched.
        bbox = [[int(b[0]*sW), int(b[1]*sH),
                 int(b[2]*sW), int(b[3]*sH), b[4]] for b in bbox]
        return img, bbox
class Rotate(Transformer):
    """Rotate image and bbox"""
    def __init__(self, p=0.5):
        """
        :param: p (default: float=0.5) - probability of rotation
        """
        super().__init__()
        self.p = p
    def __call__(self, img, bbox=None):
        if np.random.uniform() < self.p:
            # Rotate 90 degrees without expanding the canvas.
            img = img.rotate(90)
            W, H = img.size
            # Map [x0, y0, x1, y1] through the rotation.
            # NOTE(review): this mapping assumes the rotated canvas keeps the
            # same size (true for square images) -- verify for non-square inputs.
            if bbox is not None:
                bbox = [[b[1], W - b[2], b[3], W - b[0], b[4]] for b in bbox]
        if bbox is None:
            return img
        return img, bbox
class HorizontalFlip(Transformer):
    """Horizontal flip of image and bbox"""
    def __init__(self, p=0.5):
        """
        :param: p (default: float=0.5) - probability of horizontal flip
        """
        super().__init__()
        self.p = p
    def __call__(self, img, bbox=None):
        if np.random.uniform() < self.p:
            img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            W, H = img.size
            # Mirror x-coordinates; x0/x1 are swapped so x0 <= x1 still holds.
            if bbox is not None:
                bbox = [[W - b[2], b[1], W - b[0], b[3], b[4]] for b in bbox]
        if bbox is None:
            return img
        return img, bbox
class VerticalFlip(Transformer):
    """Vertical flip of image and bbox"""
    def __init__(self, p=0.5):
        """
        :param: p (default: float=0.5) - probability of vertical flip
        """
        super().__init__()
        self.p = p
    def __call__(self, img, bbox=None):
        if np.random.uniform() < self.p:
            img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)
            W, H = img.size
            # Mirror y-coordinates; y0/y1 are swapped so y0 <= y1 still holds.
            if bbox is not None:
                bbox = [[b[0], H - b[3], b[2], H - b[1], b[4]] for b in bbox]
        if bbox is None:
            return img
        return img, bbox
class ToTensor(Transformer):
    """Convert image and bbox to torch.tensor"""
    def __init__(self,):
        super().__init__()
    def __call__(self, img, bbox=None):
        # Scale pixel values to [0, 1] and reorder HWC -> CHW.
        img = np.array(img)/255
        img = torch.from_numpy(img).permute(2, 0, 1)
        if bbox is None:
            return img
        bbox = torch.tensor(bbox)
        return img, bbox
class ToImage(Transformer):
    """Convert image tensor to PIL.Image and bbox to list"""
    def __init__(self,):
        super().__init__()
    def __call__(self, img, bbox=None):
        # CHW float tensor in [0, 1] -> HWC uint8 PIL image.
        img = img.permute(1,2,0).numpy()
        img = Image.fromarray(np.uint8(img*255))
        if bbox is None:
            return img
        bbox = bbox.tolist()
        return img, bbox
class Normalize(Transformer):
    """Normalize image and bbox"""
    # NOTE(review): the list defaults are shared across instances; harmless
    # while they are only read, never mutated.
    def __init__(self,
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        """
        :param: mean (default: list=[0.485, 0.456, 0.406]) - list of means for each image channel
        :param: std (default: list=[0.229, 0.224, 0.225]) - list of stds for each image channel
        """
        super().__init__()
        self.mean=mean
        self.std=std
    def __call__(self, img, bbox=None):
        C, H, W = img.shape
        if (self.mean is not None) and (self.std is not None):
            # Broadcast per-channel statistics over H and W.
            mean = torch.tensor(self.mean).view(-1, 1, 1)
            std = torch.tensor(self.std).view(-1, 1, 1)
            img = (img - mean)/std
        if bbox is None:
            return img
        # Box corners become relative coordinates; the label column stays as-is.
        scale = torch.tensor([[1/W, 1/H, 1/W, 1/H, 1]])
        bbox = bbox*scale
        return img, bbox
class DeNormalize(Transformer):
    """DeNormalize image and bbox"""
    def __init__(self,
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        """
        :param: mean (default: list=[0.485, 0.456, 0.406]) - list of means for each image channel
        :param: std (default: list=[0.229, 0.224, 0.225]) - list of stds for each image channel
        """
        super().__init__()
        self.mean=mean
        self.std=std
    def __call__(self, img, bbox=None):
        C, H, W = img.shape
        if (self.mean is not None) and (self.std is not None):
            mean = torch.tensor(self.mean).view(-1, 1, 1)
            std = torch.tensor(self.std).view(-1, 1, 1)
            # Invert Normalize, then clamp back to the valid [0, 1] pixel range.
            img = img*std + mean
            img = torch.clip(img, 0., 1.)
        if bbox is None:
            return img
        # Relative box coordinates back to integer pixel coordinates.
        rescale = torch.tensor([[W, H, W, H, 1]])
        bbox = (bbox*rescale).long()
        return img, bbox
class PadBBox(Transformer):
    """Pad bboxes to a fixed count (to make it possible to get batches)."""
    def __init__(self, max_num_bbox=20, pad_value=-1.):
        """
        :param: max_num_bbox (default: int=20) - maximum number of possible bboxes
        :param: pad_value (default: float=-1.) - padding value
        """
        super().__init__()  # consistency fix: every sibling transform calls super().__init__()
        self.max_num_bbox=max_num_bbox
        self.pad_value=pad_value
    def __call__(self, img, bbox=None):
        if bbox is None:
            return img
        # Fixed-size (max_num_bbox, 5) tensor filled with pad_value;
        # boxes beyond max_num_bbox are dropped.
        max_bbox = torch.full((self.max_num_bbox, 5), self.pad_value)
        num_bbox = min(len(bbox), self.max_num_bbox)
        max_bbox[:num_bbox, :] = bbox[:num_bbox, :]
        return img, max_bbox
# Public API of this module; star-imports expose only these names.
__all__ = ['Transformer', 'Compose', 'Resize', 'Rotate', 'HorizontalFlip',
           'VerticalFlip', 'ToTensor', 'ToImage', 'Normalize', 'DeNormalize', 'PadBBox']
1895495 | # -*- coding:utf-8 -*-
import os
import time
import functools
from PyQt5 import uic
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from utils import loadJson
from _rpc import _exec, _eval
class ControlPanel(QtWidgets.QWidget):
    # Control panel widget: loads a Qt Designer UI and drives its buttons via
    # RPC commands (_exec/_eval) stored in each button's dynamic properties
    # ("clicked_cmd", "pressed_cmd", "released_cmd", "checked_cmd", "enabled_cmd").
    def __init__(self, parent=None):
        super(ControlPanel, self).__init__(parent)
        self._config = loadJson()
        self.ctrlpanelWidth = self._config['controlpanel']['width']
        self.ctrlpanelHeight = self._config['controlpanel']['height']
        self.initUI()
        # Poll each button's checked/enabled expressions every 50 ms.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(50)
        self.timer.timeout.connect(self.on_update)
        self.timer.start()
    def initUI(self):
        # Load the .ui file and wire every push/tool button to the click handler.
        self.ui = uic.loadUi(os.path.join(
            os.path.dirname(__file__), "res/ctrlpanel.ui"), self)
        self.setFixedSize(self.ctrlpanelWidth, self.ctrlpanelHeight)
        self.buttonGroup = []
        for attr in dir(self.ui):
            obj = getattr(self.ui, attr)
            if isinstance(obj, QtWidgets.QPushButton) or \
               isinstance(obj, QtWidgets.QToolButton):
                obj.setAutoRepeat(True)
                obj._repeate = False
                obj.clicked.connect(
                    functools.partial(self.on_handleClicked, obj))
                self.buttonGroup.append(obj)
        self.ui.stackedWidget.setCurrentIndex(0)
        self.onCheckSatus(self.ui.stackedWidget.currentIndex())
        self.ui.sys_ctrl.clicked.connect(self.onSys_ctrl)
        self.ui.machine_ctrl.clicked.connect(self.onMachine_ctrl)
        self.ui.sys_ctrl_2.clicked.connect(self.onSys_ctrl_2)
    def onSys_ctrl(self):
        # Switch to the system control panel
        self.ui.stackedWidget.setCurrentIndex(0)
        self.onCheckSatus(self.ui.stackedWidget.currentIndex())
    def onMachine_ctrl(self):
        # Switch to the machine control panel
        self.ui.stackedWidget.setCurrentIndex(2)
        self.onCheckSatus(self.ui.stackedWidget.currentIndex())
    def onSys_ctrl_2(self):
        # Switch to the second system control panel
        self.ui.stackedWidget.setCurrentIndex(1)
        self.onCheckSatus(self.ui.stackedWidget.currentIndex())
    def onCheckSatus(self, s):
        # Reflect the active stacked-widget page on the three selector buttons.
        self.ui.machine_ctrl.setChecked(True if s==2 else False)
        self.ui.sys_ctrl.setChecked(True if s==0 else False)
        self.ui.sys_ctrl_2.setChecked(True if s==1 else False)
    def on_update(self):
        # Timer tick: evaluate each button's checked/enabled RPC expressions.
        for btn in self.buttonGroup:
            if btn.property("checked_cmd") is not None:
                if str(btn.property("checked_cmd")) != "":
                    btn.setCheckable(True)
                    try:
                        result = _eval(btn.property("checked_cmd"))
                        btn.setChecked(bool(result))
                    except BaseException as e:
                        # NOTE(review): aborts the whole refresh on the first
                        # failing expression -- confirm this is intended.
                        return
            if btn.property("enabled_cmd") is not None:
                if str(btn.property("enabled_cmd")) != "":
                    try:
                        result = _eval(btn.property("enabled_cmd"))
                        btn.setEnabled(bool(result))
                    except BaseException as e:
                        return
    def on_handleClicked(self, btn):
        # Distinguish press-and-hold (auto-repeat) from a plain click.
        if btn.isDown():
            if btn._repeate is False:
                # First auto-repeat event: arm fast repetition and fire "pressed".
                btn._repeate = True
                btn.setAutoRepeatInterval(50)
                btn.setAutoRepeatDelay(0)
                self.on_pressed(btn)
            else:
                self.on_pressed(btn)
        elif btn._repeate is True:
            # Button came back up after a hold: fire "released".
            btn._repeate = False
            self.on_released(btn)
        else:
            self.on_clicked(btn)
    def on_clicked(self, btn):
        # Plain click: run clicked_cmd, or emulate a short press+release.
        try:
            if btn.property("clicked_cmd") is not None:
                _exec(btn.property("clicked_cmd"))
            else:
                if btn.property("pressed_cmd") is not None:
                    _exec(btn.property("pressed_cmd"))
                time.sleep(0.05)
                if btn.property("released_cmd") is not None:
                    _exec(btn.property("released_cmd"))
        except BaseException as e:
            print(e)
            return
    def on_pressed(self, btn):
        try:
            if btn.property("pressed_cmd") is not None:
                _exec(btn.property("pressed_cmd"))
        except BaseException as e:
            print(e)
            return
    def on_released(self, btn):
        try:
            if btn.property("released_cmd") is not None:
                _exec(btn.property("released_cmd"))
        except BaseException as e:
            print(e)
            return
| StarcoderdataPython |
8164315 | <filename>convlab2/laug/Word_Perturbation/multiwoz/types.py
from typing import TypeVar, NewType, Union, List, Dict
# Generic placeholder for one dataset sample.
SampleType = TypeVar("SampleType")
# Plain string content.
StringType = str
# A single token/word (distinct type for static checking).
WordType = TokenType = NewType("TokenType", str)
TokenListType = WordListType = List[TokenType]
# A sentence is either raw text or an already-tokenized list.
SentenceType = Union[StringType, TokenListType]
# One MultiWOZ dialogue record, and the id -> record map of a whole split.
MultiwozSampleType = Dict[str, Union[None, list, dict]]
MultiwozDatasetType = Dict[str, MultiwozSampleType]
| StarcoderdataPython |
3214631 | """Public interface for TensorFlow GNN package.
All the public symbols, data types and functions are provided from this
top-level package. To use the library, you should use a single import statement,
like this:
import tensorflow_gnn as tfgnn
The various data types provided by the GNN library have corresponding schemas
similar to `tf.TensorSpec`. For example, a `FieldSpec` describes an instance of
`Field`, and a `GraphTensorSpec` describes an instance of `GraphTensor`.
"""
from tensorflow_gnn import version
from tensorflow_gnn.graph import adjacency
from tensorflow_gnn.graph import batching_utils
from tensorflow_gnn.graph import graph_constants
from tensorflow_gnn.graph import graph_tensor
from tensorflow_gnn.graph import graph_tensor_encode
from tensorflow_gnn.graph import graph_tensor_io
from tensorflow_gnn.graph import graph_tensor_ops
from tensorflow_gnn.graph import graph_tensor_pprint
from tensorflow_gnn.graph import graph_tensor_random
from tensorflow_gnn.graph import normalization_ops
from tensorflow_gnn.graph import padding_ops
from tensorflow_gnn.graph import preprocessing_common
from tensorflow_gnn.graph import schema_utils
from tensorflow_gnn.graph import schema_validation
from tensorflow_gnn.graph import tag_utils
from tensorflow_gnn.proto import graph_schema
# Package version.
__version__ = version.__version__
# String constants for feature name components, and special feature names.
CONTEXT = graph_constants.CONTEXT
NODES = graph_constants.NODES
EDGES = graph_constants.EDGES
HIDDEN_STATE = graph_constants.HIDDEN_STATE
DEFAULT_STATE_NAME = graph_constants.DEFAULT_STATE_NAME  # Deprecated.
# Integer tags.
SOURCE = graph_constants.SOURCE
TARGET = graph_constants.TARGET
# Type annotations for tags.
IncidentNodeTag = graph_constants.IncidentNodeTag
IncidentNodeOrContextTag = graph_constants.IncidentNodeOrContextTag
# Utils for tags.
reverse_tag = tag_utils.reverse_tag
# Encoded names of implicit features.
SIZE_NAME = graph_constants.SIZE_NAME
SOURCE_NAME = graph_constants.SOURCE_NAME
TARGET_NAME = graph_constants.TARGET_NAME
# Field values, specs, and dictionaries containing them.
Field = graph_constants.Field
FieldName = graph_constants.FieldName
FieldOrFields = graph_constants.FieldOrFields
FieldSpec = graph_constants.FieldSpec
Fields = graph_constants.Fields
FieldsSpec = graph_constants.FieldsSpec
# Names and types of node sets and edge sets.
SetName = graph_constants.SetName
SetType = graph_constants.SetType
NodeSetName = graph_constants.NodeSetName
EdgeSetName = graph_constants.EdgeSetName
# Context, node and edge set objects.
Context = graph_tensor.Context
ContextSpec = graph_tensor.ContextSpec
NodeSet = graph_tensor.NodeSet
NodeSetSpec = graph_tensor.NodeSetSpec
EdgeSet = graph_tensor.EdgeSet
EdgeSetSpec = graph_tensor.EdgeSetSpec
# Adjacency data types.
Adjacency = adjacency.Adjacency
AdjacencySpec = adjacency.AdjacencySpec
HyperAdjacency = adjacency.HyperAdjacency
HyperAdjacencySpec = adjacency.HyperAdjacencySpec
# Principal container and spec type.
GraphTensor = graph_tensor.GraphTensor
GraphTensorSpec = graph_tensor.GraphTensorSpec
# Proto description of schema.
GraphSchema = graph_schema.GraphSchema
Feature = graph_schema.Feature
# Preprocessing (batching and padding) types.
FeatureDefaultValues = preprocessing_common.FeatureDefaultValues
SizeConstraints = preprocessing_common.SizeConstraints
# General preprocessing helpers.
dataset_filter_with_summary = preprocessing_common.dataset_filter_with_summary
# I/O functions (input parsing).
parse_example = graph_tensor_io.parse_example
parse_single_example = graph_tensor_io.parse_single_example
get_io_spec = graph_tensor_io.get_io_spec
# GraphTensor batching and padding.
pad_to_total_sizes = padding_ops.pad_to_total_sizes
assert_satisfies_size_constraints = padding_ops.assert_satisfies_size_constraints
satisfies_size_constraints = padding_ops.satisfies_size_constraints
# Legacy aliases for the two "size_constraints" functions just above.
assert_satisfies_total_sizes = padding_ops.assert_satisfies_size_constraints
satisfies_total_sizes = padding_ops.satisfies_size_constraints
# Learned batching and padding.
find_tight_size_constraints = batching_utils.find_tight_size_constraints
learn_fit_or_skip_size_constraints = batching_utils.learn_fit_or_skip_size_constraints
# I/O functions (output encoding).
write_example = graph_tensor_encode.write_example
# Pretty-printing.
graph_tensor_to_values = graph_tensor_pprint.graph_tensor_to_values
# Random generation.
random_graph_tensor = graph_tensor_random.random_graph_tensor
# Operations.
broadcast_node_to_edges = graph_tensor_ops.broadcast_node_to_edges
is_graph_tensor = graph_tensor_ops.is_graph_tensor
pool_edges_to_node = graph_tensor_ops.pool_edges_to_node
broadcast_context_to_nodes = graph_tensor_ops.broadcast_context_to_nodes
broadcast_context_to_edges = graph_tensor_ops.broadcast_context_to_edges
pool_nodes_to_context = graph_tensor_ops.pool_nodes_to_context
pool_edges_to_context = graph_tensor_ops.pool_edges_to_context
gather_first_node = graph_tensor_ops.gather_first_node
get_registered_reduce_operation_names = (
    graph_tensor_ops.get_registered_reduce_operation_names)
register_reduce_operation = graph_tensor_ops.register_reduce_operation
shuffle_scalar_components = graph_tensor_ops.shuffle_scalar_components
combine_values = graph_tensor_ops.combine_values
# Normalization operations.
softmax = normalization_ops.softmax
softmax_edges_per_node = normalization_ops.softmax_edges_per_node
# Schema conversion and I/O functions.
parse_schema = schema_utils.parse_schema
read_schema = schema_utils.read_schema
write_schema = schema_utils.write_schema
create_graph_spec_from_schema_pb = schema_utils.create_graph_spec_from_schema_pb
iter_sets = schema_utils.iter_sets
iter_features = schema_utils.iter_features
# Schema validation.
ValidationError = schema_validation.ValidationError
validate_schema = schema_validation.validate_schema
check_required_features = schema_validation.check_required_features
assert_constraints = schema_validation.assert_constraints
# Graph Tensor Validation Utils
check_scalar_graph_tensor = graph_tensor.check_scalar_graph_tensor
# Prune imported module symbols so they're not accessible implicitly,
# except those meant to be used as subpackages, like tfgnn.keras.*.
# Please use the same order as for the import statements at the top.
del version
del adjacency
del batching_utils
del graph_constants
del graph_tensor
del graph_tensor_encode
del graph_tensor_io
del graph_tensor_ops
del graph_tensor_pprint
del graph_tensor_random
del normalization_ops
del padding_ops
del preprocessing_common
del schema_utils
del schema_validation
del tag_utils
del graph_schema
| StarcoderdataPython |
12823183 | <reponame>liuyepiaoxiang/es6-learning
#!usr/bin/sh
# -*- coding:utf8 -*-
# this is a guess number game
import random
# Secret number is drawn from 1..20 inclusive (randint bounds are inclusive).
secretNumber = random.randint(1,20)
# Bug fix: the prompt claimed the range was 0..20 while the secret is 1..20.
print('请猜一个1到20的数')
# ask the player to guess 6 times.
for guessTaken in range(1,7):
    print('Take a guess.')
    guess = int(input())
    if guess < secretNumber:
        print('你猜的数字太小')  # too small
    elif guess > secretNumber:
        print('你猜的数字太大')  # too big
    else:
        break  # correct guess -- stop early
if guess == secretNumber:
    print('好NB!你猜了'+str(guessTaken)+'次就猜中了')  # success + attempt count
else:
    print('很抱歉,谜底是'+str(secretNumber))  # out of guesses: reveal the answer
11231552 | <reponame>zz-xx/robox86<filename>cogs/others/WebtoonsUpdates.py
from datetime import date
import discord
from discord.ext import commands
from x86 import helpers
class WebtoonsUpdates:
    '''Reminds you when new chapter of your favorite webtoon is out'''

    # Discord id of the bot owner; the owner may always run admin subcommands.
    # Bug fix: the permission-check lambdas below previously compared against
    # corrupted placeholder tokens (a SyntaxError) and called
    # commands.has_permissions(...) whose return value is always truthy.
    OWNER_ID = 526814235779399710

    @staticmethod
    def _subscriber_entry(ctx):
        '''Per-user subscription record stored inside a guild document.'''
        return {
            'date': str(date.today()),
            'user': ctx.author.name,
            'discriminator': ctx.author.discriminator
        }

    @staticmethod
    def _guild_document(ctx, assigned_channel=None, subscribed_users=None):
        '''Build a fresh per-guild settings document for the updates collection.'''
        return {
            '_id': ctx.guild.id,
            'guild': ctx.guild.name,
            'assigned_channel': assigned_channel,
            'post_updates': True,
            'subscribed_users': subscribed_users if subscribed_users is not None else {}
        }

    @commands.group(aliases=['Webtoons', 'WebToons', 'WEBTOONS', 'webt', 'Webt'])
    async def webtoons(self, ctx: commands.Context):
        '''Commands associated with webtoons'''
        if ctx.invoked_subcommand is None:
            # Bug fix: the mention expression was corrupted (ctx.<EMAIL>.id).
            await ctx.send(f"You must specify a webtoon. <@{ctx.author.id}>")
            await ctx.send('Currently available webtoons are : ')
            cogCommands = ctx.bot.get_cog_commands(self.__class__.__name__)
            for c in cogCommands:
                # Every top-level command in this cog is a group, so c.commands exists.
                grouped = ' \n'.join(f'{com.name}\n{com.short_doc}\n\n' for com in c.commands)
                print(grouped)
                await ctx.send(f'{c.name} {c.short_doc if c.short_doc else "Nothing"}\n\n`{grouped}`')

    @webtoons.group(aliases=['unordinary', 'uno'])
    async def unOrdinary(self, ctx: commands.Context):
        '''
        The world is not perfect. Learning to deal with its flaws is just a normal part of life.
        '''
        if ctx.invoked_subcommand is None:
            await ctx.send('Holder for embed. unOrdinary description here.')

    @unOrdinary.command(aliases=['Subscribe', 'sub', 'Sub'])
    async def subscribe(self, ctx: commands.Context):
        '''Subscribe to unOrdinary updates'''
        docCount = await ctx.bot.unOrdinaryCollection.count_documents({})
        if docCount == 0:
            # First ever subscriber: seed the chapter-tracker document (_id 1).
            await ctx.send('Empty Collection')
            tempDoc = {
                '_id': 1,
                'current_chap_id': None
            }
            result = await ctx.bot.unOrdinaryCollection.insert_one(tempDoc)
            await ctx.send(f'result = {result}')
            document = self._guild_document(
                ctx, subscribed_users={str(ctx.author.id): self._subscriber_entry(ctx)})
            result = await ctx.bot.unOrdinaryCollection.insert_one(document)
            await ctx.send(f'result: {result.inserted_id}')
            await ctx.send(f'Successfully subscribed to unOrdinary updates. <@{ctx.author.id}>')
        else:
            await ctx.send('Collection not empty anymore.')
            result = await ctx.bot.unOrdinaryCollection.find_one({'_id': ctx.guild.id})
            if result is None:
                # First subscriber from this guild: create its document.
                document = self._guild_document(
                    ctx, subscribed_users={str(ctx.author.id): self._subscriber_entry(ctx)})
                result = await ctx.bot.unOrdinaryCollection.insert_one(document)
                await ctx.send(f'result: {result.inserted_id}')
                await ctx.send(f'Successfully subscribed to unOrdinary updates. <@{ctx.author.id}>')
            else:
                subscribedUsers = result['subscribed_users']
                # User ids are stored as string keys.
                if str(ctx.author.id) in subscribedUsers:
                    await ctx.send(f"You're already subscribed. <@{ctx.author.id}>")
                    return
                result['subscribed_users'][str(ctx.author.id)] = self._subscriber_entry(ctx)
                print(result)
                status = await ctx.bot.unOrdinaryCollection.replace_one({'_id': ctx.guild.id}, result)
                await ctx.send(f'Replaced {status.modified_count} document.')
                await ctx.send(f'Successfully subscribed to unOrdinary updates. <@{ctx.author.id}>')

    @unOrdinary.command(aliases=['unsub', 'Unsub', 'Unsubscribe'])
    async def unsubscribe(self, ctx: commands.Context):
        '''Unsubscribe from unOrdinary updates'''
        result = await ctx.bot.unOrdinaryCollection.find_one({'_id': ctx.guild.id})
        if result is not None:
            try:
                del result['subscribed_users'][str(ctx.author.id)]
                status = await ctx.bot.unOrdinaryCollection.replace_one({'_id': ctx.guild.id}, result)
                await ctx.send(f'Replaced {status.modified_count} document.')
                # Bug fix: message used to say "One Piece" (copy-paste leftover
                # from the cog this one was ported from).
                await ctx.send(f'Successfully unsubscribed from unOrdinary updates. <@{ctx.author.id}>')
            except KeyError:
                await ctx.send(f'You were never subscribed to begin with. <@{ctx.author.id}>')
                return
        else:
            return

    # Allow only the owner or members with manage_channels to run this command.
    @commands.check(lambda ctx: ctx.author.id == WebtoonsUpdates.OWNER_ID
                    or ctx.author.guild_permissions.manage_channels)
    @unOrdinary.command(aliases=['set'])
    async def set_channel(self, ctx: commands.Context, inputTextChannel:str):
        '''
        Set a channel where bot will post the updates.
        Only users with manage_channels permission or admins can do this.
        If channel is already assigned it would be over written with the new one.
        '''
        try:
            textChannel = await helpers.textchannel_by_substring(ctx, inputTextChannel)
            result = await ctx.bot.unOrdinaryCollection.find_one({'_id': ctx.guild.id})
            if result is None:
                # Record for guild doesn't exist: create one with the channel set.
                document = self._guild_document(ctx, assigned_channel=textChannel.id)
                result = await ctx.bot.unOrdinaryCollection.insert_one(document)
                await ctx.send(f'result: {result.inserted_id}')
            else:
                # Record exists: just (over)write the assigned channel.
                result['assigned_channel'] = textChannel.id
                status = await ctx.bot.unOrdinaryCollection.replace_one({'_id': ctx.guild.id}, result)
                await ctx.send(f'Replaced {status.modified_count} document.')
            await ctx.send(f'Channel **`{textChannel}`** found. Updates will be posted on that channel.')
        except commands.BadArgument:
            await ctx.send(f"No text channel **`{inputTextChannel}`** found. Try again!")

    @commands.check(lambda ctx: ctx.author.id == WebtoonsUpdates.OWNER_ID
                    or ctx.author.guild_permissions.manage_channels)
    @unOrdinary.command()
    async def stop(self, ctx: commands.Context):
        '''
        Stop receiving unOrdinary updates even if users are subscribed.
        Only users with manage_channels permission or admins can do this
        '''
        result = await ctx.bot.unOrdinaryCollection.find_one({'_id': ctx.guild.id})
        if result is not None:
            if result['post_updates'] == False:
                await ctx.send('unOrdinary updates have already been stopped in this guild. Nothing to do.')
            else:
                result['post_updates'] = False
                status = await ctx.bot.unOrdinaryCollection.replace_one({'_id': ctx.guild.id}, result)
                await ctx.send(f'Replaced {status.modified_count} document.')
                await ctx.send('Successfully stopped bot from posting unOrdinary updates in this guild.')
        else:
            await ctx.send("No one in this guild is subscribed to unOrdinary updates. \nGuild is already not receiving any updates.")

    @commands.check(lambda ctx: ctx.author.id == WebtoonsUpdates.OWNER_ID
                    or ctx.author.guild_permissions.manage_channels)
    @unOrdinary.command()
    async def start(self, ctx: commands.Context):
        '''
        Start receiving unOrdinary updates in the guild.
        Only users with manage_channels permission or admins can do this.
        Even if you start the updates and no users are subscribed, no updates will be posted.
        Only use this if you stopped updates previously and wanna resume them.
        '''
        result = await ctx.bot.unOrdinaryCollection.find_one({'_id': ctx.guild.id})
        if result is not None:
            if result['post_updates'] == True:
                await ctx.send('unOrdinary updates are already enabled in this guild. Nothing to do.')
            else:
                result['post_updates'] = True
                status = await ctx.bot.unOrdinaryCollection.replace_one({'_id': ctx.guild.id}, result)
                await ctx.send(f'Replaced {status.modified_count} document.')
                await ctx.send('This guild will now start receiving unOrdinary updates on assigned channel.')
        else:
            # No record yet: create one (updates enabled, nobody subscribed).
            document = self._guild_document(ctx)
            result = await ctx.bot.unOrdinaryCollection.insert_one(document)
            await ctx.send(f'result: {result.inserted_id}')
def setup(bot):
    # Standard discord.py extension entry point: attach this cog to the bot.
    bot.add_cog(WebtoonsUpdates())
| StarcoderdataPython |
6419546 | """
TODO: Move to `godopy/contrib/scripts/` and allow installation from there
"""
from godot import bindings
from godot.nativescript import register_tool_class, register_method
from IPython.terminal.embed import InteractiveShellEmbed
# Seconds between REPL interactions driven from the Godot main loop.
POLL_INTERVAL = 0.05
class GodotPythonShell(InteractiveShellEmbed):
    # IPython shell embedded in the Godot main loop: interact() performs a
    # single prompt/execute cycle instead of IPython's blocking loop.
    def init_virtualenv(self):
        # Skip IPython's virtualenv detection (not meaningful inside Godot).
        pass
    def interact(self):
        # One prompt iteration; returns truthy when the shell wants to exit.
        print(self.separate_in, end='')
        try:
            code = self.prompt_for_code()
        except EOFError:
            # Ctrl-D: confirm before flipping keep_running off via ask_exit().
            if self.ask_yes_no('Do you really want to exit ([y]/n)?', 'y', 'n'):
                self.ask_exit()
        else:
            if code:
                self.run_cell(code, store_history=True)
        return not self.keep_running
class TermShell(bindings.MainLoop):
    # Godot MainLoop subclass that pumps the embedded IPython shell from _idle().
    def _initialize(self):
        self.ipshell = GodotPythonShell()
        self.ipshell.show_banner()
        self.ipshell.keep_running = True
        # Accumulated idle time since the last REPL interaction.
        self.time_elapsed = 0
    def _idle(self, delta):
        # Throttle the REPL to one interaction per POLL_INTERVAL seconds.
        self.time_elapsed += delta
        if self.time_elapsed >= POLL_INTERVAL:
            self.time_elapsed = 0
            # A truthy return value asks Godot to quit the main loop.
            return self.ipshell.interact()
    @staticmethod
    def _register_methods():
        # Expose the overridden lifecycle hooks to the Godot runtime.
        register_method(TermShell, '_initialize')
        register_method(TermShell, '_idle')
def _init():
    # NativeScript entry point: register TermShell as a tool class.
    register_tool_class(TermShell)
| StarcoderdataPython |
3209517 | <reponame>andrewdbell/downhill_skier
import pygame
pygame.init()
# NOTE(review): this display is re-created in main() with a different size;
# the module-level init is still needed so FONT below can be created.
screen = pygame.display.set_mode((640, 480))
COLOR_INACTIVE = pygame.Color('lightskyblue3')
COLOR_ACTIVE = pygame.Color('dodgerblue2')
FONT = pygame.font.Font(None, 32)
class InputBox:
    # Minimal single-line text-input widget for pygame.
    def __init__(self, x, y, w, h, text=''):
        self.rect = pygame.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        self.txt_surface = FONT.render(text, True, self.color)
        # Only the focused ("active") box accepts keystrokes.
        self.active = False
    def handle_event(self, event):
        # Mouse clicks toggle focus; key presses edit the focused box.
        if event.type == pygame.MOUSEBUTTONDOWN:
            # If the user clicked on the input_box rect.
            if self.rect.collidepoint(event.pos):
                # Toggle the active variable.
                self.active = not self.active
            else:
                self.active = False
            # Change the current color of the input box.
            self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
        if event.type == pygame.KEYDOWN:
            if self.active:
                if event.key == pygame.K_RETURN:
                    # Enter "submits": print the content and clear the field.
                    print(self.text)
                    self.text = ''
                elif event.key == pygame.K_BACKSPACE:
                    self.text = self.text[:-1]
                else:
                    self.text += event.unicode
                # Re-render the text.
                self.txt_surface = FONT.render(self.text, True, self.color)
    def update(self):
        # Resize the box if the text is too long.
        width = max(200, self.txt_surface.get_width()+10)
        self.rect.w = width
    def draw(self, screen):
        # Blit the text.
        screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))
        # Blit the rect.
        pygame.draw.rect(screen, self.color, self.rect, 2)
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 300
WHITE = (255, 255, 255)
def main():
    """Demo loop: two InputBox widgets on a white screen until the window closes."""
    # NOTE(review): pygame.init() was already called at module import time;
    # the second call here is harmless but redundant.
    pygame.init()
    screen = pygame.display.set_mode(size=(SCREEN_WIDTH, SCREEN_HEIGHT))
    clock = pygame.time.Clock()
    input_box1 = InputBox(100, 100, 100, 32)
    input_box2 = InputBox(100, 150, 100, 32)
    input_boxes = [input_box1, input_box2]
    done = False
    while not done:
        # Dispatch every pending event to every box; quit on window close.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            for box in input_boxes:
                box.handle_event(event)
        screen.fill(WHITE)
        for box in input_boxes:
            box.update()
            box.draw(screen)
        pygame.display.update()
# Run the demo, then shut pygame down cleanly.
if __name__ == "__main__":
    main()
    pygame.quit()
| StarcoderdataPython |
1845780 | # Task 2: Maximum Value calculation
def maximum(value_list):
    """Return the largest value in *value_list* using a manual scan.

    Raises:
        ValueError: if *value_list* is empty (mirrors the built-in max()).
    """
    # Guard against an empty list; indexing [0] below would otherwise
    # raise a less informative IndexError.
    if not value_list:
        raise ValueError("maximum() arg is an empty sequence")
    # Initialize the maximum value with the first value in the list
    max_value = value_list[0]
    # Iterate through all values of the list
    for value in value_list:
        # If the current value in the list is higher than the actual max value ...
        if value > max_value:
            # ... then assign this new value to max_value, redefining the maximum
            max_value = value
    # Return the found maximum value
    return max_value
def maximum2(value_list):
    """Return the largest value by delegating to the built-in max()."""
    largest = max(value_list)
    return largest
# Running the function with example data (script entry point only).
if __name__ == '__main__':
    maximum_list = [5, 2, 89, 34, 545, 23, 57, 234, 464]
    print("\nResults for Maximum in List", maximum_list)
    print(maximum(maximum_list))
| StarcoderdataPython |
11229991 | <filename>v1.0.0.test/toontown/parties/DistributedPartyCannonActivityAI.py
from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPartyActivityAI import DistributedPartyActivityAI
class DistributedPartyCannonActivityAI(DistributedPartyActivityAI):
    """AI-side stub for the party cannon activity.

    Every handler below is an unimplemented placeholder; the todo*
    parameter names come from the generated distributed-class
    signatures -- TODO confirm against the .dc file.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyCannonActivityAI')
    def setMovie(self, todo0, todo1):
        pass
    def setLanded(self, todo0):
        pass
    def setCannonWillFire(self, todo0, todo1, todo2):
        pass
    def cloudsColorRequest(self):
        pass
    def cloudsColorResponse(self, todo0):
        pass
    def requestCloudHit(self, todo0, todo1, todo2, todo3):
        pass
    def setCloudHit(self, todo0, todo1, todo2, todo3):
        pass
    def setToonTrajectoryAi(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6, todo7, todo8, todo9):
        pass
    def setToonTrajectory(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6, todo7, todo8, todo9, todo10):
        pass
    def updateToonTrajectoryStartVelAi(self, todo0, todo1, todo2):
        pass
def updateToonTrajectoryStartVel(self, todo0, todo1, todo2, todo3):
pass | StarcoderdataPython |
1860168 | from random import *
import numpy as np
import time
import logging
import tkinter as tk
from PIL import Image, ImageTk
import pygame as pygame
#*************************************************************************
# Classe Paquet:
# Une carte est representé par sa valuer, sa figure et le fichier image
# le represantant.
# La classe offre divers methods pour comparer deux cartes entre elles
#*************************************************************************
class Carte:
    """A rudimentary playing-card class defined by:\n
    - its value : 1 to 10, Valet (Jack), Dame (Queen), Roi (King)\n
    - its suit : Carreau, Coeur, Pique, Trèfle\n
    - its figure (the name of the matching image file)"""
    # Class-level defaults; each instance overwrites them in __init__.
    __valeur = 0
    __couleur = 0
    __figure = ""
    def __init__(self, valeur, couleur):
        """String*String->Carte
        Builds the Carte object with the supplied value and suit"""
        self.Attribuer_Valeur(valeur)
        self.Attribuer_Couleur(couleur)
        self.__Attribuer_Figure(self.__valeur, self.__couleur)
    def Obtenir_Valeur(self):
        """None->String
        Returns the card's value as a string"""
        if self.__valeur < 11:
            return str(self.__valeur)
        elif self.__valeur == 11:
            return "Valet"
        elif self.__valeur == 12:
            return "Dame"
        elif self.__valeur == 13:
            return "Roi"
    def Obtenir_Couleur(self):
        """None->String
        Returns the card's suit as a string"""
        if self.__couleur == 0:
            return "Carreau"
        elif self.__couleur == 1:
            return "Coeur"
        elif self.__couleur == 2:
            return "Pique"
        elif self.__couleur == 3:
            return "Trèfle"
    def Obtenir_Code_Couleur(self):
        # Numeric suit code (0..3); used by the GUI to build image-cache keys.
        return self.__couleur
    def Obtenir_Figure(self):
        """None->String
        Returns the name of the image file for this card"""
        return self.__figure
    def Attribuer_Valeur(self, valeur):
        """String->None
        Changes the card's value (and refreshes the figure)"""
        if valeur == "Valet":
            self.__valeur = 11
        elif valeur == "Dame":
            self.__valeur = 12
        elif valeur == "Roi":
            self.__valeur = 13
        else:
            self.__valeur = int(valeur)
        self.__Attribuer_Figure(self.__valeur, self.__couleur)
    def Attribuer_Couleur(self, couleur):
        """String->None
        Changes the card's suit (and refreshes the figure)"""
        if couleur == "Carreau":
            self.__couleur = 0
        elif couleur == "Coeur":
            self.__couleur = 1
        elif couleur == "Pique":
            self.__couleur = 2
        elif couleur == "Trèfle":
            self.__couleur = 3
        self.__Attribuer_Figure(self.__valeur, self.__couleur)
    def __Attribuer_Figure(self, valeur, couleur):
        """String*String->None
        Sets the image file name from the value and the suit
        (the parameters are ignored; state is read from self)"""
        #self.__figure = str(self.__valeur*10+self.__couleur)+".jpg"
        self.__figure = f"{self.Obtenir_Valeur().lower()}-{self.Obtenir_Couleur().lower()}.png"
    def __repr__(self):
        """None->None
        Lets the card be displayed when passed to print"""
        return "{0}-{1}".format(self.Obtenir_Valeur(), self.Obtenir_Couleur())
    def __eq__(self, carte):
        # Strict equality: same suit AND same value.
        return ((self.Obtenir_Couleur() == carte.Obtenir_Couleur()) and (self.Obtenir_Valeur() == carte.Obtenir_Valeur()))
    # Methods comparing this card's value (self) against another card
    # passed as input. The suit plays no part in these comparisons.
    def __valeur_eq__(self, carte):
        return (self.__valeur == carte.__valeur)
    def __valeur_gt__(self, carte):
        if self.__valeur_eq__(carte):
            return False
        #The Ace (1) beats any other card, except on a tie with another Ace
        if self.__valeur == 1:
            return True
        elif carte.__valeur == 1:
            return False
        else:
            return (self.__valeur > carte.__valeur)
    def __valeur_lt__(self, carte):
        if self.__valeur_eq__(carte):
            return False
        return not (self.__valeur_gt__(carte))
#*************************************************************************
# Classe Paquet:
# Un paquet est constinué de N cartes. Le constructeur prend en
# entrée le nombre de cartes du paquet que l'on désir créer
#*************************************************************************
class Paquet:
    """A deck of N cards. The constructor takes the desired deck size:
    32 and 52 build the standard French decks; any other size draws
    that many distinct random cards from the 52-card value set."""
    def __init__(self, nb_cartes):
        self.__deck = []
        self.__nb_cartes = nb_cartes
        couleurs = ["Carreau", "Coeur", "Pique", "Trèfle"]
        valeurs = { 52: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Valet", "Dame", "Roi"],
                    32: ["1", "7", "8", "9", "10", "Valet", "Dame", "Roi"] }
        if self.__nb_cartes in [32, 52]:
            for couleur in couleurs:
                for valeur in valeurs[self.__nb_cartes]:
                    self.__deck.append(Carte(valeur, couleur))
        else:
            # Arbitrary size: keep drawing random cards until nb_cartes distinct ones exist.
            while len(self.__deck) < nb_cartes:
                rand_couleur = randint(0,len(couleurs)-1)
                rand_valeur = randint(0,len(valeurs[52])-1)
                new_card = Carte(valeurs[52][rand_valeur], couleurs[rand_couleur])
                if not self.Carte_Existe(new_card):
                    self.__deck.append(Carte(valeurs[52][rand_valeur], couleurs[rand_couleur]))
    def Obtenir_nombre_cartes(self):
        # Requested deck size (matches len(self.__deck) by construction).
        return self.__nb_cartes
    def Obtenir_cartes(self):
        # Returns the live list of Carte objects (not a copy).
        return self.__deck
    def afficher(self):
        print(self.Obtenir_cartes())
    def melanger(self):
        # Shuffle the deck in place.
        shuffle(self.Obtenir_cartes())
    def Carte_Existe(self, card):
        """Return True if a card with the same value and suit is already in the deck."""
        if len(self.__deck) > 0:
            for i in range(len(self.__deck)):
                if card.__eq__(self.__deck[i]):
                    return True
        return False
#*************************************************************************
# Classe Bataille:
# Classe definisaant le jeu ainsi que toutes les actions lié au jeu
#*************************************************************************
class Bataille:
    """Game engine for the card game "Bataille" (War): defines the game
    itself and every action tied to it (dealing, rounds, win detection)."""
    def __init__(self, nb_cartes):
        self.__paquet = Paquet(nb_cartes)
        # Player hands, keyed by player number (1 and 2).
        self.__cartes_joueurs = {1:[], 2:[]}
    #-----------------------------------------------------------------------
    # Initialises a new game
    #-----------------------------------------------------------------------
    def initialiser_partie(self):
        # In DEBUG mode each player's hand is built manually;
        # otherwise half of the shuffled deck is dealt to each player.
        if DEBUG:
            self.__cartes_joueurs[1] = []
            self.__cartes_joueurs[2] = []
            couleurs = ["Carreau", "Coeur", "Pique", "Trèfle"]
            for valeur in ["5", "9", "Dame", "10", "4", "10"]:
                rand_couleur = randint(0,len(couleurs)-1)
                self.__cartes_joueurs[1].append(Carte(valeur, couleurs[rand_couleur]))
            for valeur in ["8", "9", "Dame", "10", "7", "2"]:
                rand_couleur = randint(0,len(couleurs)-1)
                self.__cartes_joueurs[2].append(Carte(valeur, couleurs[rand_couleur]))
        else:
            self.__paquet.melanger()
            paquet_complet = self.__paquet.Obtenir_cartes()
            #Deal half of the deck to each player
            index_milieu = len(paquet_complet) // 2
            self.__cartes_joueurs[1] = paquet_complet[:index_milieu]
            self.__cartes_joueurs[2] = paquet_complet[index_milieu:]
    #-----------------------------------------------------------------------
    # Input: -
    # Output: the game's deck of cards
    #-----------------------------------------------------------------------
    def obtenir_paquet(self):
        return self.__paquet
    #-----------------------------------------------------------------------
    # Input: a player number (1 or 2)
    # Output: the list representing that player's cards
    #-----------------------------------------------------------------------
    def obtenir_cartes_joueur(self, joueur):
        return self.__cartes_joueurs[joueur]
    #-----------------------------------------------------------------------
    # One round of play (imperative version): takes both players' card lists,
    # compares the first card of each and settles the winner.
    # On return both lists have been updated in place.
    #-----------------------------------------------------------------------
    def tour_de_jeu_imperatif(self, cartes_joueur_1, cartes_joueur_2):
        if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
            cartes_joueur_1.append(cartes_joueur_1[0])
            cartes_joueur_1.append(cartes_joueur_2[0])
            del cartes_joueur_1[0]
            del cartes_joueur_2[0]
        elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
            cartes_joueur_2.append(cartes_joueur_1[0])
            cartes_joueur_2.append(cartes_joueur_2[0])
            del cartes_joueur_1[0]
            del cartes_joueur_2[0]
        else:
            #On a tie: war ("Bataille")
            index_egalite = 0
            while cartes_joueur_1[index_egalite].__valeur_eq__(cartes_joueur_2[index_egalite]):
                if len(cartes_joueur_1[index_egalite + 1:]) == 0:
                    break
                elif len(cartes_joueur_2[index_egalite + 1:]) == 0:
                    break
                else:
                    if cartes_joueur_1[index_egalite + 1].__valeur_eq__(cartes_joueur_2[index_egalite + 1]):
                        index_egalite +=1
                    else:
                        break
            logging.debug(f"Indice Egalite: {index_egalite}")
            if cartes_joueur_1[index_egalite + 1].__valeur_gt__(cartes_joueur_2[index_egalite + 1]):
                for i in range(0, index_egalite + 1):
                    cartes_joueur_1.append(cartes_joueur_1[i])
                    cartes_joueur_1.append(cartes_joueur_2[i])
                    del cartes_joueur_1[i]
                    del cartes_joueur_2[i]
            elif cartes_joueur_1[index_egalite + 1].__valeur_lt__(cartes_joueur_2[index_egalite + 1]):
                for i in range(0, index_egalite +1):
                    cartes_joueur_2.append(cartes_joueur_1[i])
                    cartes_joueur_2.append(cartes_joueur_2[i])
                    del cartes_joueur_1[i]
                    del cartes_joueur_2[i]
    #-----------------------------------------------------------------------
    # One round of play: takes both players' card lists, compares the first
    # card of each and settles the winner.
    # On a tie the next card is examined, and so on, until the values differ.
    # As soon as one player has a stronger card he sweeps all the preceding
    # tied cards.
    # Recursion is used to keep the implementation of this algorithm simple.
    # Returns +k when player 1 wins k cards, -k when player 2 does.
    #-----------------------------------------------------------------------
    def tour_de_jeu(self, cartes_joueur_1, cartes_joueur_2, nb_batailles=0):
        if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
            return 1 * (nb_batailles +1)
        elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
            return -1 * (nb_batailles +1)
        else:
            #On a tie: war -- recurse on the next cards
            return self.tour_de_jeu(cartes_joueur_1[1:], cartes_joueur_2[1:], nb_batailles+1)
    def tour_de_jeu2(self, cartes_joueur_1, cartes_joueur_2):
        # NOTE(review): appears to be dead code -- tour_de_jeu returns an int,
        # so indexing resultat[0]/resultat[1] below would raise TypeError.
        if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
            return 1, 1
        elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
            return 2, 1
        else:
            #On a tie: war -- recursive call
            resultat = self.tour_de_jeu(cartes_joueur_1[1:], cartes_joueur_2[1:])
            return resultat[0], 1 + resultat[1]
    #------------------------------------------------------------------
    # Returns the winning player (1 or 2).
    # Returns 0 if there is no winner yet.
    #------------------------------------------------------------------
    def gagnant(self):
        if len(self.__cartes_joueurs[1]) == 0:
            return 2
        elif len(self.__cartes_joueurs[2]) == 0:
            return 1
        else:
            return 0
    #------------------------------------------------------------------
    # Returns True when one of the players has run out of cards.
    #------------------------------------------------------------------
    def partie_finie(self):
        if self.gagnant() == 0:
            return False
        else:
            return True
    #------------------------------------------------------------------
    # Starts a game (console mode): deals, then plays rounds until done.
    #------------------------------------------------------------------
    def commencer_partie(self):
        self.initialiser_partie()
        logging.info("Commencons le jeu")
        # Keep fighting until one of the players has no cards left
        i = 1
        while not self.partie_finie():
            input(f"Appuyer sur une touche pour le round {i}")
            logging.info(f"Joueur 1:{self.__cartes_joueurs[1]} -- Joueur 2:{self.__cartes_joueurs[2]}")
            resultat = self.tour_de_jeu(self.obtenir_cartes_joueur(1), self.obtenir_cartes_joueur(2))
            logging.info(f"Resultat: {resultat}")
            # Positive result: player 1 wins |resultat| pairs; negative: player 2.
            if resultat > 0:
                for j in range(0, resultat):
                    self.obtenir_cartes_joueur(1).append(self.obtenir_cartes_joueur(1)[0])
                    self.obtenir_cartes_joueur(1).append(self.obtenir_cartes_joueur(2)[0])
                    del self.obtenir_cartes_joueur(1)[0]
                    del self.obtenir_cartes_joueur(2)[0]
            elif resultat < 0:
                for j in range(0, (resultat * -1 )):
                    self.obtenir_cartes_joueur(2).append(self.obtenir_cartes_joueur(1)[0])
                    self.obtenir_cartes_joueur(2).append(self.obtenir_cartes_joueur(2)[0])
                    del self.obtenir_cartes_joueur(1)[0]
                    del self.obtenir_cartes_joueur(2)[0]
            '''
            joueur_gagnant = resultat[0]
            nb_cartes_gagnees = resultat[1]
            if joueur_gagnant == 1:
                joueur_perdant = 2
            else:
                joueur_perdant = 1
            for i in range(0, nb_cartes_gagnees):
                self.obtenir_cartes_joueur(joueur_gagnant).append(self.obtenir_cartes_joueur(joueur_gagnant)[0])
                self.obtenir_cartes_joueur(joueur_gagnant).append(self.obtenir_cartes_joueur(joueur_perdant)[0])
                del self.obtenir_cartes_joueur(joueur_gagnant)[0]
                del self.obtenir_cartes_joueur(joueur_perdant)[0]
            '''
            logging.info(f"Joueur 1:{self.__cartes_joueurs[1]} -- Joueur 2:{self.__cartes_joueurs[2]}")
            i += 1
#*************************************************************************
# Classe BatailleGraphique:
# Cette classe gère toute la partie graphique du jeu mais ulilse la
# classe Bataille comme moteur du jeu lui même. Ainsi toutes les règles
# du jeu, et initialisations de cartes sont fait dans la classe Bataille.
#*************************************************************************
class BatailleGraphique():
    """Tkinter front end for the game. This class handles all the graphics
    but uses the Bataille class as the engine: all game rules and card
    initialisation happen in Bataille."""
    # ------------------------------------------------------------------
    # Initialization Functions:
    # ------------------------------------------------------------------
    def __init__(self):
        self.jeu= Bataille(0)
        self.window = tk.Tk()
        self.debug_mode = tk.BooleanVar(value=DEBUG)
        self.window.title('Bataille')
        self.new_button = tk.Button()
        self.exit_button = tk.Button()
        self.width=1024
        self.height=600
        self.canvas = tk.Canvas(self.window, width=self.width, height=self.height)
        self.command_canvas = tk.Canvas(self.window, width=self.width, height=50, bd=1, relief='groove')
        self.canvas.pack()
        self.command_canvas.pack()
        self.initialise_gui()
    def debug_changed(self):
        # Checkbox callback: mirror the widget state into the global DEBUG flag.
        print(f"Changed: {self.debug_mode.get()}")
        global DEBUG
        DEBUG = self.debug_mode.get()
        if self.debug_mode.get():
            self.canvas.create_text(self.width/2, 20, font="cmr 24 bold", fill="Black", text="Mode DEBUG", tags=["debug"])
        else:
            self.canvas.delete("debug")
    def initialise_gui(self):
        """Build the static scene (background, labels) and the command bar,
        then enter the Tk main loop."""
        reduction = 0.65
        bg_dimension = int(1500*reduction), int(791*reduction)
        # NOTE(review): Image.ANTIALIAS is passed to PhotoImage here, not to
        # resize(); it looks misplaced -- confirm against the Pillow docs.
        las_vegas = ImageTk.PhotoImage(Image.open('images/las_vegas_4.jpg').resize(bg_dimension), Image.ANTIALIAS)
        self.canvas.create_image(30, 50, image = las_vegas, anchor = "nw")
        self.canvas.create_line(0,self.height/2, self.width, self.height/2)
        self.canvas.create_text(90, 20, font="cmr 16 bold", fill="blue", text="Joueur 1")
        self.canvas.create_text(90, (self.height) - 15, font="cmr 16 bold", fill="red", text="Joueur 2")
        self.new_button = tk.Button(self.command_canvas, text="Nouvelle partie avec", width = 20, activebackground = "blue")
        self.new_button.configure(command=lambda: self.commencer_partie(int(entry.get())))
        self.new_button.pack(side=tk.LEFT,padx=5, pady=5)
        entry = tk.Entry(self.command_canvas, width = 5)
        entry.pack(side=tk.LEFT, padx=5, pady=5)
        entry.insert(tk.END, '32')
        label = tk.Label(self.command_canvas, text = "cartes")
        label.pack(side=tk.LEFT, padx=5, pady=5)
        tk.Checkbutton(self.command_canvas, text='Debug', command=self.debug_changed,
                       variable=self.debug_mode, onvalue=True, offvalue=False).pack(side=tk.LEFT, padx=5, pady=5)
        self.exit_button = tk.Button(self.command_canvas, text="Sortir", command=lambda: self.window.quit())
        self.exit_button.configure(width = 20, activebackground = "red")
        self.exit_button.pack(side=tk.RIGHT,padx=20, pady=5)
        self.window.mainloop()
    def afficher_carte(self, carte, pos_x, pos_y, visible=False):
        """Draw one card at (pos_x, pos_y): face up when visible, back otherwise."""
        dimension_carte = 100, 144
        if visible:
            fichier_image = f'images/{carte.Obtenir_Figure()}'
        else:
            fichier_image = 'images/dos_carte.jpg'
        image_carte=ImageTk.PhotoImage(Image.open(fichier_image).resize(dimension_carte), Image.ANTIALIAS)
        self.canvas.create_image(pos_x, pos_y, image=image_carte, tags=["carte"])
        self.window.update()
    def commencer_partie(self, nb_cartes):
        """Play a full game with nb_cartes cards, animating every round."""
        self.new_button["state"] = tk.DISABLED
        self.exit_button["state"] = tk.DISABLED
        self.jeu = Bataille(nb_cartes)
        self.jeu.initialiser_partie()
        dimension_carte = 100, 144
        decalage_carte = 25
        dos_carte = ImageTk.PhotoImage(Image.open('images/dos_carte.jpg').resize(dimension_carte), Image.ANTIALIAS)
        x_joueur_1 = 90
        y_joueur_1 = 150
        x_joueur_2 = 90
        y_joueur_2 = (self.height/2) + 150
        # Pre-load the images of both players' cards
        images_cartes={}
        for carte in (self.jeu.obtenir_cartes_joueur(1) + self.jeu.obtenir_cartes_joueur(2)):
            key=f'{carte.Obtenir_Valeur()}-{carte.Obtenir_Code_Couleur()}'
            images_cartes[key]=ImageTk.PhotoImage(Image.open(f'images/{carte.Obtenir_Figure()}').resize(dimension_carte), Image.ANTIALIAS)
        while not self.jeu.partie_finie():
            cartes_joueur_1 = self.jeu.obtenir_cartes_joueur(1)
            cartes_joueur_2 = self.jeu.obtenir_cartes_joueur(2)
            logging.debug(f"Joueur 1:{cartes_joueur_1} -- Joueur 2:{cartes_joueur_2}")
            # Player 1: show the back of every card except the first one.
            for i in range(len(cartes_joueur_1)-1, -1, -1):
                if i==0:
                    key=f'{cartes_joueur_1[i].Obtenir_Valeur()}-{cartes_joueur_1[i].Obtenir_Code_Couleur()}'
                    image_carte=images_cartes[key]
                else:
                    image_carte=dos_carte
                carte_joeur_1 = self.canvas.create_image(x_joueur_1 + (i * decalage_carte), y_joueur_1, image=image_carte, tags=["carte"])
                self.window.update()
            # Player 2: show the back of every card except the first one.
            for i in range(len(cartes_joueur_2)-1, -1, -1):
                if i==0:
                    key=f'{cartes_joueur_2[i].Obtenir_Valeur()}-{cartes_joueur_2[i].Obtenir_Code_Couleur()}'
                    image_carte=images_cartes[key]
                else:
                    image_carte=dos_carte
                carte_joeur_2 = self.canvas.create_image(x_joueur_2 + (i * decalage_carte), y_joueur_2, image=image_carte, tags=["carte"])
                self.window.update()
            time.sleep(VITESSE)
            resultat = self.jeu.tour_de_jeu(self.jeu.obtenir_cartes_joueur(1), self.jeu.obtenir_cartes_joueur(2))
            logging.debug(f"Resultat: {resultat}")
            cartes_perdante = []
            if resultat > 0:
                joueur_gagnant = 1
                joueur_perdant = 2
                cartes_perdante.append(carte_joeur_2)
                direction_annimation = -1
            elif resultat < 0:
                joueur_gagnant = 2
                joueur_perdant = 1
                cartes_perdante.append(carte_joeur_1)
                direction_annimation = 1
            # Flip over all the tied cards one by one, if there are any
            cartes_egales={1: [], 2:[]}
            if abs(resultat) > 1:
                for i in range(abs(resultat)):
                    key1=f'{cartes_joueur_1[i].Obtenir_Valeur()}-{cartes_joueur_1[i].Obtenir_Code_Couleur()}'
                    image_carte1=images_cartes[key1]
                    cartes_egales[1].append(self.canvas.create_image(x_joueur_1 + (i * decalage_carte), y_joueur_1, image=image_carte1, tags=["carte"]))
                    key2=f'{cartes_joueur_2[i].Obtenir_Valeur()}-{cartes_joueur_2[i].Obtenir_Code_Couleur()}'
                    image_carte2=images_cartes[key2]
                    cartes_egales[2].append(self.canvas.create_image(x_joueur_2 + (i * decalage_carte), y_joueur_2, image=image_carte2, tags=["carte"]))
                    self.window.update()
                    time.sleep(0.5)
            # Card animation: the losing card(s) travel diagonally towards the winner.
            # Outside of a war only one losing card moves: the weaker card.
            # During a war every tied card moves before a winning card is found.
            # We extend the losing-card list (one element) with all the preceding
            # tied cards (0 or N cards).
            cartes_perdante.extend(cartes_egales[joueur_perdant])
            for j in range(25):
                for carte_perdante in cartes_perdante:
                    self.canvas.move(carte_perdante, 1.5*j, j * direction_annimation)
                time.sleep(0.04)
                self.window.update()
            # After all the animations, update the final state of both players'
            # hands to prepare the next round.
            for i in range(abs(resultat)):
                self.jeu.obtenir_cartes_joueur(joueur_gagnant).append(self.jeu.obtenir_cartes_joueur(1)[0])
                self.jeu.obtenir_cartes_joueur(joueur_gagnant).append(self.jeu.obtenir_cartes_joueur(2)[0])
                del self.jeu.obtenir_cartes_joueur(1)[0]
                del self.jeu.obtenir_cartes_joueur(2)[0]
            self.canvas.delete("carte")
        logging.debug(f'Gagnant: {self.jeu.gagnant()} / Partie Finie: {self.jeu.partie_finie()}')
        self.fin_partie()
    def fin_partie(self):
        """Clear the scene, announce the winner and re-enable the buttons."""
        self.canvas.delete("all")
        score_text = f'Le vainqueur est le joueur {self.jeu.gagnant()}\n'
        self.canvas.create_text(self.width/2, self.height/2, font="cmr 20 bold", fill="blue", text=score_text)
        self.new_button["state"] = tk.NORMAL
        self.exit_button["state"] = tk.NORMAL
    def rejouer(self, nb_cartes):
        """Rebuild the GUI and start another game."""
        self.initialise_gui()
        self.commencer_partie(nb_cartes)
    #------------------------------------------------------------------
    # Returns the winning player (1 or 2).
    # Returns 0 if there is no winner yet.
    #------------------------------------------------------------------
    def gagnant(self):
        return self.jeu.gagnant()
def music(song):
    """Initialise the pygame mixer and play *song* once (loops=0)."""
    pygame.mixer.init()
    pygame.mixer.music.load(song)
    pygame.mixer.music.play(loops=0)
#////////////////////////////////////////////////////////////////////
#
# Point d'entrée du programme.
#
#///////////////////////////////////////////////////////////////////
def main() -> int:
    """Program entry point: start the music, configure logging, run the game."""
    music('music/september.mp3')
    # Verbose logging only in debug mode.
    if DEBUG:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.CRITICAL)
    # GUI mode launches tkinter; console mode plays a 12-card game directly.
    if GUI:
        jeu = BatailleGraphique()
    else:
        jeu = Bataille(12)
        jeu.commencer_partie()
    logging.info(f"Le gagnant est le joueur {jeu.gagnant()}")
    return 0
if __name__ == '__main__':
    # Global switches: DEBUG fixes the decks and enables logging, GUI selects
    # the tkinter front end, VITESSE is the animation delay in seconds.
    DEBUG=False
    GUI=True
    VITESSE=1.0
    main()
| StarcoderdataPython |
9754363 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from . import forms
from .models import User
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.core.mail import EmailMessage
from rest_framework.authtoken.models import Token
from django.contrib.auth import logout
from django.db import IntegrityError
def main_login(request):
    """Combined registration/login view.

    POST data containing a ``nickname`` field is treated as a registration
    submission; any other POST is treated as a login attempt. GET renders
    both forms. Registration creates an inactive user and emails an
    activation link.
    """
    if request.method == "POST":
        # NOTE(review): substring test on the raw POST dump distinguishes the
        # two forms; fragile if any login field value ever contains "nickname".
        if "nickname" in str(request.POST):
            form = forms.RegisterForm(request.POST, request.FILES)
            if form.is_valid():
                cd = form.cleaned_data
                nickname = cd["nickname"]
                password = cd["password"]
                email = cd["email"]
                avatar = cd["avatar"]
                try:
                    new_user, created = User.objects.get_or_create(email=email,
                                                                   nickname=nickname,
                                                                   avatar=avatar)
                    # Account stays inactive until the emailed link is followed.
                    new_user.set_password(password)
                    new_user.is_active = False
                    new_user.save()
                except IntegrityError:
                    return render(request, "infostring.html", {"infostring":"There's already a user with this email!"})
                current_site = get_current_site(request)
                message = render_to_string("acc_activate_email.html", {
                    "user":new_user,
                    "domain":current_site.domain,
                    "usercode":new_user.usercode,
                    "token": account_activation_token.make_token(new_user),
                })
                mail_subject = "Django chat account activation"
                to_email = email
                email = EmailMessage(mail_subject, message, to=[to_email])
                email.send()
                return render(request, "infostring.html", {"infostring":"Please confirm your email address to complete the registration."})
        else:
            form = forms.LoginForm(request.POST)
            if form.is_valid():
                cd = form.cleaned_data
                email = cd["email"]
                password = cd["password"]
                user = authenticate(request, email=email, password=password)
                if user is not None:
                    if user.is_active:
                        login(request, user)
                        return redirect("index")
                    else:
                        return render(request, "infostring.html", {"infostring":"Banned or not activated"})
                else:
                    # SECURITY FIX: previously rendered repr(cd), echoing the
                    # submitted credentials (including the password) back to
                    # the client. Show a generic failure message instead.
                    return render(request, "infostring.html", {"infostring":"Invalid email or password"})
    else:
        register_form = forms.RegisterForm()
        login_form = forms.LoginForm()
        return render(request, "login.html", {"register_form":register_form,
                                              "login_form":login_form,})
def activate(request, usercode, token):
    """Activate a freshly registered account from its emailed confirmation link.

    Looks the user up by *usercode*, verifies *token*, then activates the
    account and issues its API token. Any lookup failure renders the
    generic "invalid link" page.
    """
    try:
        user = User.objects.get(usercode=usercode)
    # FIX: the previous broad `except Exception` returned repr(E) to the
    # client, leaking internals; catch only the expected lookup failures
    # (the tuple the original code had commented out) and fall through to
    # the invalid-link page.
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        # Issue the DRF auth token now that the account is confirmed.
        Token.objects.create(user=user)
        return redirect("index")
    else:
        return render(request, "infostring.html", {"infostring":"Activation link is invalid!"})
@login_required
def index(request):
    """Main page: passes the logged-in user's DRF token to the template."""
    token = Token.objects.get(user=request.user)
    return render(request, "index.html", {"token":token,})
@login_required
def logout_view(request):
    """Log the current user out, then redirect to the index page."""
    logout(request)
    return redirect("index")
| StarcoderdataPython |
6649877 | """
This file contains puller implementations for XRay
"""
import logging
from datetime import datetime
from typing import Optional, Any, List, Set
from samcli.lib.observability.observability_info_puller import ObservabilityEventConsumer
from samcli.lib.observability.xray_traces.xray_event_puller import AbstractXRayPuller
from samcli.lib.observability.xray_traces.xray_events import XRayServiceGraphEvent
from samcli.lib.utils.time import to_utc, utc_to_timestamp
LOG = logging.getLogger(__name__)
class XRayServiceGraphPuller(AbstractXRayPuller):
    """
    ObservabilityPuller implementation which pulls XRay Service Graph
    """

    def __init__(
        self, xray_client: Any, consumer: ObservabilityEventConsumer, max_retries: int = 1000, poll_interval: int = 1
    ):
        """
        Parameters
        ----------
        xray_client : boto3.client
            XRay boto3 client instance
        consumer : ObservabilityEventConsumer
            Consumer instance which will process pulled events
        max_retries : int
            Optional maximum number of retries which can be used to pull information. Default value is 1000
        poll_interval : int
            Optional interval value that will be used to wait between calls in tail operation. Default value is 1
        """
        super().__init__(max_retries, poll_interval)
        self.xray_client = xray_client
        self.consumer = consumer
        # Hashes of service graphs already forwarded; used to de-duplicate
        # identical graphs returned by consecutive polls.
        self._previous_xray_service_graphs: Set[str] = set()

    def load_time_period(
        self,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        filter_pattern: Optional[str] = None,
    ):
        """Pull the service graph for [start_time, end_time] and feed unseen graphs to the consumer."""
        # pull xray traces service graph
        kwargs = {"StartTime": start_time, "EndTime": end_time}
        result_paginator = self.xray_client.get_paginator("get_service_graph")
        result_iterator = result_paginator.paginate(**kwargs)
        for result in result_iterator:
            services = result.get("Services", [])
            if not services:
                # FIX: message previously ended with a dangling "%s"
                # placeholder that had no corresponding argument.
                LOG.debug("No service graph found")
            else:
                # update latest fetched event
                event_end_time = result.get("EndTime", None)
                if event_end_time:
                    utc_end_time = to_utc(event_end_time)
                    latest_event_time = utc_to_timestamp(utc_end_time)
                    if latest_event_time > self.latest_event_time:
                        self.latest_event_time = latest_event_time + 1
                self._had_data = True
                xray_service_graph_event = XRayServiceGraphEvent(result)
                # Only forward graphs we have not seen before.
                if xray_service_graph_event.get_hash() not in self._previous_xray_service_graphs:
                    self.consumer.consume(xray_service_graph_event)
                    self._previous_xray_service_graphs.add(xray_service_graph_event.get_hash())

    def load_events(self, event_ids: List[str]):
        """Not supported for service graphs; logs a debug message and returns."""
        LOG.debug("Loading specific service graph events are not supported via XRay Service Graph")
| StarcoderdataPython |
3455697 | from __future__ import (absolute_import, division, print_function)
from odm2api.ODMconnection import SessionFactory
from odm2api.models import CVElevationDatum, setSchema
import pytest
__author__ = 'valentine'
dbs_readonly = [
['mysql:ODM@Localhost/', 'mysql', 'mysql+pymysql://ODM:odm@localhost/'],
['mysql"root@Localhost/', 'mysql', 'mysql+pymysql://root@localhost/'],
['mysql:ODM@Localhost/odm2', 'mysql', 'mysql+pymysql://ODM:odm@localhost/odm2'],
['mysql"root@Localhost/odm2', 'mysql', 'mysql+pymysql://root@localhost/odm2'],
['postgresql_marchantariats_none', 'postgresql',
'postgresql+psycopg2://postgres:None@localhost/marchantariats',
'marchantariats', 'postgres', None],
['postgresql_marchantariats_empty', 'postgresql',
'postgresql+psycopg2://postgres@localhost/marchantariats',
'marchantariats', 'postgres', None],
['sqlite_wof', 'sqlite', 'sqlite:///./tests/spatialite/wof2odm/ODM2.sqlite', None, None, None]
]
# Writable test databases: [name, dbtype, connection/path, db, user, password].
# FIX: a missing comma previously fused 'sqlite' and the path into one string,
# leaving the entry with 5 elements instead of 6 (compare dbs_readonly).
dbs_test = [
    ['sqlite_test', 'sqlite', './tests/spatialite/odm2_test.sqlite', None, None, None]
]
class aSessionFactory:
    """Opens an ODM2 session for the database tuple supplied by the fixture."""
    def __init__(self, request):
        # request.param is one dbs_readonly entry: [name, dbtype, connection_string, ...]
        db = request.param
        print ('dbtype', db[0], db[1])
        session_factory = SessionFactory(db[2])
        setSchema(session_factory.engine)
        assert session_factory is not None, ('failed to create a session for ', db[0], db[1])
        self.session = session_factory.getSession()
@pytest.fixture(scope='session', params=dbs_readonly)
def setup(request):
    """Session-scoped fixture: yields one aSessionFactory per configured database."""
    return aSessionFactory(request)
def test_aSessionFactory(setup):
    """Smoke test: the CV elevation-datum vocabulary table has at least one row."""
    q = setup.session.query(CVElevationDatum)
    results = q.all()
    assert len(results) > 0
| StarcoderdataPython |
5178744 | import argparse
import os
import re
from django.apps import apps
from django.db import models
from django.conf import settings
from django.core.files import File
from django.core.management.base import BaseCommand
_ = lambda x: x
DB_PATTERN = re.compile(r'^\w+\.\w+/bytes/filename/mimetype$')
class Command(BaseCommand):
    """Management command: copy legacy filesystem media into db_file_storage.

    Walks every FileField in every installed app; for fields whose
    ``upload_to`` matches the db_file_storage pattern, any stored file name
    still in the old (filesystem) format is re-saved through the field,
    which pushes its bytes into the database-backed storage.
    """

    help = _("Copy older media files into database after the migration to db_file_storage. See --help for more.")

    def __init__(self):
        super(Command, self).__init__()
        # Legacy files are resolved relative to MEDIA_ROOT.
        try:
            self.MEDIA_ROOT = getattr(settings, 'MEDIA_ROOT')
        except AttributeError:
            self.stderr.write(
                'Please configure MEDIA_ROOT in your settings. '
                'Otherwise files in standard file storage cannot be found.')

    def add_arguments(self, parser):
        # Raw formatter keeps the hand-formatted description block intact.
        parser.formatter_class = argparse.RawTextHelpFormatter
        parser.description = """
------------------------------------------------------------
Use this after the migration to db_file_storage to copy earlier media files from file system into database.
1. Migrate to db_file_storage first (change models, do makemigrations & migrate),
2. Run './manage.py files2db' to copy earlier media into db.
If --sandbox is used, media files from (earlier) standard storage will NOT be copied into db storage.
With --sandbox this can be used before migration too. Without it this will fail before the migration.
Without --sandbox the media files will be converted. (Repeated run will show that media are in db.)
Original files in MEDIA_ROOT remain unchanged.
------------------------------------------------------------
"""
        parser.add_argument('-s', '--sandbox', action="store_true",
                            help=_("sandbox; do NOT copy media from standard storage into db storage"))

    def handle(self, *args, **options):
        """Convert (or with --sandbox, only count) legacy media files."""
        sandbox = options.get('sandbox')
        total_std = 0  # running count of legacy-format files found on disk
        for app, tbl, model, fld in self.get_media_fields():
            if re.match(DB_PATTERN, fld.upload_to):
                # Field already targets db storage; inspect its stored rows.
                kwargs = {
                    '{0}__exact'.format(fld.name): '',
                }
                # Rows with a non-empty file name, locked for update.
                media_files = model.objects.select_for_update().exclude(**kwargs).only(
                    model._meta.pk.name, fld.name)
                cnt_non_db = cnt_format_unknown = 0
                for media_file in media_files:
                    field_file = getattr(media_file, fld.name)
                    if not re.match(DB_PATTERN, os.path.dirname(field_file.name)):
                        # Name is not in db-storage format -> legacy file.
                        cnt_non_db += 1
                        maybe_file = os.path.join(self.MEDIA_ROOT, field_file.name)
                        if os.path.isfile(maybe_file):
                            if not sandbox:
                                self.cp(field_file, maybe_file)
                        else:
                            # Not under MEDIA_ROOT either: location unknown.
                            cnt_format_unknown += 1
                total_std += cnt_non_db - cnt_format_unknown
                self.report(app, tbl, fld, 'db', len(media_files), cnt_non_db, cnt_format_unknown, hideinfo=sandbox)
            else:
                self.report(app, tbl, fld, 'non-db')
        if total_std:
            if sandbox:
                msg = _('No conversion made. Count of files to be converted in non-sandbox run')
            else:
                msg = _('Count of files which were copied into db')
            self.stdout.write('%s: %s' % (msg, total_std))

    @staticmethod
    def cp(field_file, filename):
        # Re-save the file through the field so the (db_file_storage)
        # backend receives its contents.
        new_name = os.path.basename(filename)
        with open(filename, 'rb') as f:
            field_file.save(new_name, File(f))

    def report(self, app, tbl, fld, storage, cntfiles=None, cnt_non_db=0, cnt_format_unknown=0, hideinfo=True):
        """Print one summary line for a model field."""
        storage += ' storage'
        if cntfiles is not None:
            cnt_std = cnt_non_db - cnt_format_unknown
            if cnt_std and not hideinfo:
                infomsg = ' (%s)' % _('will be copied into db')
            else:
                infomsg = ''
            storage += ' - contains %s file(s): %s std format%s - %s%s db format' % (
                cntfiles, cnt_std, infomsg,
                '%s unknown - ' % cnt_format_unknown if cnt_format_unknown else '', cntfiles - cnt_non_db)
        self.stdout.write('%s %s %s - %s' % (app.label, tbl, fld.name, storage))

    @staticmethod
    def get_media_fields():
        # Yield (app_config, model_name, model_class, field) for every
        # FileField (including subclasses such as ImageField).
        for app in apps.get_app_configs():
            for tbl, model in app.models.items():
                for fld in model._meta.get_fields():
                    if isinstance(fld, models.FileField):
                        yield (app, tbl, model, fld)
| StarcoderdataPython |
6645970 |
# This feature can be used to reduce the memory size consumed by the feature layer of the top MLP.
# Suppose we have n sparse features, each sparse feature is represented by an embedding of size d,
# then, we can represent the sparse embeddings by a matrix X = (n, d). The dot product between sparse
# features is X(X^T), which is a symmetric matrix of (n, n) and will be fed into the top MLP.
# Actually, we only need the upper or lower triangle to eliminate duplication. If n is large,
# such as, n = 1000, then the number of dot features fed into the MLP will be n^2/2 = 500,000.
# Considering the layer size 4096, the weight parameters will be a matrix (n^2/2, 4096), which
# may consume a large amount of precious memory resources.
# To reduce the number of dot features, we introduce a parameter called arch-project-size (k) to compress
# the embeddings. We introduce a parameter matrix Y = (n, k) to compute the weighted sum of the
# dot features. The compressed embeddings is represented by (X^T)Y. Then, we compute the compressed dot
# features by X(X^T)Y = (n, k). Therefore, we can reduce the dot features fed into MLP from n*n/2
# to n*k.
import sys
import torch
import torch.nn as nn
import numpy as np
"""
Compute the projected dot features
T: (batch_size, n, d), batched raw embeddings
x: dense features
proj_layer: the projection layer created by create_proj
"""
def project(T, x, proj_layer):
TT = torch.transpose(T, 1, 2)
# TS = torch.reshape(TT, (-1, TT.size(2)))
# TC = proj_layer(TS)
# TR = torch.reshape(TC, (-1, T.shape[2], k))
TR = proj_layer(TT)
Z = torch.bmm(T, TR)
Zflat = Z.view((T.shape[0], -1))
R = torch.cat([x] + [Zflat], dim=1)
return R
"""
Create the project layer
n: number of sparse features
m: projection size
"""
def create_proj(n, m):
# build MLP layer by layer
layers = nn.ModuleList()
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = torch.tensor(bt, requires_grad=True)
# approach 2: constant value ?
layers.append(LL)
return torch.nn.Sequential(*layers)
| StarcoderdataPython |
9655874 | """
Early fusion scorer (i.e., object-centric model using language modeling).
@author: <NAME>
@author: <NAME>
"""
from nordlys.logic.fusion.fusion_scorer import FusionScorer
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.retrieval_results import RetrievalResults
from collections import Counter
import math
import snowballstemmer
stemmer = snowballstemmer.stemmer('english')
class EarlyFusionScorer(FusionScorer):
    """Early-fusion (object-centric) scorer.

    Fuses per-document term statistics into object-level language models and
    scores each object by its smoothed query log-likelihood log P(q|o).
    """

    def __init__(self, index_name, association_file, assoc_mode, retr_params, run_id="fusion", field="content",
                 num=100):
        """
        :param index_name: name of index
        :param association_file: document-object association file
        :param assoc_mode: document-object weight mode, uniform or binary
        :param retr_params: retrieval parameters dict; key "lambda"
            (default 0.1) is the Jelinek-Mercer smoothing parameter
        :param run_id: identifier used for this run's output
        :param field: field to be searched
        :param num: number of documents to retrieve per query
        """
        self._index_name = index_name
        self._elastic = Elastic(self._index_name)
        self._lambda = retr_params.get("lambda", 0.1)
        self._field = field
        self._collection_length = self._elastic.coll_length(self._field)
        self._assoc_mode = assoc_mode
        self._num = num
        self.association_file = association_file
        self.assoc_doc = {}
        self.assoc_obj = {}
        self.run_id = run_id

    def score_query(self, query):
        """
        Scores a given query against all associated objects.

        :param query: query to be searched
        :return: RetrievalResults holding log P(q|o) per object id
        """
        # Retrieve top documents for the analyzed query.
        aquery = self._elastic.analyze_query(query)
        pr = self._elastic.search(aquery, self._field, num=self._num)
        q = self.parse(aquery)
        # Scoring objects, i.e., computing log P(q|o) term by term.
        pqo = {}
        qt = Counter(q)
        for t, ftq in qt.items():
            # Collection statistics for the stemmed term; skip terms the
            # index cannot resolve (best-effort, as in the original).
            term = stemmer.stemWords(t.split())[0]
            try:
                ftc = self._elastic.coll_term_freq(term, self._field)
                if ftc is None:  # fixed: was `== None`
                    print("Ignore term", t)
                    continue
            except Exception:  # fixed: was a bare `except:`
                print("Ignore term", t)
                continue
            ptc = ftc / self._collection_length
            # Fuse P(t|d) over associated documents for each object.
            ptd_fused = {}
            for doc_id in pr.keys():
                if doc_id in self.assoc_doc:
                    try:
                        ftd = self._elastic.term_freq(doc_id, term, self._field)
                    except Exception:  # the content of doc is empty
                        ftd = 0
                    doc_length = self._elastic.doc_length(doc_id, self._field)
                    ptd = ftd / doc_length
                    for object_id in self.assoc_doc[doc_id]:
                        if self._assoc_mode == FusionScorer.ASSOC_MODE_BINARY:
                            w_do = 1
                        elif self._assoc_mode == FusionScorer.ASSOC_MODE_UNIFORM:
                            w_do = 1 / len(self.assoc_obj[object_id])
                        else:
                            w_do = 0  # this should never happen
                        ptd_fused[object_id] = ptd_fused.get(object_id, 0) + ptd * w_do
            # Smooth with the collection model and accumulate log P(t|o).
            for object_id in self.assoc_obj.keys():
                fptd = ptd_fused.get(object_id, 0)
                pto = math.log((1 - self._lambda) * fptd + self._lambda * ptc) * ftq
                pqo[object_id] = pqo.get(object_id, 0) + pto
        return RetrievalResults(pqo)
| StarcoderdataPython |
3572559 | <filename>tests/bamResample.py<gh_stars>10-100
#!/usr/bin/env python
import sys
import re
import pysam
import random
import os
from optparse import OptionParser
# Command-line interface.
# Fixed: removed a dead duplicate OptionParser(), corrected the usage line
# (it was copy-pasted from an unrelated cell-barcode tagging tool) and the
# --reads help text (it duplicated the --bam2 help).
usage = ("usage: %prog [options] [inputs] "
         "Resample chrM reads from two bam files into a mixture bam")
opts = OptionParser(usage=usage)
opts.add_option("--bam1", help="Filename of the first bam to be input")
opts.add_option("--bam2", help="Filename of the second bam to be input")
opts.add_option("--reads", help="Total number of reads to sample into the output bam")
opts.add_option("--prop", help="Proportion of reads (between 0 and 1) to come from bam 1")
options, arguments = opts.parse_args()

# All four options are required; float() raises if --reads/--prop are
# missing or non-numeric.
bam1 = options.bam1
bam2 = options.bam2
nreads = float(options.reads)
prop = float(options.prop)
def bam_read_count_mito(bamfile):
    """Return the number of mapped reads on chrM in an indexed bam file.

    Fixed: the original docstring claimed a (mapped, unmapped) tuple but the
    function returns a single int, and the parser crashed unpacking the empty
    trailing line that pysam.idxstats output ends with.
    """
    stats = pysam.idxstats(bamfile)
    mapped = 0
    for line in stats.split("\n"):
        fields = line.rstrip().split("\t")
        if len(fields) < 4:
            # Skip the empty trailing line (and any malformed line).
            continue
        rname, rlen, nm, nu = fields
        if rname == "chrM":
            mapped += int(nm)
    return mapped
# Number of chrM reads in each input and the target count for each side
# of the mixture.
bam1counts = bam_read_count_mito(bam1)
bam2counts = bam_read_count_mito(bam2)
target1 = prop * nreads
target2 = (1-prop) * nreads

# Verify the requested mixture is achievable with the available reads.
if(target1 > bam1counts):
    sys.exit("Not enough reads in bam 1 given configuration")
if(target2 > bam2counts):
    sys.exit("Not enough reads in bam 2 given configuration")

# Per-read sampling probability for each input bam.
prop1hit = target1/bam1counts
prop2hit = target2/bam2counts

# Output file names derived from the inputs and mixing proportion.
mixy = "mix_" + str(prop) + "_" + os.path.basename(os.path.splitext(bam1)[0]) + "_" + str(1-prop) + "_" + os.path.basename(os.path.splitext(bam2)[0])
new_bam_name = mixy + ".bam"
coverageRatio = mixy + ".coverage.csv"

bam1io = pysam.AlignmentFile(bam1, "rb")
bam2io = pysam.AlignmentFile(bam2, "rb")
out = pysam.AlignmentFile("temp_" + new_bam_name, "wb", template = bam1io)

# Per-position nucleotide counters over the mitochondrial genome.
# NOTE(review): 16751 bp — presumably this organism's chrM length
# (human chrM is 16569); confirm against the reference used.
n = 16751
maxBP = 16751
countsA1 = [0] * n
countsC1 = [0] * n
countsG1 = [0] * n
countsT1 = [0] * n
countsA2 = [0] * n
countsC2 = [0] * n
countsG2 = [0] * n
countsT2 = [0] * n
# Parse bam data and write: each read is kept with probability prop1hit /
# prop2hit (Bernoulli downsampling); kept reads are written to the output
# bam and their aligned bases tallied per reference position.
n1 = 0
for read in bam1io:
    if(random.random() < prop1hit):
        seq = read.seq
        out.write(read)
        n1 = n1 + 1
        # get_aligned_pairs(True) restricts to matched (query, ref) pairs.
        for qpos, refpos in read.get_aligned_pairs(True):
            if qpos is not None and refpos is not None:
                if(seq[qpos] == "A"):
                    countsA1[refpos] += 1
                elif(seq[qpos] == "C"):
                    countsC1[refpos] += 1
                elif(seq[qpos] == "G"):
                    countsG1[refpos] += 1
                elif(seq[qpos] == "T"):
                    countsT1[refpos] += 1
bam1io.close()

# Same sampling and tallying for the second bam.
n2 = 0
for read in bam2io:
    if(random.random() < prop2hit):
        seq = read.seq
        out.write(read)
        n2 = n2 + 1
        for qpos, refpos in read.get_aligned_pairs(True):
            if qpos is not None and refpos is not None:
                if(seq[qpos] == "A"):
                    countsA2[refpos] += 1
                elif(seq[qpos] == "C"):
                    countsC2[refpos] += 1
                elif(seq[qpos] == "G"):
                    countsG2[refpos] += 1
                elif(seq[qpos] == "T"):
                    countsT2[refpos] += 1
bam2io.close()
out.close()
# Output the coverages
def writeSparseMatrixN(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11):
    # Writes the per-position coverage CSV. Relies on module-level globals:
    # coverageRatio (output path), maxBP, bam1, bam2 (for header labels).
    # Columns: BP, coverage1, coverage2, ratio, then per-base counts
    # A1,C1,G1,T1,A2,C2,G2,T2 (v4..v11).
    with open(coverageRatio,"w") as V:
        V.write("BP,"+str(os.path.basename(os.path.splitext(bam1)[0]))+","+str(os.path.basename(os.path.splitext(bam2)[0])) + ",ratio,A1,C1,G1,T1,A2,C2,G2,T2\n")
        for i in range(0,int(maxBP)-1):
            V.write(str(i+1)+","+str(v1[i])+","+str(v2[i])+","+str(v3[i])+","+str(v4[i])+","+str(v5[i])+","+str(v6[i])+","+str(v7[i])+","+str(v8[i])+","+str(v9[i])+","+str(v10[i])+","+str(v11[i])+"\n")
# Total coverage per position for each source, and bam1's share of the
# combined coverage (small pseudo-count avoids division by zero).
zipped_list1 = zip(list(countsA1),list(countsC1),list(countsG1),list(countsT1))
sums1 = [sum(item) for item in zipped_list1]
zipped_list2 = zip(list(countsA2),list(countsC2),list(countsG2),list(countsT2))
sums2 = [sum(item) for item in zipped_list2]
ratio = [round((x/( x+ y + 0.00001)), 3) for x, y in zip(sums1, sums2)]
writeSparseMatrixN(sums1, sums2, ratio, countsA1, countsC1, countsG1, countsT1, countsA2, countsC2, countsG2, countsT2)

# Cleanup: coordinate-sort, index, and drop the temporary unsorted bam.
pysam.sort("-o", new_bam_name, "temp_" + new_bam_name)
pysam.index(new_bam_name)
os.remove("temp_" + new_bam_name)
| StarcoderdataPython |
396341 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 13:20:01 2021
@author: mclea
"""
import field as f
import possion_matrix
import MG
import matplotlib.pyplot as plt
# Grid size: 2**k points per side plus two extra rows/columns
# (presumably boundary cells — see the local `field` module).
k = 7
n = 2**k+2
m = 2**k+2

# Solution field with fixed top/bottom boundary values; b is the
# all-zero right-hand side of the Poisson problem.
x = f.new_field(n, m, top=10, bottom=-10, left=0, right=0, bc=0)
b = f.new_field(n, m, top=0, bottom=0, left=0, right=0)
pm = possion_matrix.possion_matrix(x)

# MGV presumably runs multigrid V-cycle(s) — see the local MG module.
x = MG.MGV(x, b, pm)
plt.imshow(x.x)

# A, LaddU, invD = possion_matrix.create_A(field.n, field.n)
# Rj = possion_matrix.calc_RJ(field.n, field.n)
# MG.jacobi_update(x, b, pm.A[0], pm.Rj[0], pm.invD[0], nsteps=2000, max_err=1e-3)
# plt.imshow(x.x)
4962990 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
import sandbox.rocky.tf.core.layers as L
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import logger
# Unused in this chunk; presumably a "no value" sentinel distinct from
# None (a fresh list compared by identity) — TODO confirm usage.
NONE = list()
class DeterministicMLPRegressor(LayersPowered, Serializable):
    """
    A class for performing nonlinear regression.

    Wraps an MLP f(x) -> y whose inputs are whitened by stored mean/std
    variables before entering the network.
    """

    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
            optimizer=None,
            normalize_inputs=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")

            self.output_dim = output_dim
            self.optimizer = optimizer

            if network is None:
                network = MLP(
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    name="network"
                )

            l_out = network.output_layer

            LayersPowered.__init__(self, [l_out])

            xs_var = network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="ys")

            # Input-normalization statistics; initialized to identity
            # transform (mean 0, std 1) and updated externally — see the
            # commented-out fit() below.
            x_mean_var = tf.get_variable(
                name="x_mean",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(0., dtype=tf.float32)
            )
            x_std_var = tf.get_variable(
                name="x_std",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(1., dtype=tf.float32)
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})

            # NOTE(review): this is the *negative* mean squared error; an
            # optimizer minimizing it would maximize squared error. Looks
            # like a sign bug — confirm the intended loss is +MSE.
            loss = - tf.reduce_mean(tf.square(fit_ys_var - ys_var))

            self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[fit_ys_var],
            )

            optimizer_args["inputs"] = [xs_var, ys_var]

            self.optimizer.update_opt(**optimizer_args)

            self.name = name
            self.l_out = l_out

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var

    def predict_sym(self, xs):
        # Symbolic (graph-level) prediction for a tensor of inputs.
        # NOTE(review): unlike f_predict, no mean/std normalization is
        # applied here — confirm callers pre-normalize.
        return L.get_output(self.l_out, xs)

    # def fit(self, xs, ys):
    #     if self._normalize_inputs:
    #         # recompute normalizing constants for inputs
    #         new_mean = np.mean(xs, axis=0, keepdims=True)
    #         new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
    #         tf.get_default_session().run(tf.group(
    #             tf.assign(self._x_mean_var, new_mean),
    #             tf.assign(self._x_std_var, new_std),
    #         ))
    #     inputs = [xs, ys]
    #     loss_before = self._optimizer.loss(inputs)
    #     if self._name:
    #         prefix = self._name + "_"
    #     else:
    #         prefix = ""
    #     logger.record_tabular(prefix + 'LossBefore', loss_before)
    #     self._optimizer.optimize(inputs)
    #     loss_after = self._optimizer.loss(inputs)
    #     logger.record_tabular(prefix + 'LossAfter', loss_after)
    #     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)

    def predict(self, xs):
        """Run the compiled prediction function on an array of inputs."""
        return self.f_predict(np.asarray(xs))

    def get_param_values(self, **tags):
        # Delegate parameter (de)serialization to LayersPowered.
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LayersPowered.set_param_values(self, flattened_params, **tags)
| StarcoderdataPython |
3347135 | <reponame>stungkit/Copycat-abstractive-opinion-summarizer
from nltk.util import ngrams as compute_ngrams
import numpy as np
from collections import OrderedDict
# Keys of the aggregated statistics returned by ngram_seq_analysis.
X_AND_X_PROP = 'x_and_x_prop'  # mean per-sequence count of "x and x" patterns
X_AND_X_COUNT = 'x_and_x_count'  # total "x and x" pattern count over all sequences
UN_SENT_PROP_PROP = 'un_sent_prop'  # mean proportion of unique sentences per sequence
AVG_SEQ_LEN = 'avg_seq_len'  # mean sequence length in tokens
UN_SENTS = 'un_sents'  # total number of unique sentences
TOTAL_SENTS = 'total_sents'  # total number of sentences
def ngram_seq_analysis(seqs, tokenizer, sent_splitter,
                       n_grams_to_comp=(2, 3, 4, 5)):
    """
    Performs sequence repetition analytics based on:

    1. Unique N-grams proportion
    2. Unique sentences proportion
    3. X and X pattern (e.g. good and good) - the count of detected patterns

    At the moment the analytics are mainly on the level of individual sequences.
    N-grams are computed considering sentences (they never cross sentence
    boundaries).

    :param seqs: list/array of sequence strings.
    :param tokenizer: function for splitting strings to list of tokens.
    :param sent_splitter: function for splitting strings to list of sentence
        strings.
    :param n_grams_to_comp: what n-grams to consider for analysis.
    :return: list with tuples containing aggregated over the number of sequences
        stats.
    """
    n_gram_str_fn = lambda x: "un_%dgr_prop" % x
    seqs_sents = [sent_splitter(seq_sents_tokens) for seq_sents_tokens in seqs]
    # seqs_sents_tokens is a triple nested list: sequences -> sentences -> tokens
    seqs_sents_tokens = [[tokenizer(sent) for sent in sents] for sents
                         in seqs_sents]

    # For each sequence: the number of unique n-grams / total n-grams.
    stats = OrderedDict()
    for ngr in n_grams_to_comp:
        stats[n_gram_str_fn(ngr)] = []
    # Special repetition pattern observed in the generated sequences.
    # NOTE: despite the "_prop" key name, per-sequence *counts* are stored
    # here; the grand total is reported separately under X_AND_X_COUNT.
    stats[X_AND_X_PROP] = []

    total_seq_len = 0.  # accumulated token count over all sentences
    for seq_sents_tokens in seqs_sents_tokens:
        # n-gram related statistics, computed per sentence then pooled
        for ngr in n_grams_to_comp:
            n_grams = []
            for sent_toks in seq_sents_tokens:
                n_grams += list(compute_ngrams(sent_toks, ngr))
            avg_un_ngrams = float(len(set(n_grams))) / len(n_grams) if len(
                n_grams) > 0 else 0.
            stats[n_gram_str_fn(ngr)].append(avg_un_ngrams)

        # x and x patterns and seq lens
        x_and_x_count = 0
        for sent_toks in seq_sents_tokens:
            x_and_x_count += count_x_and_x_patterns(sent_toks)
            total_seq_len += len(sent_toks)
        stats[X_AND_X_PROP].append(x_and_x_count)

    # computing sentence related analytics
    stats[UN_SENT_PROP_PROP] = []
    total_un_sents = 0
    total_sents = 0
    for seq_sents in seqs_sents:
        # remove the last ./!/? if it's present, so trailing punctuation
        # doesn't make otherwise-identical sentences look unique
        # NOTE(review): assumes each sentence is a non-empty string.
        un_sents = set()
        for sent in seq_sents:
            if sent[-1] in [".", "!", "?"]:
                sent = sent[:-1]
            un_sents.add(sent)
        total_un_sents += len(un_sents)
        total_sents += len(seq_sents)
        avg_un_sents_prop = float(len(un_sents)) / len(seq_sents) if len(
            seq_sents) > 0 else 0.
        stats[UN_SENT_PROP_PROP].append(avg_un_sents_prop)

    # averaging over the number of seqs
    res = [(k, np.mean(v)) for k, v in stats.items()]

    # extra stats (totals, not averages)
    res.append((UN_SENTS, total_un_sents))
    res.append((TOTAL_SENTS, total_sents))
    res.append((AVG_SEQ_LEN, total_seq_len / len(seqs)))
    res.append((X_AND_X_COUNT, np.sum(stats[X_AND_X_PROP])))
    return res
def count_x_and_x_patterns(tokens):
    """Count "x and x" repetitions (e.g. "good and good") in a token list."""
    triples = zip(tokens, tokens[1:], tokens[2:])
    return sum(1 for left, mid, right in triples
               if mid == "and" and left == right)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.