text stringlengths 38 1.54M |
|---|
# Read two numbers from the user and report their arithmetic mean,
# formatted with a single decimal place.
first = float(input('valor 1: '))
second = float(input('valor 2: '))
average = (first + second) / 2
print('A média entre {} e {} é igual a {:.1f}'.format(first, second, average))
|
import numpy as np
import matplotlib.pyplot as plt
from src.utils2 import get_path
from src.data_interface import trd, L
# Build one overlay figure per feature: every trial's series for that feature
# is drawn as a faint blue line so the ensemble behaviour stands out.
path = get_path(__file__) + '/..'
for fname in L[2:]:
    for tid in trd.trial_id_list:
        # Raw feature series for this trial, plotted against observation index.
        v = trd.get_trial(tid).get_feature(fname).view()
        plt.plot(range(len(v)), v, 'b-', alpha=0.1)
    # NOTE(review): original indentation was lost; axis labelling and the
    # savefig/cla calls are assumed to run once per feature, after the
    # per-trial loop -- confirm against the original script.
    ax = plt.gca()
    ax.set_xlim(0,1200)
    ax.set_xlabel('Observation number')
    ax.set_ylabel(fname)
    ax.set_title('{0} in 500 trials overlayed'.format(fname))
    # Common basename; extension is appended per format below.
    plot_path = '{0}/plots/naive_{1}.'.format(path, fname)
    plt.savefig(plot_path + 'png', format='png', dpi=300)
    plt.savefig(plot_path + 'pdf', format='pdf')
    # Clear the axes so the next feature starts from an empty figure.
    plt.cla()
|
# Databricks notebook source
# MAGIC %md # Delta Generated Columns & buckets
# MAGIC Delta now supports Generated Columns syntax to specify how a column is computed from other columns.
# MAGIC A generated column is a special column that’s defined with a SQL expression when creating a table.
# MAGIC
# MAGIC This is useful to leverage partitions by applying a filter on a derived column. It's also used to generate buckets in your tables to speed up your joins
# COMMAND ----------
# DBTITLE 1,Let's prepare our data first
# MAGIC %run ./resources/00.0-setup-bucketing $reset_all_data=$reset_all_data
# COMMAND ----------
# DBTITLE 1,Let's create a table partitioned by a GENERATED column, using a standard spark expression
# MAGIC %sql
# MAGIC CREATE TABLE IF NOT EXISTS turbine_silver_partitioned
# MAGIC (AN3 DOUBLE, AN4 DOUBLE, AN5 DOUBLE, AN6 DOUBLE, AN7 DOUBLE, AN8 DOUBLE, AN9 DOUBLE, AN10 DOUBLE, SPEED DOUBLE, TORQUE DOUBLE, ID DOUBLE, TIMESTAMP TIMESTAMP,
# MAGIC yymmdd date GENERATED ALWAYS AS ( CAST(TIMESTAMP AS DATE) ))
# MAGIC USING delta;
# MAGIC
# MAGIC insert into turbine_silver_partitioned (AN3, AN4, AN5, AN6, AN7, AN8, AN9, AN10, SPEED, TORQUE, ID, TIMESTAMP) SELECT * FROM turbine_silver;
# COMMAND ----------
# MAGIC %md
# MAGIC We can now add filter on the TIMESTAMP column (typically what you'd use to filter your data from a BI tool), and the filter will be pushed down at the partition level:
# COMMAND ----------
# MAGIC %python
# MAGIC # filtering on the TIMESTAMP now push down the filter to the yymmdd partition: PartitionFilters: [(yymmdd#4702 >= cast(2020-05-09 15:17:05 as date))]
# MAGIC print(spark.sql("""explain select * from turbine_silver_partitioned where TIMESTAMP > '2020-05-09 15:17:05'""").first()[0])
# COMMAND ----------
# MAGIC %md ## Improving join performance with table bucketing
# MAGIC Table bucketing leverages generated columns to add an extra layer to your table layout (folders, like partitions).
# MAGIC
# MAGIC If both of your tables are bucketed by the field you're performing the join, the shuffle will disappear in the join as the data is already bucketed
# COMMAND ----------
# DBTITLE 1,Let's explore what is being delivered by our wind turbines stream: (json)
# MAGIC %sql
# MAGIC CREATE TABLE IF NOT EXISTS turbine_silver_bucket
# MAGIC (AN3 DOUBLE, AN4 DOUBLE, AN5 DOUBLE, AN6 DOUBLE, AN7 DOUBLE, AN8 DOUBLE, AN9 DOUBLE, AN10 DOUBLE, SPEED DOUBLE, TORQUE DOUBLE, ID DOUBLE, TIMESTAMP TIMESTAMP,
# MAGIC yymmdd date GENERATED ALWAYS AS ( CAST(TIMESTAMP AS DATE) ))
# MAGIC USING delta
# MAGIC CLUSTERED by (id) into 16 buckets ;
# MAGIC
# MAGIC insert into turbine_silver_bucket (AN3, AN4, AN5, AN6, AN7, AN8, AN9, AN10, SPEED, TORQUE, ID, TIMESTAMP) SELECT * FROM turbine_silver;
# COMMAND ----------
# MAGIC %md #### Group By operations
# COMMAND ----------
# DBTITLE 1,Non bucketed plan. Note the "Exchange hashpartitioning" stage
# MAGIC %python display_plan("select id, count(*) from turbine_silver group by id")
# COMMAND ----------
# DBTITLE 1,Bucketed, no "Exchange" stage
# MAGIC %python display_plan("select id, count(*) from turbine_silver_bucket group by id")
# COMMAND ----------
# MAGIC %md #### Bucketing also works for joins, window operation etc
# MAGIC
# MAGIC **Result at scale: up to 5x JOIN speedup with DBR8; up to 50%+ with Photon**
# COMMAND ----------
# MAGIC %sql
# MAGIC create table if not exists turbine_status_gold_bucket
# MAGIC (id int, status string)
# MAGIC using delta
# MAGIC CLUSTERED by (id) into 16 buckets ;
# MAGIC
# MAGIC insert into turbine_status_gold_bucket (id, status) SELECT * FROM turbine_status_gold;
# COMMAND ----------
# DBTITLE 1,Non bucketed plan. Note the "Exchange hashpartitioning" stage
# MAGIC %python
# MAGIC display_plan("select /*+ SHUFFLE_MERGE(status) */ * from turbine_status_gold status inner join turbine_silver data on status.id = data.id")
# MAGIC #Note: we're adding SHUFFLE_MERGE(status) hint to disable broadcast hashjoin as it's a small table
# COMMAND ----------
# DBTITLE 1,Bucketed, no "Exchange" stage
# MAGIC %python display_plan("select /*+ SHUFFLE_MERGE(status) */ * from turbine_status_gold_bucket status inner join turbine_silver_bucket data on status.id = data.id")
# MAGIC #Note: we're adding SHUFFLE_MERGE(status) hint to disable broadcast hashjoin as it's a small table
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The bioprint Project - Released under terms of the AGPLv3 License"
import io
import unittest
import mock
import bioprint.filemanager
import bioprint.filemanager.util
class FilemanagerMethodTest(unittest.TestCase):
    """Tests for the module-level helpers in ``bioprint.filemanager``.

    ``setUp`` replaces the global plugin manager with a ``MagicMock`` whose
    hooks contribute extra extension-tree entries, so every test observes a
    deterministic set of plugin-provided extensions.

    NOTE(review): indentation was reconstructed during review -- verify
    against the original file before committing.
    """

    def setUp(self):
        # mock plugin manager
        self.plugin_manager_patcher = mock.patch("bioprint.plugin.plugin_manager")
        self.plugin_manager_getter = self.plugin_manager_patcher.start()
        self.plugin_manager = mock.MagicMock()
        # Hook results merged into the extension tree: two plugins adding
        # plain extension lists, one content-type mapping, and two detectors
        # (one returning a mime type, one returning None).
        hook_extensions = dict(
            some_plugin=lambda: dict(dict(machinecode=dict(foo=["foo", "f"]))),
            other_plugin=lambda: dict(dict(model=dict(amf=["amf"]))),
            mime_map=lambda: dict(
                mime_map=dict(
                    mime_map_yes=bioprint.filemanager.ContentTypeMapping(["mime_map_yes"], "application/mime_map_yes")
                )
            ),
            mime_detect=lambda: dict(
                dict(
                    machinecode=dict(
                        mime_detect_yes=bioprint.filemanager.ContentTypeDetector(["mime_detect_yes"], lambda x: "application/mime_detect_yes"),
                        mime_detect_no=bioprint.filemanager.ContentTypeDetector(["mime_detect_no"], lambda x: None)
                    )
                )
            )
        )
        self.plugin_manager.get_hooks.return_value = hook_extensions
        self.plugin_manager_getter.return_value = self.plugin_manager

    def tearDown(self):
        # Undo the plugin-manager patch so later tests see the real one.
        self.plugin_manager_patcher.stop()

    def test_full_extension_tree(self):
        """Built-in and plugin-contributed entries are merged into the tree."""
        full = bioprint.filemanager.full_extension_tree()
        self.assertTrue("machinecode" in full)
        self.assertTrue("gcode" in full["machinecode"])
        self.assertTrue(isinstance(full["machinecode"]["gcode"], bioprint.filemanager.ContentTypeMapping))
        self.assertItemsEqual(["gcode", "gco", "g"], full["machinecode"]["gcode"].extensions)
        self.assertTrue("foo" in full["machinecode"])
        self.assertTrue(isinstance(full["machinecode"]["foo"], list))
        self.assertItemsEqual(["f", "foo"], full["machinecode"]["foo"])
        self.assertTrue("model" in full)
        self.assertTrue("stl" in full["model"])
        self.assertTrue(isinstance(full["model"]["stl"], bioprint.filemanager.ContentTypeMapping))
        self.assertItemsEqual(["stl"], full["model"]["stl"].extensions)
        self.assertTrue("amf" in full["model"])
        self.assertTrue(isinstance(full["model"]["amf"], list))
        self.assertItemsEqual(["amf"], full["model"]["amf"])

    def test_get_mimetype(self):
        """Mime type resolution: builtin mappings, hook mappings, detectors,
        and the application/octet-stream fallback for unknown extensions."""
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.stl"), "application/sla")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.gcode"), "text/plain")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.unknown"), "application/octet-stream")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.mime_map_yes"), "application/mime_map_yes")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.mime_map_no"), "application/octet-stream")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.mime_detect_yes"), "application/mime_detect_yes")
        self.assertEquals(bioprint.filemanager.get_mime_type("foo.mime_detect_no"), "application/octet-stream")

    def test_valid_file_type(self):
        """valid_file_type accepts both tree categories and leaf type names."""
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.stl", type="model"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.stl", type="stl"))
        self.assertFalse(bioprint.filemanager.valid_file_type("foo.stl", type="machinecode"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.foo", type="machinecode"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.foo", type="foo"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.foo"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.mime_map_yes"))
        self.assertTrue(bioprint.filemanager.valid_file_type("foo.mime_detect_yes"))
        self.assertFalse(bioprint.filemanager.valid_file_type("foo.unknown"))

    def test_get_file_type(self):
        """get_file_type returns the [category, leaf] path, or None if unknown."""
        self.assertEquals(["machinecode", "gcode"], bioprint.filemanager.get_file_type("foo.gcode"))
        self.assertEquals(["machinecode", "gcode"], bioprint.filemanager.get_file_type("foo.gco"))
        self.assertEquals(["machinecode", "foo"], bioprint.filemanager.get_file_type("foo.f"))
        self.assertEquals(["model", "stl"], bioprint.filemanager.get_file_type("foo.stl"))
        self.assertEquals(["model", "amf"], bioprint.filemanager.get_file_type("foo.amf"))
        self.assertIsNone(bioprint.filemanager.get_file_type("foo.unknown"))

    def test_hook_failure(self):
        """A raising hook must be logged (one logger call), not propagated."""
        def hook():
            raise RuntimeError("Boo!")
        self.plugin_manager.get_hooks.return_value = dict(hook=hook)
        with mock.patch("bioprint.filemanager.logging") as patched_logging:
            logger = mock.MagicMock()
            patched_logging.getLogger.return_value = logger
            bioprint.filemanager.get_all_extensions()
            self.assertEquals(1, len(logger.mock_calls))
class FileManagerTest(unittest.TestCase):
    """Tests for ``bioprint.filemanager.FileManager`` against fully mocked
    collaborators (analysis queue, slicing manager, printer profile manager,
    local storage).

    NOTE(review): indentation was reconstructed during review -- verify
    against the original file before committing.
    """

    def setUp(self):
        import bioprint.slicing
        import bioprint.filemanager.storage
        import bioprint.printer.profile
        self.addCleanup(self.cleanUp)
        # mock event manager
        self.event_manager_patcher = mock.patch("bioprint.filemanager.eventManager")
        event_manager = self.event_manager_patcher.start()
        event_manager.return_value.fire = mock.MagicMock()
        # Shortcut so tests can assert on fired events directly.
        self.fire_event = event_manager.return_value.fire
        # mock plugin manager
        self.plugin_manager_patcher = mock.patch("bioprint.plugin.plugin_manager")
        self.plugin_manager = self.plugin_manager_patcher.start()
        self.analysis_queue = mock.MagicMock(spec=bioprint.filemanager.AnalysisQueue)
        self.slicing_manager = mock.MagicMock(spec=bioprint.slicing.SlicingManager)
        self.printer_profile_manager = mock.MagicMock(spec=bioprint.printer.profile.PrinterProfileManager)
        self.local_storage = mock.MagicMock(spec=bioprint.filemanager.storage.LocalFileStorage)
        # Empty backlog so FileManager's constructor does not enqueue analyses.
        self.local_storage.analysis_backlog = iter([])
        self.storage_managers = dict()
        self.storage_managers[bioprint.filemanager.FileDestinations.LOCAL] = self.local_storage
        self.file_manager = bioprint.filemanager.FileManager(self.analysis_queue, self.slicing_manager, self.printer_profile_manager, initial_storage_managers=self.storage_managers)

    def cleanUp(self):
        # Registered via addCleanup: stop both patchers even if setUp failed late.
        self.event_manager_patcher.stop()
        self.plugin_manager_patcher.stop()

    def test_add_file(self):
        """add_file delegates to storage and fires UPDATED_FILES."""
        wrapper = object()
        self.local_storage.add_file.return_value = ("", "test.file")
        self.local_storage.path_on_disk.return_value = "prefix/test.file"
        test_profile = dict(id="_default", name="My Default Profile")
        self.printer_profile_manager.get_current_or_default.return_value = test_profile
        file_path = self.file_manager.add_file(bioprint.filemanager.FileDestinations.LOCAL, "test.file", wrapper)
        self.assertEquals(("", "test.file"), file_path)
        self.local_storage.add_file.assert_called_once_with("test.file", wrapper, printer_profile=test_profile, allow_overwrite=False, links=None)
        self.fire_event.assert_called_once_with(bioprint.filemanager.Events.UPDATED_FILES, dict(type="printables"))

    def test_remove_file(self):
        """remove_file delegates to storage and fires UPDATED_FILES."""
        self.file_manager.remove_file(bioprint.filemanager.FileDestinations.LOCAL, "test.file")
        self.local_storage.remove_file.assert_called_once_with("test.file")
        self.fire_event.assert_called_once_with(bioprint.filemanager.Events.UPDATED_FILES, dict(type="printables"))

    def test_add_folder(self):
        """add_folder defaults to ignore_existing=True and fires UPDATED_FILES."""
        self.local_storage.add_folder.return_value = ("", "test_folder")
        folder_path = self.file_manager.add_folder(bioprint.filemanager.FileDestinations.LOCAL, "test_folder")
        self.assertEquals(("", "test_folder"), folder_path)
        self.local_storage.add_folder.assert_called_once_with("test_folder", ignore_existing=True)
        self.fire_event.assert_called_once_with(bioprint.filemanager.Events.UPDATED_FILES, dict(type="printables"))

    def test_add_folder_not_ignoring_existing(self):
        """With ignore_existing=False the storage error must propagate."""
        self.local_storage.add_folder.side_effect = RuntimeError("already there")
        try:
            self.file_manager.add_folder(bioprint.filemanager.FileDestinations.LOCAL, "test_folder", ignore_existing=False)
            self.fail("Expected an exception to occur!")
        except RuntimeError as e:
            # NOTE(review): e.message is Python-2-only; e.args[0] would be portable.
            self.assertEquals("already there", e.message)
        self.local_storage.add_folder.assert_called_once_with("test_folder", ignore_existing=False)

    def test_remove_folder(self):
        """remove_folder defaults to recursive=True and fires UPDATED_FILES."""
        self.file_manager.remove_folder(bioprint.filemanager.FileDestinations.LOCAL, "test_folder")
        self.local_storage.remove_folder.assert_called_once_with("test_folder", recursive=True)
        self.fire_event.assert_called_once_with(bioprint.filemanager.Events.UPDATED_FILES, dict(type="printables"))

    def test_remove_folder_nonrecursive(self):
        """recursive=False must be forwarded to the storage layer."""
        self.file_manager.remove_folder(bioprint.filemanager.FileDestinations.LOCAL, "test_folder", recursive=False)
        self.local_storage.remove_folder.assert_called_once_with("test_folder", recursive=False)

    def test_get_metadata(self):
        """get_metadata is a straight pass-through to the storage layer."""
        expected = dict(key="value")
        self.local_storage.get_metadata.return_value = expected
        metadata = self.file_manager.get_metadata(bioprint.filemanager.FileDestinations.LOCAL, "test.file")
        self.assertEquals(metadata, expected)
        self.local_storage.get_metadata.assert_called_once_with("test.file")

    @mock.patch("bioprint.filemanager.util.atomic_write")
    @mock.patch("io.FileIO")
    @mock.patch("shutil.copyfileobj")
    @mock.patch("os.remove")
    @mock.patch("tempfile.NamedTemporaryFile")
    @mock.patch("time.time", side_effect=[1411979916.422, 1411979932.116])
    def test_slice(self, mocked_time, mocked_tempfile, mocked_os, mocked_shutil, mocked_fileio, mocked_atomic_write):
        """Happy-path slice: verifies the slicer call, result post-processing
        (atomic write of a MultiStream), temp-file cleanup, and the callback.

        time.time is fed two fixed values so the slicing duration is
        deterministic (15.694... seconds).
        """
        callback = mock.MagicMock()
        callback_args = ("one", "two", "three")
        # mock temporary file
        temp_file = mock.MagicMock()
        temp_file.name = "tmp.file"
        mocked_tempfile.return_value = temp_file
        # mock metadata on local storage
        metadata = dict(hash="aabbccddeeff")
        self.local_storage.get_metadata.return_value = metadata
        # mock printer profile
        expected_printer_profile = dict(id="_default", name="My Default Profile")
        self.printer_profile_manager.get_current_or_default.return_value = expected_printer_profile
        self.printer_profile_manager.get.return_value = None
        # mock get_absolute_path method on local storage
        def path_on_disk(path):
            # Accepts either a (folder, name) tuple or a plain string path.
            if isinstance(path, tuple):
                import os
                joined_path = ""
                for part in path:
                    joined_path = os.path.join(joined_path, part)
                path = joined_path
            return "prefix/" + path
        self.local_storage.path_on_disk.side_effect = path_on_disk
        # mock split_path method on local storage
        def split_path(path):
            return "", path
        self.local_storage.split_path.side_effect = split_path
        # mock add_file method on local storage
        def add_file(path, file_obj, printer_profile=None, links=None, allow_overwrite=False):
            file_obj.save("prefix/" + path)
            return "", path
        self.local_storage.add_file.side_effect = add_file
        # mock slice method on slicing manager
        def slice(slicer_name, source_path, dest_path, profile, done_cb, printer_profile_id=None, position=None, callback_args=None, overrides=None, on_progress=None, on_progress_args=None, on_progress_kwargs=None):
            # Assert the FileManager forwards everything correctly, then
            # immediately report success via done_cb.
            self.assertEquals("some_slicer", slicer_name)
            self.assertEquals("prefix/source.file", source_path)
            self.assertEquals("tmp.file", dest_path)
            self.assertIsNone(profile)
            self.assertIsNone(overrides)
            self.assertIsNone(printer_profile_id)
            self.assertIsNone(position)
            self.assertIsNotNone(on_progress)
            self.assertIsNotNone(on_progress_args)
            self.assertTupleEqual(("some_slicer", bioprint.filemanager.FileDestinations.LOCAL, "source.file", bioprint.filemanager.FileDestinations.LOCAL, "dest.file"), on_progress_args)
            self.assertIsNone(on_progress_kwargs)
            if not callback_args:
                callback_args = ()
            done_cb(*callback_args)
        self.slicing_manager.slice.side_effect = slice
        ##~~ execute tested method
        self.file_manager.slice("some_slicer", bioprint.filemanager.FileDestinations.LOCAL, "source.file", bioprint.filemanager.FileDestinations.LOCAL, "dest.file", callback=callback, callback_args=callback_args)
        # assert that events where fired
        # NOTE(review): this ASSIGNS call_args_list instead of asserting
        # against it, so the event expectation is never actually checked.
        expected_events = [mock.call(bioprint.filemanager.Events.SLICING_STARTED, {"stl": "source.file", "gcode": "dest.file", "progressAvailable": False}),
                           mock.call(bioprint.filemanager.Events.SLICING_DONE, {"stl": "source.file", "gcode": "dest.file", "time": 15.694000005722046})]
        self.fire_event.call_args_list = expected_events
        # assert that model links were added
        expected_links = [("model", dict(name="source.file"))]
        self.local_storage.add_file.assert_called_once_with("dest.file", mock.ANY, printer_profile=expected_printer_profile, allow_overwrite=True, links=expected_links)
        # assert that the generated gcode was manipulated as required
        expected_atomic_write_calls = [mock.call("prefix/dest.file", "wb")]
        self.assertEquals(mocked_atomic_write.call_args_list, expected_atomic_write_calls)
        #mocked_open.return_value.write.assert_called_once_with(";Generated from source.file aabbccddeeff\r")
        # assert that shutil was asked to copy the concatenated multistream
        self.assertEquals(1, len(mocked_shutil.call_args_list))
        shutil_call_args = mocked_shutil.call_args_list[0]
        self.assertTrue(isinstance(shutil_call_args[0][0], bioprint.filemanager.util.MultiStream))
        multi_stream = shutil_call_args[0][0]
        self.assertEquals(2, len(multi_stream.streams))
        self.assertTrue(isinstance(multi_stream.streams[0], io.BytesIO))
        # assert that the temporary file was deleted
        mocked_os.assert_called_once_with("tmp.file")
        # assert that our callback was called with the supplied arguments
        callback.assert_called_once_with(*callback_args)

    @mock.patch("os.remove")
    @mock.patch("tempfile.NamedTemporaryFile")
    @mock.patch("time.time", side_effect=[1411979916.422, 1411979932.116])
    def test_slice_error(self, mocked_time, mocked_tempfile, mocked_os):
        """Failing slice: done_cb reports _error and the temp file is removed."""
        callback = mock.MagicMock()
        callback_args = ("one", "two", "three")
        # mock temporary file
        temp_file = mock.MagicMock()
        temp_file.name = "tmp.file"
        mocked_tempfile.return_value = temp_file
        # mock path_on_disk method on local storage
        def path_on_disk(path):
            # Accepts either a (folder, name) tuple or a plain string path.
            if isinstance(path, tuple):
                import os
                joined_path = ""
                for part in path:
                    joined_path = os.path.join(joined_path, part)
                path = joined_path
            return "prefix/" + path
        self.local_storage.path_on_disk.side_effect = path_on_disk
        # mock slice method on slicing manager
        def slice(slicer_name, source_path, dest_path, profile, done_cb, printer_profile_id=None, position=None, callback_args=None, overrides=None, on_progress=None, on_progress_args=None, on_progress_kwargs=None):
            # Same forwarding checks as the happy path, but fail via _error.
            self.assertEquals("some_slicer", slicer_name)
            self.assertEquals("prefix/source.file", source_path)
            self.assertEquals("tmp.file", dest_path)
            self.assertIsNone(profile)
            self.assertIsNone(overrides)
            self.assertIsNone(printer_profile_id)
            self.assertIsNone(position)
            self.assertIsNotNone(on_progress)
            self.assertIsNotNone(on_progress_args)
            self.assertTupleEqual(("some_slicer", bioprint.filemanager.FileDestinations.LOCAL, "source.file", bioprint.filemanager.FileDestinations.LOCAL, "dest.file"), on_progress_args)
            self.assertIsNone(on_progress_kwargs)
            if not callback_args:
                callback_args = ()
            done_cb(*callback_args, _error="Something went wrong")
        self.slicing_manager.slice.side_effect = slice
        ##~~ execute tested method
        self.file_manager.slice("some_slicer", bioprint.filemanager.FileDestinations.LOCAL, "source.file", bioprint.filemanager.FileDestinations.LOCAL, "dest.file", callback=callback, callback_args=callback_args)
        # assert that events where fired
        # NOTE(review): assignment instead of assertion -- see test_slice.
        expected_events = [mock.call(bioprint.filemanager.Events.SLICING_STARTED, {"stl": "source.file", "gcode": "dest.file"}),
                           mock.call(bioprint.filemanager.Events.SLICING_FAILED, {"stl": "source.file", "gcode": "dest.file", "reason": "Something went wrong"})]
        self.fire_event.call_args_list = expected_events
        # assert that the temporary file was deleted
        mocked_os.assert_called_once_with("tmp.file")
        # assert that time.time was only called once
        # NOTE(review): on old `mock` releases assert_called_once did not
        # exist and this silently passed as an auto-created attribute.
        mocked_time.assert_called_once()
|
import torch
from allennlp.common.registrable import Registrable
from typing import Tuple
class CoverageMatrixAttention(torch.nn.Module, Registrable):
    """
    The ``CoverageMatrixAttention`` computes a matrix of attention probabilities
    between the encoder and decoder outputs. The attention function has access
    to the cumulative probabilities that the attention has assigned to each
    input token previously. In addition to the attention probabilities, the function
    should return the coverage vectors which were used to compute the distribution
    at each time step as well as the new coverage vector which takes into account
    the function's computation.

    The module must compute the probabilities instead of the raw scores (like
    the ``MatrixAttention`` module does) because the coverage vector contains
    the accumulated probabilities.

    This class is an abstract interface: concrete attention implementations
    register themselves via ``Registrable`` and override ``forward``.
    """

    def forward(self,
                decoder_outputs: torch.Tensor,
                encoder_outputs: torch.Tensor,
                encoder_mask: torch.Tensor,
                coverage_vector: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Computes a matrix of attention scores and updates the coverage vector.

        Parameters
        ----------
        decoder_outputs: (batch_size, num_decoder_tokens, hidden_dim)
            The decoder's outputs.
        encoder_outputs: (batch_size, num_encoder_tokens, hidden_dim)
            The encoder's outputs.
        encoder_mask: (batch_size, num_encoder_tokens)
            The encoder token mask.
        coverage_vector: (batch_size, num_encoder_tokens)
            The cumulative attention probability assigned to each input token
            thus far.

        Returns
        -------
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The attention probabilities between each decoder and encoder hidden representations.
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The coverage vectors used to compute the corresponding attention probabilities.
        torch.Tensor: (batch_size, num_encoder_tokens)
            The latest coverage vector after computing
        """
        # Abstract method: subclasses must provide the actual computation.
        raise NotImplementedError
|
# Classic FizzBuzz over 0..100: multiples of 3 print "Fizz", multiples of 5
# print "Buzz", multiples of both print "FizzBuzz", anything else prints the
# number itself.
for value in range(101):
    label = ""
    if value % 3 == 0:
        label += "Fizz"
    if value % 5 == 0:
        label += "Buzz"
    print(label if label else value)
|
def main():
    """Solve every case in B-large.in and write the answers to output2.txt.

    Input format: first line is the number of cases; each following line is a
    pancake string of '+'/'-' passed to combinations(). Each answer is echoed
    to stdout and appended to the output file as "Case #i: result".
    """
    # Context managers guarantee both handles are closed even on error
    # (the original left fin/fout unclosed).
    with open('B-large.in', 'r') as fin, open('output2.txt', 'w') as fout:
        cases = int(fin.readline())
        for i in range(cases):
            test = fin.readline().strip()
            result = combinations(test)
            output = "Case #{}: {}".format(i + 1, result)
            # print as a function call works under both Python 2 and 3.
            print(output)
            fout.write(output + '\n')
def combinations(seq):
    """Return the minimum number of pancake flips to make *seq* all '+'.

    One flip is needed if the stack starts happy-side-down ('-' first), and
    each happy-to-blank boundary ('+' immediately followed by '-') costs two
    additional flips.

    The original scanned all index pairs (O(n^2)) just to find adjacent ones
    and crashed on an empty string; this is a single O(n) pass that returns 0
    for empty input.
    """
    result = 1 if seq and seq[0] == '-' else 0
    # zip(seq, seq[1:]) yields each adjacent character pair exactly once.
    for current, following in zip(seq, seq[1:]):
        if current == '+' and following == '-':
            result += 2
    return result

if __name__ == '__main__':
    main()
import os
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
def sttWatson(audio_input):
    """Transcribe a local WAV file with IBM Watson Speech to Text.

    Parameters
    ----------
    audio_input: path to a WAV file readable in binary mode.

    Returns the full JSON response from Watson's ``recognize`` call (the
    commented-out variant returned only the ``results`` list).
    """
    # SECURITY: the API key was hard-coded here. Prefer the IBM_STT_APIKEY
    # environment variable; the embedded key is kept only as a fallback for
    # backward compatibility and should be revoked and removed.
    api_key = os.environ.get('IBM_STT_APIKEY',
                             'P3_Qv43uneTlHj47J-9YhThh0JfAzPF0EN7eooqDvrm8')
    authenticator = IAMAuthenticator(api_key)
    speech_to_text = SpeechToTextV1(
        authenticator=authenticator
    )
    print ('iniciar conversao')
    # The file handle is scoped to the request via the context manager.
    with open(audio_input, 'rb') as audio_file:
        print('testes')
        speech_recognition_results = speech_to_text.recognize(
            audio=audio_file,
            content_type='audio/wav',
            model='pt-BR_BroadbandModel'
        ).get_result()
    print(speech_recognition_results)
    # return(speech_recognition_results['results'])
    return(speech_recognition_results)
|
# Sum every integer below n that is divisible by both 3 and 5
# (equivalently, by 15) and print the total.
n = int(input("dati n: "))
total = 0
for value in range(1, n):
    if value % 15 == 0:
        total += value
print(total)
|
"""Evaluate a model on the task of video corpus moment retrieval
!!! This program will not run !!!
We are providing it to showcase the evaluation protocol
"""
import argparse
import json
import logging
from datetime import datetime
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
import corpus
import dataset_untrimmed
import model
import proposals
from evaluation import CorpusVideoMomentRetrievalEval
from utils import setup_logging, get_git_revision_hash
# TODO(tier-2;clean): remove this hard-coded approach
# we not only use the same arch, but also the same hyper-prm
# Hyper-parameters that must agree across all snapshots when fusing models;
# load_hyperparameters() appends each snapshot's value and asserts uniqueness.
UNIQUE_VARS = {key: [] for key in
               ['arch', 'loc', 'context', 'proposal_interface']}
parser = argparse.ArgumentParser(
    description='Corpus Retrieval Evaluation',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Data
parser.add_argument('--test-list', type=Path, required=True,
                    help='JSON-file with corpus instances')
parser.add_argument('--h5-path', type=Path, nargs='+',
                    help='HDF5-file with features')
parser.add_argument('--tags', nargs='+',
                    help='Tag for h5-file features')
# Architecture
parser.add_argument('--snapshot', type=Path, required=True, nargs='+',
                    help='JSON files of model')
parser.add_argument('--snapshot-tags', nargs='+',
                    help='Pair model to a given h5-path')
# Evaluation parameters
parser.add_argument('--topk', nargs='+', type=int,
                    default=[1, 10, 100, 1000, 10000],
                    help='top-k values to compute')
# Dump results and logs
parser.add_argument('--dump', action='store_true',
                    help='Save log in text file and json')
parser.add_argument('--logfile', type=Path, default='',
                    help='Logging file')
parser.add_argument('--n-display', type=float, default=0.2,
                    help='logging rate during epoch')
parser.add_argument('--disable-tqdm', action='store_true',
                    help='Disable progress-bar')
parser.add_argument('--dump-per-instance-results', action='store_true',
                    help='HDF5 with results')
parser.add_argument('--reduced-dump', action='store_true',
                    help='Only dump video indices per query')
parser.add_argument('--enable-tb', action='store_true',
                    help='Log to tensorboard. Nothing logged by this program')
# Debug
parser.add_argument('--debug', action='store_true',
                    help=('yield incorrect results! to verify things are'
                          'glued correctly (dataset, model, eval)'))
# NOTE: parsing happens at import time; main(args) consumes this namespace.
args = parser.parse_args()
def main(args):
    """Put all the pieces together: load hyper-parameters and models, index
    the corpus, run every query through the retrieval engine, and summarize
    (optionally dumping per-instance and aggregate results).

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    copy of this file; control-flow nesting should be verified upstream.
    """
    # Dumping per-instance results implies dumping the aggregate results too.
    if args.dump_per_instance_results:
        args.dump = True
    if args.dump:
        args.disable_tqdm = True
        if len(args.logfile.name) == 0:
            # Derive a default logfile name by fusing all snapshot basenames.
            basename_fusion = [str(i.with_suffix('').with_name(i.stem))
                               for i in args.snapshot]
            args.logfile = Path('-'.join(basename_fusion) + '_corpus-eval')
        if args.logfile.exists():
            raise ValueError(
                f'{args.logfile} already exists. Please provide a logfile or'
                'backup existing results.')
    setup_logging(args)
    logging.info('Corpus Retrieval Evaluation for CAL/MCN')
    logging.info(f'Git revision hash: {get_git_revision_hash()}')
    load_hyperparameters(args)
    logging.info(args)
    engine_prm = {}
    if args.arch == 'MCN':
        args.dataset = 'UntrimmedMCN'
        args.engine = 'MomentRetrievalFromProposalsTable'
    elif args.arch == 'SMCN':
        args.dataset = 'UntrimmedSMCN'
        args.engine = 'MomentRetrievalFromClipBasedProposalsTable'
    else:
        # BUGFIX: the exception was constructed but never raised, which let
        # execution continue and fail later with an undefined args.dataset.
        raise ValueError('Unknown/unsupported architecture')
    logging.info('Loading dataset')
    dataset_novisual = True
    dataset_cues = {feat: None for feat in args.tags}
    if args.h5_path:
        # Pair each feature tag with its HDF5 file, positionally.
        for i, key in enumerate(args.tags):
            dataset_cues[key] = {'file': args.h5_path[i]}
        dataset_novisual = False
        clip_length = None
    else:
        clip_length = args.clip_length
    proposals_interface = proposals.__dict__[args.proposal_interface](
        args.min_length, args.scales, args.stride)
    dataset_setup = dict(
        json_file=args.test_list, cues=dataset_cues, loc=args.loc,
        context=args.context, debug=args.debug, eval=True,
        no_visual=dataset_novisual,
        proposals_interface=proposals_interface,
        clip_length=clip_length
    )
    dataset = dataset_untrimmed.__dict__[args.dataset](**dataset_setup)
    if args.arch == 'SMCN':
        logging.info('Set padding on UntrimmedSMCN dataset')
        dataset.set_padding(False)
    logging.info('Setting up models')
    models_dict = {}
    for i, key in enumerate(args.snapshot_tags):
        arch_setup = dict(
            visual_size=dataset.visual_size[key],
            lang_size=dataset.language_size,
            max_length=dataset.max_words,
            embedding_size=args.embedding_size,
            visual_hidden=args.visual_hidden,
            lang_hidden=args.lang_hidden,
            visual_layers=args.visual_layers,
        )
        models_dict[key] = model.__dict__[args.arch](**arch_setup)
        filename = args.snapshot[i].with_suffix('.pth.tar')
        # map_location keeps everything on CPU regardless of where it was saved.
        snapshot_ = torch.load(
            filename, map_location=lambda storage, loc: storage)
        models_dict[key].load_state_dict(snapshot_['state_dict'])
        models_dict[key].eval()
    logging.info('Creating database alas indexing corpus')
    engine = corpus.__dict__[args.engine](dataset, models_dict, **engine_prm)
    engine.indexing()
    logging.info('Launch evaluation...')
    # log-scale up to the end of the database
    if len(args.topk) == 1 and args.topk[0] == 0:
        exp = int(np.floor(np.log10(engine.num_moments)))
        args.topk = [10**i for i in range(0, exp + 1)]
        args.topk.append(engine.num_moments)
    num_instances_retrieved = []
    judge = CorpusVideoMomentRetrievalEval(topk=args.topk)
    # Turn the fractional display rate into an absolute query interval (>= 1).
    args.n_display = max(int(args.n_display * len(dataset.metadata)), 1)
    for it, query_metadata in tqdm(enumerate(dataset.metadata),
                                   disable=args.disable_tqdm):
        result_per_query = engine.query(
            query_metadata['language_input'],
            return_indices=args.dump_per_instance_results)
        if args.dump_per_instance_results:
            vid_indices, segments, proposals_ind = result_per_query
        else:
            vid_indices, segments = result_per_query
        judge.add_single_predicted_moment_info(
            query_metadata, vid_indices, segments, max_rank=engine.num_moments)
        num_instances_retrieved.append(len(vid_indices))
        if args.disable_tqdm and (it + 1) % args.n_display == 0:
            logging.info(f'Processed queries [{it}/{len(dataset.metadata)}]')
        if args.dump_per_instance_results:
            # TODO: wrap-up this inside a class. We could even dump in a
            # non-blocking thread using a Queue
            if it == 0:
                # Lazily create the HDF5 file on the first query, once the
                # result shapes are known. Mode 'x' fails if it exists.
                filename = args.logfile.with_suffix('.h5')
                fid = h5py.File(filename, 'x')
                if args.reduced_dump:
                    fid_vi = fid.create_dataset(
                        name='vid_indices',
                        chunks=True,
                        shape=(len(dataset), dataset.num_videos),
                        dtype='int64')
                else:
                    fid.create_dataset(
                        name='proposals', data=engine.proposals, chunks=True)
                    fid_vi = fid.create_dataset(
                        name='vid_indices',
                        chunks=True,
                        shape=(len(dataset),) + vid_indices.shape,
                        dtype='int64')
                    fid_pi = fid.create_dataset(
                        name='proposals_ind',
                        chunks=True,
                        shape=(len(dataset),) + proposals_ind.shape,
                        dtype='int64')
            if args.reduced_dump:
                fid_vi[it, ...] = pd.unique(vid_indices.numpy())
            else:
                fid_vi[it, ...] = vid_indices
                fid_pi[it, ...] = proposals_ind
    if args.dump_per_instance_results:
        fid.close()
    logging.info('Summarizing results')
    num_instances_retrieved = np.array(num_instances_retrieved)
    logging.info(f'Number of queries: {len(judge.map_query)}')
    logging.info(f'Number of proposals: {engine.num_moments}')
    retrieved_proposals_median = int(np.median(num_instances_retrieved))
    retrieved_proposals_min = int(num_instances_retrieved.min())
    if (num_instances_retrieved != engine.num_moments).any():
        logging.info('Triggered approximate search')
        logging.info('Median numbers of retrieved proposals: '
                     f'{retrieved_proposals_median:d}')
        logging.info('Min numbers of retrieved proposals: '
                     f'{retrieved_proposals_min:d}')
    result = judge.evaluate()
    _ = [logging.info(f'{k}: {v}') for k, v in result.items()]
    if args.dump:
        filename = args.logfile.with_suffix('.json')
        # BUGFIX: the f-string had lost its placeholder and always logged the
        # literal text "(unknown)"; report the actual destination file.
        logging.info(f'Dumping results into: {filename}')
        with open(filename, 'x') as fid:
            # Metric values may be numpy scalars; make them JSON-serializable.
            for key, value in result.items():
                result[key] = float(value)
            result['snapshot'] = [str(i) for i in args.snapshot]
            result['corpus'] = str(args.test_list)
            result['topk'] = args.topk
            result['iou_threshold'] = judge.iou_thresholds
            result['median_proposals_retrieved'] = retrieved_proposals_median
            result['min_proposals_retrieved'] = retrieved_proposals_min
            result['date'] = datetime.now().isoformat()
            result['git_hash'] = get_git_revision_hash()
            json.dump(result, fid, indent=1)
def load_hyperparameters(args):
    """Update `args` in place with model hyper-parameters read from the JSON
    snapshot file(s) listed in `args.snapshot`.

    Two modes:
    - `args.tags is None`: single-model mode. Reads one JSON file, derives the
      tag from its 'feat' field, and copies every hyper-parameter onto `args`
      (existing attributes win).
    - otherwise: multi-model mode. One JSON per tag; additionally records
      values listed in UNIQUE_VARS and asserts they agree across models.

    NOTE(review): in multi-model mode `args.h5_path` is assumed to be set and
    the same length as `args.tags` — confirm upstream argument parsing.
    """
    if args.tags is None:
        # Parse single model
        assert len(args.snapshot) == 1
        logging.info('Parsing single JSON file with hyper-parameters')
        with open(args.snapshot[0], 'r') as fid:
            if args.h5_path:
                assert len(args.h5_path) == 1
            hyper_prm = json.load(fid)
        # Tag bookkeeping is keyed by the model's feature name.
        args.tags = {hyper_prm['feat']: None}
        args.snapshot_tags = [hyper_prm['feat']]
        for key, value in hyper_prm.items():
            # Only fill attributes the CLI did not already provide.
            if not hasattr(args, key):
                setattr(args, key, value)
        return
    logging.info('Parsing multiple JSON files with hyper-parameters')
    args.tags = dict.fromkeys(args.tags)
    assert len(args.h5_path) == len(args.tags)
    for i, filename in enumerate(args.snapshot):
        with open(filename, 'r') as fid:
            hyper_prm = json.load(fid)
        # Each snapshot must correspond to a declared tag.
        assert args.snapshot_tags[i] in args.tags
        for key, value in hyper_prm.items():
            if not hasattr(args, key):
                setattr(args, key, value)
            if key in UNIQUE_VARS:
                UNIQUE_VARS[key].append(value)
    # Hyper-parameters declared "unique" must be identical across all models.
    for value in UNIQUE_VARS.values():
        assert len(np.unique(value)) == 1
if __name__ == '__main__':
    # NOTE(review): `args` must be defined at module level above (outside this
    # view), presumably by an argparse setup — confirm before refactoring.
    main(args)
|
#from oauth2_provider import
__author__ = 'dmorina'
from oauth2_provider.oauth2_backends import OAuthLibCore, get_oauthlib_core
from oauth2client import client
from oauthlib.common import urlencode, urlencoded, quote
def oauth_create_client(user, client_name):
    """Create an OAuth2 client for `user` — not implemented yet; a no-op.

    TODO: build the client via the oauth2client library, persist it, and
    return the saved instance.
    """
    pass
class Oauth2Backend(OAuthLibCore):
    """OAuthLibCore variant that builds OAuthLib requests from DRF-style
    request objects (reads ``request.data`` instead of Django's POST dict).

    BUG FIX: removed stray debug ``print`` calls from the request path.
    """

    def _extract_params(self, request):
        """
        Extract parameters from the Django request object. Such parameters will then be passed to
        OAuthLib to build its own Request object. The body should be encoded using OAuthLib urlencoded.

        :param request: the current django.http.HttpRequest object
        :return: (uri, http_method, body, headers) tuple for OAuthLib
        """
        uri = self._get_escaped_full_path(request)
        http_method = request.method
        # NOTE(review): headers are intentionally dropped here (the
        # extract_headers call is commented out) — confirm before re-enabling.
        headers = {}  # self.extract_headers(request)
        body = urlencode(self.extract_body(request))
        return uri, http_method, body, headers

    def create_token_response(self, request):
        """
        A wrapper method that calls create_token_response on `server_class` instance.

        :param request: The current django.http.HttpRequest object
        :return: (uri, headers, body, status) from the OAuth core server
        """
        uri, http_method, body, headers = self._extract_params(request)
        headers, body, status = get_oauthlib_core().server.create_token_response(
            uri, http_method, body, headers)
        uri = headers.get("Location", None)
        return uri, headers, body, status

    def extract_body(self, request):
        """
        Extracts the POST body from the Django request object.

        :param request: The current django.http.HttpRequest object
        :return: provided POST parameters as an items() view
        """
        return request.data.items()
'''Functions'''
from datetime import datetime
from time import sleep
# Write a function with def()
# -----------------------------------------------------------------------------
# Function names can start with letters or _ and contain only letters, numbers
# and _. Pass means do nothing but move on. It's a placeholder for future code.
# NOTE: it's good practice (PEP 8) to put two blank lines after each function
# definition, unless they're nested inside another function or class.
def myfunction(num1, num2): # num1, num2 are *parameters*
pass
# Call the function()
# -----------------------------------------------------------------------------
myfunction(1, 2) # 1, 2 are *arguments*
# Reminder: return vs print
# -----------------------------------------------------------------------------
def myfunction1(num1, num2):
print(num1 * num2) # prints the result but returns None
def myfunction2(num1, num2):
return num1 * num2 # prints nothing but returns the result
# example:
def heading(arg):
    """Return *arg* title-cased and centered within a line of 80 dashes."""
    title = str(arg).title()
    return f'{title:-^80}'
h = heading('Positional Arguments')
print(h)
# Positional Arguments
# -----------------------------------------------------------------------------
def menu(wine, cheese, dessert):
return {'wine': wine, 'cheese': cheese, 'dessert': dessert}
print(menu('chardonnay', 'cake', 'swiss'))
# {'wine': 'chardonnay', 'cheese': 'cake', 'dessert': 'swiss'}
# Keyword Arguments
# -----------------------------------------------------------------------------
print(menu(dessert='cake', cheese='swiss', wine='chardonnay'))
# {'wine': 'chardonnay', 'cheese': 'swiss', 'dessert': 'cake'}
# Keyword-only arguments
# -----------------------------------------------------------------------------
# In the examples above, we see that it's optional as to whether we use
# keywords when calling the function. If you feel that for the sake of
# clarity, keywords should be mandatory, you can specify this by using '*'.
# The '*' in the argument list indicates the end of positional arguments and
# the beginning of keyword-only arguments. This way there will be no confusion.
def menu(wine, cheese, *, courses=3, guests=1):
return {'wine': wine, 'cheese': cheese}
# menu('merlot', 'brie', 2, 4)
# TypeError: menu() takes 2 positional arguments but 4 were given
menu('merlot', 'brie', guests=2, courses=4)
# Positional-only arguments
# -----------------------------------------------------------------------------
# Python 3.8 introduced a new function parameter syntax / to indicate that some
# function parameters must be specified positionally and cannot be used as
# keyword arguments. I think one of the reasons they did this was to allow
# pure python functions to emulate behaviors of existing C coded functions.
def menu(wine, cheese, /, *, courses=3, guests=1):
return {'wine': wine, 'cheese': cheese}
# print(menu(cheese='burrata', wine='chardonnay', guests=2, courses=4))
# TypeError: menu() got some positional-only arguments passed as keyword arguments: 'wine, cheese'
print(menu('chardonnay', 'burrata', guests=2, courses=4))
# {'wine': 'chardonnay', 'cheese': 'burrata'}
# Use None to specify dynamic default values
# -----------------------------------------------------------------------------
# In this example the function is expected to run each time with a fresh empty
# result list, add the argument to it, and then print the single-item list.
# However, it's only empty the first time it's called. The second time, result
# still has one item from the previous call. The reason for this is that
# default argument values are evaluated only once per module load (which
# usually happens when a program starts up). To be precise, the default values
# are generated at the point the function is defined, not when it's called.
def buggy(arg, result=[]):
result.append(arg)
print(result)
buggy('a') # ['a']
buggy('b') # ['a', 'b']
buggy('c', []) # ['c']
# This next example works better to ensure we have an empty list each time,
# however we no longer have the option of passing in a list:
def works(arg):
result = []
result.append(arg)
print(result)
works('a') # ['a']
works('b') # ['b']
# Correct the first example by passing in None to indicate the first call:
def nonbuggy(arg, result=None):
if result is None:
result = []
result.append(arg)
print(result)
# or more common method of writing it:
def nonbuggy(arg, result=None):
result = result if result else []
result.append(arg)
print(result)
nonbuggy('a') # ['a']
nonbuggy('b') # ['b']
nonbuggy('new list', ['hello']) # ['hello', 'new list']
# A more practical example of this situation would be where we want to set
# a default value using a timestamp. In this case, we want to use a function
# that gets the current time. If we put the function as the default value,
# the default will only be evaluated once, therefore the time will never update:
def log(message, timestamp=datetime.now()):
print(f'{timestamp}: {message}')
log('hello')
sleep(1)
log('hello again')
# 2018-02-06 15:46:31.847122: hello
# 2018-02-06 15:46:31.847122: hello again
# Instead use None as the default, along with a compact expression:
def log(message, timestamp=None):
timestamp = timestamp if timestamp else datetime.now()
print(f'{timestamp}: {message}')
log('hello')
sleep(1)
log('hello again')
# 2018-02-06 15:46:32.852450: hello
# 2018-02-06 15:46:33.857498: hello again
# Gathering Positional Arguments - *args
# -----------------------------------------------------------------------------
# The * operator used when defining a function means that any extra positional
# arguments passed to the function end up in the variable prefaced with the *.
# In short, args is a tuple and * unpacks the tuple
def print_args(*args):
print(args, type(args))
print_args(1, 2, 3, 'hello') # (1, 2, 3, 'hello') <class 'tuple'>
print_args(1) # (1,) <class 'tuple'>
# The * operator can also be used when calling functions and here it means the
# analogous thing. A variable prefaced by * when calling a function means that
# the variable contents should be extracted and used as positional arguments.
def add(x, y):
return x + y
nums = [13, 7]
add(*nums) # returns 20
# This example uses both methods at the same time:
def add(*args):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(args)
nums = [13, 7, 10, 40, 30]
add(*nums) # returns 100
# You can have required and optional parameters. The required ones come first:
def print_more(required1, required2, *args):
print('first argument is required:', required1)
print('second argument is required:', required2)
print('the rest:', args)
print_more('red', 'green')
# first argument is required: red
# second argument is required: green
# the rest: ()
print_more('red', 'green', 'one', 'two', 'three')
# first argument is required: red
# second argument is required: green
# the rest: ('one', 'two', 'three')
# Gathering Keyword Arguments - **kwargs
# -----------------------------------------------------------------------------
# ** does for dictionaries & key/value pairs exactly what * does for iterables
# and positional parameters demonstrated above. Here's it being used in the
# function definition:
def print_kwargs(**kwargs):
print(kwargs, type(kwargs))
print_kwargs(x=1, y=2, z='hi') # {'x': 1, 'y': 2, 'z': 'hi'} <class 'dict'>
# And here we're using it in the function call:
def add(x, y):
return x + y
nums = {'x': 13, 'y': 7}
add(**nums) # returns 20
# And here we're using it in both places:
def print_kwargs(**kwargs):
for key in kwargs:
print(key, 'en francais est', kwargs[key])
colours = {'red': 'rouge', 'yellow': 'jaune', 'green': 'vert', 'black': 'noir'}
print_kwargs(**colours)
# red en francais est rouge
# yellow en francais est jaune
# green en francais est vert
# black en francais est noir
# see also terminology.py for another example that feeds dictionary values
# to a class instance.
# Docstrings
# -----------------------------------------------------------------------------
def myfunction1(arg):
'''This is where you can provide a brief description of the function'''
print(arg)
def myfunction2(arg):
'''
The first line should be a short concise description.
Followed by a space, and then the extended description.
See documenting_naming.py or any of the python standard library modules
for more information and examples.
'''
print(arg)
print(myfunction1.__doc__)
print(myfunction2.__doc__)
# Functions as Arguments
# -----------------------------------------------------------------------------
# Functions are objects, just like numbers, strings, tuples, lists,
# dictionaries, etc. They can be assigned to variables, used as arguments to
# other functions, or returned from other functions.
def answer():
print(100)
def run_something(func):
func()
run_something(answer) # 100
# If the function you're passing as an arg requires its own args, just pass
# them following the function name:
def add_numbers(a, b):
print(a + b)
def run_something_with_args(func, arg1, arg2):
func(arg1, arg2)
run_something_with_args(add_numbers, 5, 10) # 15
# An example with a variable number of arguments:
def sum_numbers(*args):
print(sum(args))
def run_with_positional_args(func, *args):
return func(*args)
run_with_positional_args(sum_numbers, 2, 3, 1, 4) # 10
# Functions as attributes (& monkey patching)
# -----------------------------------------------------------------------------
# Since functions are objects, they can get set as callable attributes on other
# objects. In addition, you can add or change a function on an instantiated
# object. Consider this:
class A():
def method(self):
print("I'm from class A")
def function():
print("I'm not from class A")
a = A()
a.method() # I'm from class A
a.method = function
a.method() # I'm not from class A
# This method of adding or replacing functions is often referred to as
# monkey-patching. Doing this kind of thing can cause situations that are
# difficult to debug. That being said, it does have its uses. Often, it's
# used in automated testing. For example, if testing a client-server app,
# we may not want to actually connect to the server while testing it; this may
# result in accidental transfers of funds or test e-mails being sent to real
# people. Instead, we can set up our test code to replace some of the key
# methods on the object.
# Monkey-patching can also be used to fix bugs or add features in third-party
# code that we are interacting with, and does not behave quite the way we need
# it to. It should, however, be applied sparingly; it's almost always a
# "messy hack". Sometimes, though, it is the only way to adapt an existing
# library to suit our needs.
# Nested functions
# -----------------------------------------------------------------------------
# This is pretty straight forward. When we call the outer() function, it in
# turn calls the inner function. The inner function used a variable x that's
# defined in the outer functions namespace. The inner function looks for x
# first in its own local namespace, then failing that looks in the surrounding
# namespace. If it didn't find it there, it would check the global namespace
# next (see namespaces.py).
def outer():
x = 1
def inner():
print(x)
inner()
outer() # 1
# Closure
# -----------------------------------------------------------------------------
# Consider that the namespace created for our functions are created from
# scratch each time the function is called and then destroyed when the
# function ends. According to this, the following should not work.
def outer():
x = 1
def inner():
print(x)
return inner
a = outer()
print(outer) # <function outer at 0x1014a3510>
print(a) # <function outer.<locals>.inner at 0x100762e18>
# At first glance, since we are returning inner from the outer function and
# assigning it to a new variable, that new variable should no longer have
# access to x because x only exists while the outer function is running.
a() # 1
# But it does work because of a special feature called closure. An inner
# function knows the value of x that was passed in and remembers it. The line
# return inner returns this specialized copy of the inner function (but
# doesn't call it). That's a closure... a dynamically created function that
# remembers where it came from.
def outer(x):
def inner():
print(x)
return inner
a = outer(2)
b = outer(3)
a() # 2
b() # 3
# From this example you can see that closures - the fact that functions
# remember their enclosing scope - can be used to build custom functions that
# have, essentially, a hard coded argument. We aren’t passing the numbers
# to our inner function but are building custom versions of our inner function
# that "remembers" what number it should print.
# lambda()
# -----------------------------------------------------------------------------
# The lambda function is an anonymous function expressed as a single statement
# Use it instead of a normal tiny function.
def edit_story(words, func):
for word in words:
print(func(word))
sounds = ['thud', 'hiss', 'meow', 'tweet']
def headline(testing):
return testing.capitalize() + '!'
edit_story(sounds, headline)
# Using lambda, the headline function can be replaced this way:
edit_story(sounds, lambda word: word.capitalize() + '!')
# Note that the lambda definition does not include a "return" statement –
# it always contains an expression which is returned. Also note that you can
# put a lambda definition anywhere a function is expected, and you don't have
# to assign it to a variable at all.
|
#!/usr/bin/env python
# Copyright (C) 2013 Andy Aschwanden
from sys import stderr
from argparse import ArgumentParser
try:
from netCDF4 import Dataset as NC
except:
from netCDF3 import Dataset as NC
from osgeo import ogr
# Set up the option parser
# Command line: FILE = (shapefile, netCDF file to modify in place).
parser = ArgumentParser()
parser.description = "All values within a polygon defined by a shapefile are replaced by a scalar value."
parser.add_argument("FILE", nargs=2)
parser.add_argument("-s", "--scalar_value", dest="scalar_value", type=float,
                    help="Replace with this value", default=0.)
# BUG FIX: the default must be a string — `options.variables.split(',')`
# below raises AttributeError when the old list default ['bmelt'] is used.
parser.add_argument("-v", "--variables", dest="variables",
                    help="Comma separated list of variables.", default='bmelt')
options = parser.parse_args()
args = options.FILE
scalar_value = options.scalar_value
variables = options.variables.split(',')
# Open the shapefile (read-only) and grab its first layer.
driver = ogr.GetDriverByName('ESRI Shapefile')
data_source = driver.Open(args[0], 0)
layer = data_source.GetLayer(0)
srs = layer.GetSpatialRef()
# Make sure we use lat/lon coordinates.
# Fixme: allow reprojection onto lat/lon if needed.
if not srs.IsGeographic():
    # BUG FIX: the original referenced an undefined name `filename`
    # (NameError); the shapefile path is args[0].
    print('''Spatial Reference System in %s is not lat/lon. Exiting.'''
          % args[0])
    import sys
    sys.exit(0)
feature = layer.GetFeature(0)
nc = NC(args[1], 'a')
def _get_var(nc_file, name):
    """Return variable `name` from the open netCDF file, exiting with a
    message if it is missing (required for the point-in-polygon test)."""
    try:
        return nc_file.variables[name]
    except KeyError:
        # BUG FIX: was a bare except duplicated for 'lat' and 'lon'; a
        # missing variable raises KeyError on the .variables mapping.
        print(("ERROR: variable '%s' not found but needed... ending ..."
               % name))
        import sys
        sys.exit()


lat = _get_var(nc, 'lat')
lon = _get_var(nc, 'lon')
# For every requested variable, overwrite grid cells whose lon/lat point
# falls inside the shapefile polygon with `scalar_value`.
# NOTE(review): indentation reconstructed — original whitespace was stripped.
for var in variables:
    try:
        data = nc.variables[var]
    except:
        # NOTE(review): bare except also hides non-KeyError failures.
        print(("ERROR: variable '%s' not found but needed... ending ..."
               % var))
        import sys
        sys.exit()
    counter = 0
    ndim = data.ndim
    # "precent" typo is inside the runtime progress string; left untouched here.
    stderr.write("\n - Processing variable %s, precent done: " % var)
    stderr.write("000")
    if (ndim==2):
        M = data.shape[0]
        N = data.shape[1]
        max_counter = M*N
        for m in range(0, M):
            for n in range(0, N):
                # Point-in-polygon test in geographic (lon/lat) space.
                x = lon[m,n]
                y = lat[m,n]
                wkt = "POINT(%f %f)" % (x,y)
                point = ogr.CreateGeometryFromWkt(wkt)
                if feature.GetGeometryRef().Contains(point):
                    data[m,n] = scalar_value
                # Progress display: overwrite the last three digits on stderr.
                stderr.write("\b\b\b%03d" % (100.0 * counter / max_counter))
                counter += 1
    elif (ndim==3):
        # 3-D case: same spatial mask applied at every level/time index k.
        K = data.shape[0]
        M = data.shape[1]
        N = data.shape[2]
        max_counter = K*M*N
        for k in range(0, K):
            for m in range(0, M):
                for n in range(0, N):
                    x = lon[m,n]
                    y = lat[m,n]
                    wkt = "POINT(%f %f)" % (x,y)
                    point = ogr.CreateGeometryFromWkt(wkt)
                    if feature.GetGeometryRef().Contains(point):
                        data[k,m,n] = scalar_value
                    stderr.write("\b\b\b%03d" % (100.0 * counter / max_counter))
                    counter += 1
    else:
        print(("ERROR: %i dimensions currently not supported... ending..."
               % ndim))
# Persist the in-place edits ('a' mode) and close the file.
nc.close()
|
# Tiny interactive calculator: reads two numbers and an operator symbol.
nn = float(input("please enter first number\n"))
mm = float(input("please enter second number\n"))
# BUG FIX: prompt typo "pleae" corrected.
o = input("please enter your operation\n")
if o == "+":
    print(f"{nn}+{mm}={nn+mm}\n")
elif o == "*":
    print(f"{nn}*{mm}={nn*mm}\n")
elif o == "/":
    # BUG FIX: guard against ZeroDivisionError instead of crashing.
    if mm == 0:
        print("Cannot divide by zero\n")
    else:
        print(f"{nn}/{mm}={nn/mm}\n")
elif o == "-":
    print(f"{nn}-{mm}={nn-mm}\n")
elif o == "**":
    print(f"{nn}**{mm}={nn**mm}\n")
else:
    # Fallback for unknown operators ("Man balad nistam" ≈ "I don't know it").
    print(f" Man balad nistam")
|
"""
Similar to longest-increasing-subsequence
"""
def get_mis(arr):
    """Return the Maximum Increasing Subsequence sum of `arr`.

    Classic O(n^2) dynamic program, analogous to longest-increasing-
    subsequence: mis[i] is the best sum of an increasing subsequence that
    ends at arr[i]. The DP table is still printed (original behavior kept).

    BUG FIX: the original only printed the table and returned None; now the
    maximum is returned. Empty input returns 0.
    """
    n = len(arr)
    if n == 0:
        return 0
    mis = list(arr)  # base case: each element forms a subsequence by itself
    for i in range(1, n):
        for j in range(i):
            if arr[i] > arr[j] and mis[j] + arr[i] > mis[i]:
                mis[i] = mis[j] + arr[i]
    print(mis)
    return max(mis)
arr = [7, 1, 4, 8, 11, 2, 14, 3]
get_mis(arr) |
from enum import Enum, unique
@unique  # reject two members sharing the same value
class EventType(Enum):
    """Discriminates event kinds; UNKNOWN is the fallback sentinel.

    NOTE(review): semantics inferred from names (DEL = deletion,
    NEW = creation) — confirm with the event producers.
    """
    UNKNOWN = -1
    DEL = 0
    NEW = 1
"""Определяем схемы URL для blogs"""
from django.urls import path, re_path
from .import views
urlpatterns = [
    # Home page.
    path('', views.index, name='index'),
    # List of all notes.
    path('notes/', views.notes, name='notes'),
    # Detail page for one note, keyed by a numeric id captured as note_id.
    re_path(r'^notes/(?P<note_id>\d+)/$', views.note, name='note'),
    # Create a new entry.
    path('new/', views.new, name='new'),
    # Edit an existing entry.
    re_path(r'^edit_blog/(?P<note_id>\d+)/$', views.edit_blog, name='edit_blog')
]
|
import pymesh
from pymesh.TestCase import TestCase
import numpy as np
import numpy.linalg
class HarmonicSolverTest(TestCase):
    """Checks pymesh.HarmonicSolver against functions known to be harmonic."""

    def _tet_mesh_of_sphere(self, radius, center, max_volume):
        """Tetrahedralize an icosphere and return the resulting volume mesh."""
        surface = pymesh.generate_icosphere(radius, center, 2)
        tetgen = pymesh.tetgen()
        tetgen.points = surface.vertices
        tetgen.triangles = surface.faces
        tetgen.max_tet_volume = max_volume
        tetgen.verbosity = 0
        tetgen.run()
        return tetgen.mesh

    def test_linear_function(self):
        """The x-coordinate is harmonic; the solver must reproduce it."""
        mesh = self._tet_mesh_of_sphere(1.0, np.zeros(3), 0.1)
        self.assertLess(0, mesh.num_voxels)
        solver = pymesh.HarmonicSolver.create(mesh)
        boundary = np.unique(mesh.faces.ravel())
        solver.boundary_indices = boundary
        solver.boundary_values = mesh.vertices[boundary, 0]
        solver.pre_process()
        solver.solve()
        solution = solver.solution.ravel()
        self.assertEqual(mesh.num_vertices, len(solution))
        self.assert_array_almost_equal(mesh.vertices[:, 0], solution, 12)

    def test_radial_function(self):
        """1/r (r = distance to the origin) is harmonic away from the origin;
        the sphere is centered at (1,1,1) so the singularity stays outside."""
        mesh = self._tet_mesh_of_sphere(1.0, [1.0, 1.0, 1.0], 0.001)
        self.assertLess(0, mesh.num_voxels)
        solver = pymesh.HarmonicSolver.create(mesh)
        boundary = np.unique(mesh.faces.ravel())
        expected = 1.0 / numpy.linalg.norm(mesh.vertices, axis=1)
        solver.boundary_indices = boundary
        solver.boundary_values = expected[boundary]
        solver.order = 1
        solver.pre_process()
        solver.solve()
        solution = solver.solution.ravel()
        self.assertEqual(mesh.num_vertices, len(solution))
        # TODO: accuracy is not great...
        self.assert_array_almost_equal(expected, solution, 2)

    def test_2D(self):
        """f(p) = ln(|p + 1|^2) is harmonic in 2D; solve on the unit square."""
        tri = pymesh.triangle()
        tri.points = np.array([
            [0.0, 0.0],
            [1.0, 0.0],
            [0.0, 1.0],
            [1.0, 1.0],
        ])
        tri.keep_convex_hull = True
        tri.max_area = 0.0001
        tri.verbosity = 0
        tri.run()
        mesh = tri.mesh
        self.assertLess(0, mesh.num_faces)
        solver = pymesh.HarmonicSolver.create(mesh)
        boundary = mesh.boundary_vertices.ravel()
        expected = np.log(np.square(
            numpy.linalg.norm(mesh.vertices + 1.0, axis=1)))
        solver.boundary_indices = boundary
        solver.boundary_values = expected[boundary]
        solver.order = 1
        solver.pre_process()
        solver.solve()
        solution = solver.solution.ravel()
        self.assertEqual(mesh.num_vertices, len(solution))
        self.assert_array_almost_equal(expected, solution, 3)
|
from io import StringIO
import pandas as pd
import numpy as np
import sqlite3
from sklearn.impute import SimpleImputer
def get_data(data_set) -> "tuple[pd.DataFrame, np.ndarray]":
    """Parse a CSV string, split off the 'state' target column, and
    mean-impute missing feature values.

    BUG FIX: the original return annotation `(pd.DataFrame, np.ndarray)` was
    a tuple of classes, not a valid type; now a proper tuple type.

    Parameters
    ----------
    data_set : str
        Raw CSV content (read through StringIO).

    Returns
    -------
    (X, y): feature DataFrame with NaNs replaced by column means, and the
    'state' target as an array.
    """
    # TODO get latest loaded csv
    df = pd.read_csv(StringIO(data_set))
    y = np.array(df['state'])
    X = df.drop(['state'], axis=1)
    # Assigning back through X[X.columns] keeps the DataFrame container
    # (column labels) instead of downgrading to a bare ndarray.
    imp = SimpleImputer(missing_values=np.nan, strategy='mean')
    X[X.columns] = imp.fit_transform(X, y)
    return X, y
def get_data_from_csv(data_set) -> "tuple[pd.DataFrame, np.ndarray]":
    """Load a CSV (path or file-like object) and split off the 'state' target.

    BUG FIX: the original return annotation `(pd.DataFrame, np.ndarray)` was
    a tuple of classes, not a valid type; now a proper tuple type.

    Parameters
    ----------
    data_set : str | file-like
        Passed straight to ``pd.read_csv``.

    Returns
    -------
    (X, y): feature DataFrame (all columns except 'state') and target array.
    """
    # TODO get latest loaded csv
    df = pd.read_csv(data_set, index_col=False)
    y = np.array(df['state'])
    X = df.drop(['state'], axis=1)
    return X, y
# if __name__ == '__main__':
# get_data()
|
from pathlib import Path
import pytest
from flit.common import Metadata
from flit.inifile import read_pkg_ini
samples_dir = Path(__file__).parent / 'samples'
def test_extras():
    """extras.toml must declare pytest under 'test' and requests under 'custom'."""
    info = read_pkg_ini(samples_dir / 'extras.toml')
    extras = info['metadata']['requires_extra']
    assert extras['test'] == ['pytest']
    assert extras['custom'] == ['requests']
def test_extras_dev_conflict():
    """Conflicting dev-requires and extras must raise an 'Ambiguity' error."""
    info = read_pkg_ini(samples_dir / 'extras-dev-conflict.toml')
    md = dict(name=info['module'], version='0.0', summary='', **info['metadata'])
    with pytest.raises(ValueError, match=r'Ambiguity'):
        Metadata(md)
def test_extras_dev_warning(caplog):
    """dev-requires alone is accepted, but logs that the field is obsolete."""
    info = read_pkg_ini(samples_dir / 'extras-dev-conflict.toml')
    info['metadata']['requires_extra'] = {}
    md = dict(name=info['module'], version='0.0', summary='', **info['metadata'])
    meta = Metadata(md)
    assert '“dev-requires = ...” is obsolete' in caplog.text
    assert set(meta.requires_dist) == {'apackage; extra == "dev"'}
def test_extra_conditions():
    """Extras are rendered as environment markers in requires_dist."""
    info = read_pkg_ini(samples_dir / 'extras.toml')
    md = dict(name=info['module'], version='0.0', summary='', **info['metadata'])
    meta = Metadata(md)
    expected = {'toml', 'pytest; extra == "test"', 'requests; extra == "custom"'}
    assert set(meta.requires_dist) == expected
|
import jswRSA
import pickle
import hashlib
class DigitalSignaturer:
    """Sign files with RSA (encrypting an MD5 digest) and verify/extract them.

    SECURITY: keys and signed payloads are (de)serialized with pickle —
    unpickling untrusted files can execute arbitrary code. MD5 is also not
    collision-resistant. Flagged for review, behavior kept.

    NOTE(review): signing calls ``Encrypt`` with whichever key was loaded;
    conventionally signatures use the private key — confirm jswRSA's API.
    """

    def __init__(self, privateKey=None, publicKey=None):
        # Load whichever pickled key file is supplied; if both are given the
        # private key wins because it is loaded last (original behavior kept).
        if publicKey is not None:
            with open(publicKey, 'rb') as key_file:
                self.rsa = jswRSA.jswRSA(publicKey=pickle.load(key_file))
        if privateKey is not None:
            with open(privateKey, 'rb') as key_file:
                self.rsa = jswRSA.jswRSA(privateKey=pickle.load(key_file))

    def AddDigitalSignature(self, srcFileNmae, destFileNmae):
        """Write (message, tag) to destFileNmae, where tag = RSA(MD5(message))."""
        with open(srcFileNmae, 'rb') as src:
            message = src.read()
        digest = int(hashlib.md5(message).hexdigest(), 16)
        tag = self.rsa.Encrypt(digest)
        # BUG FIX: the output file was opened inline inside pickle.dump and
        # never closed; `with` guarantees the handle is released/flushed.
        with open(destFileNmae, 'wb') as dst:
            pickle.dump((message, tag), dst)

    def ReadFileWithDigitalSignature(self, srcFileNmae, writeContenToFile=None):
        """Verify srcFileNmae's signature; optionally write content or an
        error marker to writeContenToFile."""
        with open(srcFileNmae, 'rb') as signed_file:
            message, tag = pickle.load(signed_file)
        if int(hashlib.md5(message).hexdigest(), 16) == self.rsa.Decrypt(tag):
            print("Generating File......")
            if writeContenToFile is not None:
                with open(writeContenToFile, 'wb') as dst:
                    dst.write(message)
        else:
            print("MD5 is not matched!")
            if writeContenToFile is not None:
                with open(writeContenToFile, 'w') as dst:
                    dst.write("MD5 is not matched!")
# Demo: sign MessageForYou.txt using the key in PublicKey.rsa, then
# verify/restore it using the key in PrivateKey.rsa.
myDigitalSignaturer= DigitalSignaturer(publicKey='PublicKey.rsa')
myDigitalSignaturer.AddDigitalSignature(srcFileNmae="MessageForYou.txt",destFileNmae="MessageForYouWith.DigitalSignature")
''''''  # no-op empty-string statement (leftover separator)
myDeDigitalSignaturer= DigitalSignaturer(privateKey='PrivateKey.rsa')
myDeDigitalSignaturer.ReadFileWithDigitalSignature("MessageForYouWith.DigitalSignature",writeContenToFile="Undo_MessageForYou.txt")
|
from django.apps import AppConfig
class TrashOSConfig(AppConfig):
    """Django app configuration for the trashos_server application."""
    name = 'trashos_server'
    verbose_name = "Trash OS Server"
|
from tkinter import *
from tkinter.messagebox import *
from socket import *
import threading,sys,json,re
from MainPage import *
## Socket configuration for the chat server connection.
HOST = '127.0.0.1'   # chat server address
PORT = 8022          # chat server port
BUFFERSIZE = 1024    # recv buffer size in bytes
ADDR = (HOST, PORT)
# Account/password rule: letter or underscore first, then word characters.
myre = r"^[_a-zA-Z]\w{0,}"
tcpClintSock = socket(AF_INET, SOCK_STREAM)
class LoginPage(object):
    """Login/registration window; talks to the chat server over tcpCliSock
    with JSON-encoded [name, secret, action] messages.

    BUG FIX: re_check() previously read self.username twice, so the password
    was never validated. Debug prints removed from register/login.
    """

    def __init__(self, master=None, tcpCliSock=None):
        self.root = master  # top-level Tk window
        self.root.geometry('%dx%d+%d+%d' % (300, 200, 500, 250))  # size + position
        self.username = StringVar()
        self.password = StringVar()
        self.createPage()
        # Intercept the window close button to confirm and close the socket.
        self.root.protocol('WM_DELETE_WINDOW', self.closeWindow)
        self.tcpCliSock = tcpCliSock

    def createPage(self):
        """Build the account/password form with register and login buttons."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid(row=0, sticky=W)
        Label(self.page, text='账户: ').grid(row=1, column=1, sticky=W, pady=10)
        Entry(self.page, textvariable=self.username).grid(row=1, column=2, sticky=E)
        Label(self.page, text='密码: ').grid(row=2, column=1, sticky=W, pady=10)
        Entry(self.page, textvariable=self.password, show='*').grid(row=2, column=2, sticky=E)
        # NOTE(review): `ttk` is not imported here; presumably it is
        # re-exported by `from MainPage import *` — confirm.
        ttk.Button(self.page, text='注册', command=self.register).grid(row=3, column=1, sticky=W, pady=10)
        ttk.Button(self.page, text='登陆', command=self.login).grid(row=3, column=2, sticky=E)

    def register(self):
        """Send a register request; on success replace this page with MainPage."""
        if not self.re_check():
            return None
        name = self.username.get()
        secret = self.password.get()
        if len(name) == 0 or len(secret) == 0:
            showinfo(title='错误', message='账号和密码不能为空!')
            return
        datastr = json.dumps([name, secret, 'register'])
        self.tcpCliSock.send(datastr.encode('utf-8'))
        data = self.tcpCliSock.recv(BUFFERSIZE).decode()
        # Server protocol: '0' ok, '1' account exists, anything else = error.
        if data == '0':
            print('success to register!')
            showinfo(title='成功', message='恭喜您注册成功!')
            self.page.destroy()
            MainPage(self.root, self.tcpCliSock, self.username)
        elif data == '1':
            print('Failed to register, account existed!')
            showinfo(title='错误', message='账号已存在,注册失败')
            return False
        else:
            print('Failed for exceptions!')
            showinfo(title='错误', message='服务器出错!')
            return False

    def login(self):
        """Send a login request.

        Server status codes: '0' ok, '1' user unknown / password mismatch,
        '2' already online, '3' wrong password.
        """
        name = self.username.get()
        secret = self.password.get()
        if len(name) == 0 or len(secret) == 0:
            showinfo(title='错误', message='账号和密码不能为空!')
            return
        datastr = json.dumps([name, secret, 'login'])
        self.tcpCliSock.send(datastr.encode('utf-8'))
        data = self.tcpCliSock.recv(BUFFERSIZE).decode()
        if data == '0':
            print('Success to login!{}'.format(data))
            showinfo(title='成功', message='登录成功!')
            self.page.destroy()
            MainPage(self.root, self.tcpCliSock, self.username)
        elif data == '1':
            print('Failed to login in(user not exist or username not match the password)!')
            showinfo(title='错误', message='登录失败,请检查用户名和密码')
            return False
        elif data == '2':
            print('you have been online!')
            showinfo(title='错误', message='您已登录,不可重复登录')
            return False
        elif data == '3':
            print('secret wrong!')
            showinfo(title='错误', message='密码错误')
            return False

    def re_check(self):
        """Validate the account and password against the identifier pattern."""
        name = self.username.get()
        # BUG FIX: was `self.username.get()` — the password was never checked.
        secret = self.password.get()
        if not re.findall(myre, name):
            showinfo(title='错误', message='账号不符合规范')
            return False
        if not re.findall(myre, secret):
            showinfo(title='错误', message='密码不符合规范')
            return False
        return True

    def closeWindow(self):
        """Confirm quit; close the socket and destroy the window on 'yes'."""
        if askyesno(title="提示", message="确定要退出?"):
            self.tcpCliSock.close()
            self.root.destroy()
def main():
    """Connect to the chat server, then run the login window's main loop."""
    tcpClintSock.connect(ADDR)
    root = Tk()
    root.title("Let's chat")
    root.iconbitmap("chat-icon.ico")
    root.resizable(0, 0)  # fixed-size window
    LoginPage(root, tcpClintSock)
    root.mainloop()


if __name__ == '__main__':
    main()
|
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, ListView
from autos.forms import autosForm
from .models import autos
class autosListView(ListView):
    """List all autos records, rendered by autos-list.html."""
    model = autos
    context_object_name = 'all_the_autoss'
    template_name = 'autos-list.html'
class autosDetailView(DetailView):
    """Show a single autos record, rendered by autos-detail.html."""
    model = autos
    context_object_name = 'that_one_autos'
    template_name = 'autos-detail.html'
class autosCreateView(CreateView):
    """Create an autos record, stamping the current user as its owner.

    BUG FIX: removed a stray debug ``print("im in create")`` that executed
    at import time (it sat in the class body, not in a method).
    """
    model = autos
    form_class = autosForm
    template_name = 'autos-create.html'
    success_url = reverse_lazy('autos-list')

    def form_valid(self, form):
        # Attach the logged-in user before the default save/redirect.
        form.instance.user = self.request.user
        return super().form_valid(form)
|
# robot game
# use commands: right, left, up, down, fire, status and quit
class Robot:
    """Grid robot starting at (10, 10) with 100 fuel.

    Every move costs 5 fuel, firing the laser costs 15. Actions are refused
    (with a message) once fuel is exhausted.
    """

    MOVE_COST = 5
    FIRE_COST = 15

    def __init__(self):
        self.XCoordinate = 10
        self.YCoordinate = 10
        self.FuelAmount = 100

    def _has_fuel(self):
        """Return True if fuel remains; otherwise print a warning (shared
        guard that was previously duplicated in every action method)."""
        if self.FuelAmount <= 0:
            print("Insufficient fuel to perform action")
            return False
        return True

    def move_left(self):
        if self._has_fuel():
            self.FuelAmount -= self.MOVE_COST
            self.XCoordinate -= 1

    def move_right(self):
        if self._has_fuel():
            self.FuelAmount -= self.MOVE_COST
            self.XCoordinate += 1

    def move_up(self):
        if self._has_fuel():
            self.FuelAmount -= self.MOVE_COST
            self.YCoordinate += 1

    def move_down(self):
        if self._has_fuel():
            self.FuelAmount -= self.MOVE_COST
            self.YCoordinate -= 1

    def display_currentstatus(self):
        """Print current position and remaining fuel."""
        print("({}, {}) - Fuel: {}" .format(self.XCoordinate, self.YCoordinate, self.FuelAmount))

    def fire_laser(self):
        # BUG FIX: "Pew! Pew!" used to print even when there was no fuel;
        # now the fuel check happens first.
        if self._has_fuel():
            print("Pew! Pew!")
            self.FuelAmount -= self.FIRE_COST
def main():
    """Read commands from stdin and drive a Robot until 'quit'."""
    bot = Robot()
    # Dispatch table: command word -> robot action (unbound method).
    handlers = {
        "left": Robot.move_left,
        "right": Robot.move_right,
        "up": Robot.move_up,
        "down": Robot.move_down,
        "fire": Robot.fire_laser,
        "status": Robot.display_currentstatus,
    }
    while True:
        command = input("Enter command: ")
        if command == "quit":
            print("Goodbye.")
            break
        action = handlers.get(command)
        # Unknown commands are silently ignored, as before.
        if action is not None:
            action(bot)

if __name__ == "__main__":
    main()
|
# -*- coding:utf8 -*-
import tornado.web
import re
# Case-insensitive User-Agent pattern matching common mobile/touch platforms.
touch_re = re.compile(
    r'.*(iOS|iPhone|Android|Windows Phone|webOS|BlackBerry|Symbian|Opera Mobi|UCBrowser|MQQBrowser|Mobile|Touch).*',
    re.I)
class BaseHandler(tornado.web.RequestHandler):
    @property
    def theme(self):
        '''Return True when the request looks like a touch/mobile client.

        Detection matches the User-Agent header against `touch_re`;
        falls back to False (desktop) on any unexpected error.
        '''
        try:
            ua = self.request.headers.get("User-Agent", "")
            # bool(...) replaces the redundant `True if m else False`.
            return bool(touch_re.match(ua))
        except Exception:
            # Defensive: unusable request/headers default to the desktop theme.
            return False
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 10:37:06 2017
@author: Samuel
"""
# change just to push to github!!!!!
"""Introduction to Bayesian Inference"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

# Number of coin tosses ('Bernoulli trials') after which to plot the posterior.
number_of_trials = [0, 2, 10, 20, 50, 500]

# Conduct 500 coin tosses: 0 represents a tail, 1 represents a head.
data = stats.bernoulli.rvs(.5, size=number_of_trials[-1])

# Discretize the x-axis (probability of heads) into 100 plotting points.
x = np.linspace(0, 1, 100)

# Loop over number_of_trials, continually adding more coin-toss data. For each
# new data set we update our (current) prior belief to a new posterior using
# the Beta-Binomial model.
for i, N in enumerate(number_of_trials):
    # Accumulate the total number of heads for this particular Bayesian update.
    heads = data[:N].sum()
    # BUG FIX: subplot indices must be integers -- use floor division
    # (the original passed `len(...) / 2`, a float, which matplotlib rejects).
    ax = plt.subplot(len(number_of_trials) // 2, 2, i + 1)
    ax.set_title('%s trials, %s heads' % (N, heads))
    # Label both axes and hide the y-axis tick labels.
    plt.xlabel('$P(H)$, Probability of Heads')
    plt.ylabel('Density')
    if i == 0:
        plt.ylim([0.0, 2.0])
    plt.setp(ax.get_yticklabels(), visible=False)
    # Plot the Beta-distributed posterior belief in the coin's fairness.
    y = stats.beta.pdf(x, 1 + heads, 1 + N - heads)
    plt.plot(x, y, label='observe %d tosses, \n %d heads' % (N, heads))
    plt.fill_between(x, 0, y, color='#aaaadd', alpha=.5)

# Expand plot to cover full width/height.
plt.tight_layout()
plt.show()

"""Bayesian Inference of a Binomial Proportion"""
import seaborn as sns
from scipy.stats import beta

sns.set_palette('deep', desat=.6)
# BUG FIX: the rc key is 'figure.figsize' (was misspelled 'figure.figuresize',
# so the size setting had no effect).
sns.set_context(rc={'figure.figsize': (8, 4)})

x = np.linspace(0, 1, 100)
# (alpha, beta) parameter pairs for the Beta densities to overlay.
params = [(.5, .5), (1, 1), (4, 3), (2, 5), (6, 6)]
for p in params:
    y = beta.pdf(x, p[0], p[1])
    plt.plot(x, y, label='$\\alpha=%s$, $\\beta=%s$' % p)
plt.xlabel('$\\theta$, Fairness')
plt.ylabel('Density')
plt.legend(title='Parameters')
from alayatodo.models import Todos, Users
from alayatodo import db
def init_fixture():
    """Seed the database with three demo users and their todo items.

    Users are committed first so the hard-coded user_id foreign keys
    (1 and 2) below refer to existing rows.
    """
    users = [
        Users(username="user1", password="user1"),
        Users(username="user2", password="user2"),
        Users(username="user3", password="user3"),
    ]
    db.session.add_all(users)
    db.session.commit()

    # (user_id, description) pairs; every todo starts not done.
    todo_specs = [
        (1, 'Vivamus tempus'),
        (1, 'lorem ac odio'),
        (1, 'Ut congue odio'),
        (1, 'Sodales finibus'),
        (2, 'Accumsan nunc vitae'),
        (2, 'Lorem ipsum'),
        (2, 'In lacinia est'),
        (1, 'Vivamus tempus'),
        (1, 'Odio varius gravida'),
    ]
    db.session.add_all(
        [Todos(user_id=uid, description=desc, done=False) for uid, desc in todo_specs]
    )
    db.session.commit()
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
        """Breadth-first traversal whose per-level direction alternates,
        starting left-to-right at the root."""
        if not root:
            return []
        output = []
        frontier = [root]
        left_to_right = True
        while frontier:
            values = [node.val for node in frontier]
            if not left_to_right:
                values.reverse()
            output.append(values)
            # Collect the next level's nodes in natural (left-to-right) order.
            nxt = []
            for node in frontier:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            frontier = nxt
            left_to_right = not left_to_right
        return output
|
"""
pems_delete.py
"""
import requests
from .exceptions import AgaveFilesError
from ..utils import handle_bad_response_status_code
def files_pems_delete(tenant_url, access_token, path):
    """ Remove user permissions associated with a file or folder.

    These permissions are set at the API level and do not reflect *nix or other
    file system ACL.
    Deletes all permissions on a file except those of the owner.

    :param str tenant_url: base URL of the tenant API
    :param str access_token: OAuth bearer token
    :param str path: file path, prefixed with the system name
    :raises AgaveFilesError: if the HTTP request itself fails
    """
    # Set request url. The full path (system name included) goes in the URL.
    # NOTE(review): the original also computed a system-stripped `destination`
    # that was never used; that dead local has been removed -- confirm the
    # endpoint really should receive the system-prefixed path.
    endpoint = "{0}/{1}/{2}".format(tenant_url, "files/v2/pems/system", path)
    # Make request.
    try:
        headers = {"Authorization": "Bearer {0}".format(access_token)}
        params = {"pretty": "true"}
        resp = requests.delete(endpoint, headers=headers, params=params)
    except Exception as err:
        # Wrap transport-level failures in the project error type, keeping
        # the original exception as the cause.
        raise AgaveFilesError(err) from err
    # Handle bad status code.
    handle_bad_response_status_code(resp)
|
# 11_CountVectorizer01.py
from sklearn.feature_extraction.text import CountVectorizer
# CountVectorizer: tokenizes strings and encodes them as bag-of-words (BOW) count vectors.
# df: document-frequency
# min_df=2: keep only words whose document frequency is at least 2.
# stop_words: stop words to exclude from the vocabulary.
# Build a vectorizer that drops rare words (df < 2) and the stop word '친구'.
vectorizer = CountVectorizer(min_df=2, stop_words=['친구'])
print(type(vectorizer))
sentences = ['우리 아버지 여자 친구 이름은 홍길동 홍길동', '홍길동 여자 친구 이름은 심순애 심순애', '여자 친구 있나요.']
# Learn the vocabulary from the corpus.
mat = vectorizer.fit(sentences)
print(type(mat))
print(mat.vocabulary_)
# Learned feature tokens.
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# newer versions require get_feature_names_out().
features = vectorizer.get_feature_names()
print(type(features))
print(features)
print('불용어')
print(vectorizer.get_feature_names())
myword = [sentences[0]]
print('myword : ', myword)
# Encode the first sentence as a bag-of-words count vector.
myarray = vectorizer.transform(myword).toarray()
print(type(myarray))
'''
0('여자')이 1번 1('이름은')이 1번, 2('홍길동')가 1번 나왔습니다.
myarray : [[1 1 3]]
단어 사전 : {'여자': 0, '이름은': 1, '홍길동': 2}
myword : ['우리 아버지 여자 친구 이름은 홍길동 홍길동 홍길동']
'''
print('myarray : ', myarray)
print('finished')
|
import tensorflow as tf
from tf_rl.common.utils import *
from tf_rl.agents.core import Agent_atari, Agent_cartpole
class DQfD_atari(Agent_atari):
    """
    Deep Q-learning from Demonstrations (DQfD) agent for Atari.

    The training objective combines a one-step double-DQN TD loss, an
    n-step TD loss, a large-margin classification loss against the
    demonstrator's actions (active only while `pretrain_flag` is True)
    and an L2 regularisation term.
    """
    def __init__(self, model, optimizer, loss_fn, grad_clip_fn, num_action, params):
        self.params = params
        self.num_action = num_action
        # Large-margin supervised loss is weighted 1.0 during pre-training,
        # 0.0 afterwards (see _inner_update).
        self.pretrain_flag = True
        self.grad_clip_fn = grad_clip_fn
        self.loss_fn = loss_fn
        self.eval_flg = False
        self.index_timestep = 0
        # Online and target networks share the same architecture.
        self.main_model = model(num_action)
        self.target_model = model(num_action)
        self.optimizer = optimizer
        self.manager = create_checkpoint(model=self.main_model,
                                         optimizer=self.optimizer,
                                         model_dir=params.model_dir)

    @tf.contrib.eager.defun(autograph=False)
    def _select_action(self, state):
        # Q-values for every action; the caller chooses the action.
        return self.main_model(state)

    @tf.contrib.eager.defun(autograph=False)
    def _inner_update(self, states, actions, rewards, next_states, dones):
        """One gradient step; each input tensor packs the 1-step and n-step
        halves of the batch along its last axis (hence the tf.split calls)."""
        # get the current global-timestep
        self.index_timestep = tf.train.get_global_step()
        # We divide the grayscale pixel values by 255 here rather than storing
        # normalized values because uint8s are 4x cheaper to store than float32s.
        states, next_states = states / 255., next_states / 255.
        s, s_n = tf.split(states, 2, axis=-1)
        a, a_n = tf.split(actions, 2, axis=-1)
        r, r_n = tf.split(rewards, 2, axis=-1)  # reward is already discounted in replay_buffer
        ns, ns_n = tf.split(next_states, 2, axis=-1)
        d, d_n = tf.split(dones, 2, axis=-1)
        # Split each action tensor into expert (a_e) and learner (a_l) parts.
        a_e, a_l = tf.split(a, 2, axis=1)
        a_e_n, a_l_n = tf.split(a_n, 2, axis=1)
        # flatte them
        r, r_n = tf.reshape(r, [-1]), tf.reshape(r_n, [-1])
        d, d_n = tf.reshape(d, [-1]), tf.reshape(d_n, [-1])
        a_e, a_l = tf.reshape(a_e, [-1]), tf.reshape(a_l, [-1])
        a_e_n, a_l_n = tf.reshape(a_e_n, [-1]), tf.reshape(a_l_n, [-1])
        # ===== make sure to fit all process to compute gradients within this Tape context!! =====
        with tf.GradientTape() as tape:
            one_step_loss = self._one_step_loss(s, a_e, r, ns, d)
            n_step_loss = self._n_step_loss(s, s_n, a_e_n, r_n, d)
            large_margin_clf_loss = self._large_margin_clf_loss(a_e, a_l)
            l2_loss = tf.add_n(self.main_model.losses) * self.params.L2_reg
            # combined_loss = one_step_loss + lambda_1*n_step_loss + lambda_2*large_margin_clf_loss + lambda_3*l2_loss
            if self.pretrain_flag:
                combined_loss = one_step_loss + 1.0 * n_step_loss + 1.0 * large_margin_clf_loss + (10 ** (-5)) * l2_loss
            else:
                # After pre-training the supervised margin term is weighted 0.
                combined_loss = one_step_loss + 1.0 * n_step_loss + 0.0 * large_margin_clf_loss + (10 ** (-5)) * l2_loss
            # TODO: check if this is really correct..
            loss = tf.math.reduce_sum(combined_loss)
        # get gradients
        grads = tape.gradient(loss, self.main_model.trainable_weights)
        # clip gradients
        grads = self.grad_clip_fn(grads)
        # apply processed gradients to the network
        self.optimizer.apply_gradients(zip(grads, self.main_model.trainable_weights))
        return loss, combined_loss

    def _one_step_loss(self, states, actions_e, rewards, next_states, dones):
        """Double-DQN one-step TD error: Y - Q(s, a_e)."""
        # calculate target: max_a Q(s_{t+1}, a_{t+1})
        next_Q_main = self.main_model(next_states)
        next_Q = self.target_model(next_states)
        # calculate Q(s,a)
        q_values = self.main_model(states)
        # Online network selects the argmax action; target network evaluates it.
        idx_flattened = tf.range(0, tf.shape(next_Q)[0]) * tf.shape(next_Q)[1] + tf.cast(
            tf.math.argmax(next_Q_main, axis=-1), tf.int32)
        # passing [-1] to tf.reshape means flatten the array
        # using tf.gather, associate Q-values with the executed actions
        chosen_next_q = tf.gather(tf.reshape(next_Q, [-1]), idx_flattened)
        Y = rewards + self.params.gamma * chosen_next_q * (1. - dones)
        Y = tf.stop_gradient(Y)
        # get the q-values which is associated with actually taken actions in a game
        actions_one_hot = tf.one_hot(actions_e, self.num_action, 1.0, 0.0)
        chosen_q = tf.math.reduce_sum(tf.math.multiply(actions_one_hot, q_values), reduction_indices=1)
        return tf.math.subtract(Y, chosen_q)

    def _n_step_loss(self, states, states_n, actions_e, rewards_n, dones):
        """n-step TD error built on the discounted n-step return G."""
        # calculate target: max_a Q(s_{t+n}, a_{t+n})
        n_step_Q_main = self.main_model(states_n)
        n_step_Q = self.target_model(states_n)
        # calculate Q(s,a)
        q_values = self.main_model(states)
        idx_flattened = tf.range(0, tf.shape(n_step_Q)[0]) * tf.shape(n_step_Q)[1] + tf.cast(
            tf.math.argmax(n_step_Q_main, axis=-1), tf.int32)
        # passing [-1] to tf.reshape means flatten the array
        # using tf.gather, associate Q-values with the executed actions
        action_probs = tf.gather(tf.reshape(n_step_Q, [-1]), idx_flattened)
        # n-step discounted reward
        # TODO: check if this is correct
        G = tf.math.reduce_sum([self.params.gamma ** i * rewards_n for i in range(self.params.n_step)])
        # TD-target
        # TODO: think how to take `dones` into account in TD-target
        Y = G + self.params.gamma ** self.params.n_step * action_probs * (1. - dones)
        # get the q-values which is associated with actually taken actions in a game
        actions_one_hot = tf.one_hot(actions_e[-1], self.num_action, 1.0, 0.0)
        chosen_q = tf.math.reduce_sum(tf.math.multiply(actions_one_hot, q_values), reduction_indices=1)
        return tf.math.subtract(Y, chosen_q)

    def _large_margin_clf_loss(self, a_e, a_l):
        """
        Large-margin classification loss.

        Logic is as below
        if a_e == a_l:
            return 0
        else:
            return 0.8
        :param a_e: expert (demonstrator) actions
        :param a_l: learner actions
        :return: elementwise margin penalty (0 or 0.8)
        """
        result = (a_e != a_l)
        return result * 0.8
class DQfD_cartpole(Agent_cartpole):
    """
    Deep Q-learning from Demonstrations (DQfD) agent for CartPole.

    NOTE(review): near-duplicate of DQfD_atari except for the pixel
    normalisation (CartPole observations are not images); consider
    factoring the shared logic into a common base.
    """
    def __init__(self, model, optimizer, loss_fn, grad_clip_fn, num_action, params):
        self.params = params
        self.num_action = num_action
        # Large-margin supervised loss is active only during pre-training.
        self.pretrain_flag = True
        self.grad_clip_fn = grad_clip_fn
        self.loss_fn = loss_fn
        self.eval_flg = False
        self.index_timestep = 0
        # Online and target networks share the same architecture.
        self.main_model = model(num_action)
        self.target_model = model(num_action)
        self.optimizer = optimizer
        self.manager = create_checkpoint(model=self.main_model,
                                         optimizer=self.optimizer,
                                         model_dir=params.model_dir)

    @tf.contrib.eager.defun(autograph=False)
    def _select_action(self, state):
        # Q-values for every action; the caller chooses the action.
        return self.main_model(state)

    @tf.contrib.eager.defun(autograph=False)
    def _inner_update(self, states, actions, rewards, next_states, dones):
        """One gradient step; each input tensor packs the 1-step and n-step
        halves of the batch along its last axis (hence the tf.split calls)."""
        # get the current global-timestep
        self.index_timestep = tf.train.get_global_step()
        s, s_n = tf.split(states, 2, axis=-1)
        a, a_n = tf.split(actions, 2, axis=-1)
        r, r_n = tf.split(rewards, 2, axis=-1)  # reward is already discounted in replay_buffer
        ns, ns_n = tf.split(next_states, 2, axis=-1)
        d, d_n = tf.split(dones, 2, axis=-1)
        # Split each action tensor into expert (a_e) and learner (a_l) parts.
        a_e, a_l = tf.split(a, 2, axis=1)
        a_e_n, a_l_n = tf.split(a_n, 2, axis=1)
        # flatte them
        r, r_n = tf.reshape(r, [-1]), tf.reshape(r_n, [-1])
        d, d_n = tf.reshape(d, [-1]), tf.reshape(d_n, [-1])
        a_e, a_l = tf.reshape(a_e, [-1]), tf.reshape(a_l, [-1])
        a_e_n, a_l_n = tf.reshape(a_e_n, [-1]), tf.reshape(a_l_n, [-1])
        # ===== make sure to fit all process to compute gradients within this Tape context!! =====
        with tf.GradientTape() as tape:
            one_step_loss = self._one_step_loss(s, a_e, r, ns, d)
            n_step_loss = self._n_step_loss(s, s_n, a_e_n, r_n, d)
            large_margin_clf_loss = self._large_margin_clf_loss(a_e, a_l)
            l2_loss = tf.add_n(self.main_model.losses) * self.params.L2_reg
            # combined_loss = one_step_loss + lambda_1*n_step_loss + lambda_2*large_margin_clf_loss + lambda_3*l2_loss
            if self.pretrain_flag:
                combined_loss = one_step_loss + 1.0 * n_step_loss + 1.0 * large_margin_clf_loss + (10 ** (-5)) * l2_loss
            else:
                # After pre-training the supervised margin term is weighted 0.
                combined_loss = one_step_loss + 1.0 * n_step_loss + 0.0 * large_margin_clf_loss + (10 ** (-5)) * l2_loss
            # TODO: check if this is really correct..
            loss = tf.math.reduce_sum(combined_loss)
        # get gradients
        grads = tape.gradient(loss, self.main_model.trainable_weights)
        # clip gradients
        grads = self.grad_clip_fn(grads)
        # apply processed gradients to the network
        self.optimizer.apply_gradients(zip(grads, self.main_model.trainable_weights))
        return loss, combined_loss

    def _one_step_loss(self, states, actions_e, rewards, next_states, dones):
        """Double-DQN one-step TD error: Y - Q(s, a_e)."""
        # calculate target: max_a Q(s_{t+1}, a_{t+1})
        next_Q_main = self.main_model(next_states)
        next_Q = self.target_model(next_states)
        # calculate Q(s,a)
        q_values = self.main_model(states)
        # Online network selects the argmax action; target network evaluates it.
        idx_flattened = tf.range(0, tf.shape(next_Q)[0]) * tf.shape(next_Q)[1] + tf.cast(
            tf.math.argmax(next_Q_main, axis=-1), tf.int32)
        # passing [-1] to tf.reshape means flatten the array
        # using tf.gather, associate Q-values with the executed actions
        chosen_next_q = tf.gather(tf.reshape(next_Q, [-1]), idx_flattened)
        Y = rewards + self.params.gamma * chosen_next_q * (1. - dones)
        Y = tf.stop_gradient(Y)
        # get the q-values which is associated with actually taken actions in a game
        actions_one_hot = tf.one_hot(actions_e, self.num_action, 1.0, 0.0)
        chosen_q = tf.math.reduce_sum(tf.math.multiply(actions_one_hot, q_values), reduction_indices=1)
        return tf.math.subtract(Y, chosen_q)

    def _n_step_loss(self, states, states_n, actions_e, rewards_n, dones):
        """n-step TD error built on the discounted n-step return G."""
        # calculate target: max_a Q(s_{t+n}, a_{t+n})
        n_step_Q_main = self.main_model(states_n)
        n_step_Q = self.target_model(states_n)
        # calculate Q(s,a)
        q_values = self.main_model(states)
        idx_flattened = tf.range(0, tf.shape(n_step_Q)[0]) * tf.shape(n_step_Q)[1] + tf.cast(
            tf.math.argmax(n_step_Q_main, axis=-1), tf.int32)
        # passing [-1] to tf.reshape means flatten the array
        # using tf.gather, associate Q-values with the executed actions
        action_probs = tf.gather(tf.reshape(n_step_Q, [-1]), idx_flattened)
        # n-step discounted reward
        # TODO: check if this is correct
        G = tf.math.reduce_sum([self.params.gamma ** i * rewards_n for i in range(self.params.n_step)])
        # TD-target
        # TODO: think how to take `dones` into account in TD-target
        Y = G + self.params.gamma ** self.params.n_step * action_probs * (1. - dones)
        # get the q-values which is associated with actually taken actions in a game
        actions_one_hot = tf.one_hot(actions_e[-1], self.num_action, 1.0, 0.0)
        chosen_q = tf.math.reduce_sum(tf.math.multiply(actions_one_hot, q_values), reduction_indices=1)
        return tf.math.subtract(Y, chosen_q)

    def _large_margin_clf_loss(self, a_e, a_l):
        """
        Large-margin classification loss.

        Logic is as below
        if a_e == a_l:
            return 0
        else:
            return 0.8
        :param a_e: expert (demonstrator) actions
        :param a_l: learner actions
        :return: elementwise margin penalty (0 or 0.8)
        """
        result = (a_e != a_l)
        return result * 0.8
|
# This is a function for k-fold cross-validation on (X; y)
# Yi Ding
import numpy as np
# This function return the accuracy score of the prediction for classification
def my_accuracy_score_classification(ytrue, ypred, metric):
    """Fraction of predictions equal to the true labels.

    :param ytrue: array-like of true labels (flat or column vector)
    :param ypred: array-like of predicted labels
    :param metric: must be 'accuracy'
    :return: float accuracy in [0, 1]
    :raises Exception: on length mismatch or unknown metric
    """
    ytrue = np.asarray(ytrue)
    ypred = np.asarray(ypred)
    if ytrue.shape[0] != ypred.shape[0]:
        raise Exception('ERROR: ytrue and ypred not same length!')
    # Validate the metric up front instead of after counting.
    if metric != 'accuracy':
        raise Exception('No that metric')
    # ravel() so an (m, 1) column vector compares elementwise against an (m,)
    # vector instead of broadcasting to an (m, m) matrix; np.mean replaces
    # the original element-by-element counting loop.
    return float(np.mean(ytrue.ravel() == ypred.ravel()))
# This function return the accuracy score of the prediction for regression
def my_accuracy_score_regression(ytrue, ypred, metric='mae'):
    """Regression error/score of the prediction.

    :param ytrue: array-like of true values (flat or column vector)
    :param ypred: array-like of predicted values
    :param metric: 'mae', 'mse' or 'r_square'
    :return: float metric value
    :raises Exception: on length mismatch or unknown metric
    """
    ytrue = np.asarray(ytrue, dtype=float).ravel()
    ypred = np.asarray(ypred, dtype=float).ravel()
    if ytrue.shape[0] != ypred.shape[0]:
        raise Exception('ERROR: ytrue and ypred not same length!')
    # Vectorized residuals replace the original accumulation loop.
    residuals = ytrue - ypred
    if metric == 'mae':
        return float(np.mean(np.abs(residuals)))
    elif metric == 'mse':
        return float(np.mean(residuals ** 2))
    elif metric == 'r_square':
        # R^2 = 1 - SS_res / SS_tot
        ss_res = float(np.sum(residuals ** 2))
        ss_tot = float(np.sum((ytrue - ytrue.mean()) ** 2))
        return 1 - ss_res / ss_tot
    else:
        raise Exception('No that metric')
# Main function
# ml_type = 'classification' selects the classification error rate
# ml_type = 'regression' selects the chosen regression metric
def my_cross_val(method, X, y, k, ml_type='classification', metric = 'accuracy'):
    """Run k-fold cross-validation of `method` on (X, y).

    Returns a (k, 1) array holding, per fold, the error rate
    (classification) or the chosen metric value (regression).
    NOTE(review): `method` must be an already-constructed estimator with
    fit/predict; it is re-fit (not re-created) on each fold.
    """
    X = np.array(X)
    y = np.array(y)
    # Force y into an (m, 1) column vector.
    y = np.reshape(y,(X.shape[0],1))
    # Initialize array for the test set error
    errRat = np.empty([k, 1])
    # Permute the indices randomly
    rndInd = np.random.permutation(y.size)
    # Start and end index of test set (each fold spans floor(m/k) samples).
    sttInd = 0
    endInd = (np.array(y.size/k).astype(int))
    indLen = (np.array(y.size/k).astype(int))
    for i in range(0, k):
        # Prepare training data and test data: train on everything outside
        # the current [sttInd, endInd) test window.
        Xtrain = np.concatenate((X[rndInd[0:sttInd],:],X[rndInd[endInd:y.size],:]), axis=0)
        ytrain = np.concatenate((y[rndInd[0:sttInd]],y[rndInd[endInd:y.size]]), axis=0)
        Xtest = X[rndInd[sttInd:endInd],:]
        ytest = y[rndInd[sttInd:endInd]]
        # Slide the test window to the next fold.
        sttInd = endInd
        endInd = endInd + indLen
        # Create the model
        # myMethod = method()
        myMethod = method # Directly passing the model
        # Fit the data
        myMethod.fit(Xtrain,ytrain.ravel())
        # Test the model on (new) data
        ypred = myMethod.predict(Xtest)
        #print("ytest:",ytest)
        #print("ypred:",ypred)
        # Save error rate
        if ml_type == 'classification':
            errRat[i] = 1 - my_accuracy_score_classification(ytest, ypred, metric)
        elif ml_type == 'regression':
            errRat[i] = my_accuracy_score_regression(ytest, ypred, metric)
        else:
            raise Exception('Invalid ml_type!')
    return errRat
from typing import List, Optional
import databricks.koalas as ks
import numpy as np
import pandas as pd
from pyspark.ml.feature import Bucketizer
from sklearn.base import BaseEstimator, TransformerMixin
from data_utils.preprocessing.base import set_df_library
class ColumnBinner(BaseEstimator, TransformerMixin):
    """
    Bucketize a column in Pandas/Koalas dataframe with given split points and labels.

    :param Optional[str] from_column: column from which to bucket,
        default None
    :param Optional[str] to_column: column to which to store the labels
        default None
    :param Optional[List[float]] bins: numerical split points with n+1 elements,
        default None
    :param Optional[List[str]] labels: labels for each bin with n elements,
        default None
    """
    def __init__(
        self,
        from_column: Optional[str] = None,
        to_column: Optional[str] = None,
        bins: Optional[List[float]] = None,
        labels: Optional[List[str]] = None,
    ):
        self.from_column = from_column
        self.to_column = to_column
        self.bins = bins
        self.labels = labels
        # Dataframe library in use (pd or ks); resolved during transform().
        self.dflib_ = None

    def fit(self, X, y=None):
        # pylint: disable=unused-argument
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Bucket from_column and assign to
        to_column in X.

        :param Union[pd.DataFrame, ks.DataFrame] X: input pandas/koalas dataframe
        :return: transformed dataframe
        """
        self.dflib_ = set_df_library(X)
        if self.dflib_ == ks:
            # Koalas path: round-trip through Spark's Bucketizer.
            # rename "index" column to "index_place_holder" if already exist
            if "index" in X.columns:
                X = X.rename(columns={"index": "index_place_holder"})
            # drop to_column if already exist
            if self.to_column in X.columns:
                X = X.drop(self.to_column)
            # Bucketizer's right range point is inclusive, warning: 0 will be converted to negative
            sdf = X.to_spark(index_col="index_")
            bucketizer = Bucketizer(
                splits=self.bins, inputCol=self.from_column, outputCol=self.to_column, handleInvalid="keep"
            )
            sdf = bucketizer.transform(sdf)
            X = sdf.to_koalas(index_col="index_")
            # X = X.rename(columns={"index": "index_"})
            # X.set_index("index_", inplace=True)
            if "index_place_holder" in X.columns:
                X = X.rename(columns={"index_place_holder": "index"})
            # ks doesn't support multi-dtype repalcement, e.g. {1.0: 'a'},
            # but NaN is still kept null after astype('str')
            X[self.to_column] = X[self.to_column].astype("str")
            # Map bucket ordinals ("0.0", "1.0", ...) onto the user labels.
            X = X.replace(dict(zip(np.arange(0.0, len(self.bins) - 1).astype("str").tolist(), self.labels)))
        elif self.dflib_ == pd:
            # pd.cut will return category dtype, which is unrecognizable for
            # spark, ks, Alteryx, so let's convert to str,
            # warning: 0.0 will be converted to '0.0'
            null_filter = X[self.from_column].isnull()
            X[self.to_column] = pd.cut(x=X[self.from_column], bins=self.bins, labels=self.labels).astype("str")
            # Restore NaN for rows whose source value was null (astype("str")
            # would otherwise keep the stringified placeholder).
            X.loc[null_filter, self.to_column] = np.nan
        return X
|
from django import forms
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from transferencia.models import Transferencia, Troca
from header.validators import consultar_bi_existe, validar_comprimento_4, validar_numeros, validar_string, validar_bi, consultar_numero_agente, consultar_bi
from header.opcoesModel import SITUACAO_TRANSFERENCIA, ORGAO_COMANDOS
import header
class TransferenciaForm(ModelForm):
    """Form for transferring an agent to another organ/command.

    The `bi` field is validated (format + existence) but is intentionally
    absent from Meta.fields, so it is not persisted by the ModelForm itself.
    """
    bi = forms.CharField(max_length=14, required=True, widget=forms.TextInput(attrs={'class': 'form-control bi_agente'}), validators=[validar_bi,consultar_bi_existe])
    orgao_destino = forms.CharField(max_length=100, widget=forms.Select(choices=ORGAO_COMANDOS))
    data_entrada = forms.CharField(widget=forms.DateInput(attrs={'type': 'date', 'data-inputmask': "'mask' : '99/99/9999'"}))
    dispacho = forms.CharField(max_length=30, required=False, widget=forms.TextInput(attrs={'data-inputmask': "'mask' : '9999/99'"}))
    motivo = forms.CharField(max_length=900, required=False, widget=forms.Textarea(attrs={'length':900}))
    arquivo = forms.FileField(required=False)
    numero_guia = forms.CharField(max_length=10)
    situacao = forms.CharField(max_length=30, required=False)

    class Meta:
        model = Transferencia
        fields = [ 'orgao_destino', 'data_entrada', 'dispacho', 'motivo','arquivo', 'numero_guia','situacao']
class TrocaForm(ModelForm):
    """Form for swapping (troca) two agents between organs/commands."""
    bi1 = forms.CharField(max_length=14, required=True, validators=[validar_bi, consultar_bi_existe])
    bi2 = forms.CharField(max_length=14, required=True, validators=[validar_bi, consultar_bi_existe])
    origem_primeiro = forms.CharField(max_length=100, widget=forms.Select(choices=ORGAO_COMANDOS))
    destino_primeiro = forms.CharField(max_length=100, widget=forms.Select(choices=ORGAO_COMANDOS))
    origem_segundo = forms.CharField(max_length=100, required=False)
    destino_segundo = forms.CharField(max_length=100, required=False)
    data = forms.CharField(required=False, widget=forms.TextInput(attrs={'type': 'date', 'data-inputmask': "'mask' : '99/99/9999'"}))
    motivo = forms.CharField(max_length=2000, required=False, widget=forms.Textarea(attrs={'class': 'form-control', 'length':900}))

    class Meta:
        model = Troca
        fields = ['origem_primeiro', 'destino_primeiro', 'origem_segundo', 'destino_segundo', 'data', 'motivo']

    def clean_bi2(self):
        # BUG FIX: use .get() -- 'bi1' is absent from cleaned_data whenever
        # its own validators failed, and direct indexing raised KeyError.
        bi_1 = self.cleaned_data.get('bi1')
        bi_2 = self.cleaned_data.get('bi2')
        if bi_1 is not None and bi_1 == bi_2:
            raise ValidationError("O numero não e valido, não podem ser igual")
        return bi_2

    def clean_destino_primeiro(self):
        destino_primeiro = self.cleaned_data.get('destino_primeiro')
        origem_primeiro = self.cleaned_data.get('origem_primeiro')
        # Origin and destination of the first agent must differ.
        if destino_primeiro == origem_primeiro:
            raise forms.ValidationError(" Ops.. Erro o destino não pode ser igual a origem ")
        return destino_primeiro
class Nip_Form(forms.Form):
    """Simple lookup form holding a 14-character NIP number."""
    nip = forms.CharField(max_length=14, required=True)
|
#!/usr/bin/python
import smbus
import time
import hd44870_lib as lcd
# Initialise the display, then cycle test messages forever,
# alternating the backlight on and off between passes.
lcd.lcd_init()
lcd.lcd_clean()
while True:
    for backlight_on in (True, False):
        lcd.lcd_backlite(backlight_on)
        lcd.lcd_write_lines("test1", "test2")
        time.sleep(1)
        lcd.lcd_write_lines("test2", "test1")
        time.sleep(1)
|
from django.template import Context
from .models import ContactInfo
def contact(request):
    """Context processor exposing the ContactInfo row with pk=1 to all
    templates as 'contact_info'."""
    info = ContactInfo.objects.get(id=1)
    return {'contact_info': info}
|
# Read a non-negative integer and print the sum 1 + 2 + ... + n.
n = int(input("Enter the number"))
if n < 0:
    print("Enter a positive number")
else:
    # The builtin sum over a range replaces the manual countdown loop
    # (the original also shadowed the builtin `sum`).
    total = sum(range(1, n + 1))
    print("The sum is", total)
|
from django.conf.urls import url, include
from rest_framework import routers
from project.api import views
# Register REST framework viewsets on a default router.
router=routers.DefaultRouter()
router.register(r'users',views.UserViewSet)
urlpatterns=[
    # All router-generated endpoints live under the site root.
    url(r'^',include(router.urls)),
    # Browsable-API login/logout views.
    url(r'^api-auth',include('rest_framework.urls',namespace='rest_framework'))
]
# convert_gui.pyw
# program to convert Celsius to Farenheit using a simple graphical interface
from graphics import *
def main():
    """Celsius-to-Fahrenheit converter with a minimal graphics.py UI."""
    win = GraphWin("Celsius Converter", 400, 300)
    win.setCoords(0.0, 0.0, 3.0, 4.0)

    # Build the static interface: labels, the entry box, the output text
    # and a rectangle that acts as the button outline.
    Text(Point(1,3), " Celsius Temperature:").draw(win)
    Text(Point(1,1), "Farenheit Temperature: ").draw(win)
    entry = Entry(Point(2,3), 5)
    entry.setText("0.0")
    entry.draw(win)
    result = Text(Point(2,1), "")
    result.draw(win)
    button = Text(Point(1.5,2.0),"Convert It")
    button.draw(win)
    Rectangle(Point(1,1.5), Point(2,2.5)).draw(win)

    # First click: read the entry and convert.
    win.getMouse()
    celsius = float(entry.getText())
    farenheit = 9.0/5.0 * celsius + 32

    # Show the result and repurpose the button as a quit control.
    result.setText(farenheit)
    button.setText("Quit")

    # Second click closes the window.
    win.getMouse()
    win.close()

main()
|
import time
import subprocess
import rclpy
from rclpy.node import Node
from tms_msg_ts.srv import TsReq
from tms_msg_ts.srv import TsStateControl
class Task:
    """A queued task request, ordered by (priority, arrival rostime)."""

    def __init__(self):
        # All descriptors start at zero until filled in by the caller.
        self.rostime = 0
        self.task_id = 0
        self.robot_id = 0
        self.object_id = 0
        self.user_id = 0
        self.place_id = 0
        self.priority = 0

    def __lt__(self, other):
        # Lower priority value sorts first; ties break on arrival time.
        own_key = (self.priority, self.rostime)
        other_key = (other.priority, other.rostime)
        return own_key < other_key
class TaskManagerElements:
    """Bookkeeping record for one scheduler worker slot."""

    def __init__(self, task):
        self.num = 0        # worker/service sequence number
        self.flag = True    # True while the worker slot is available
        self.task = task    # task assigned to this slot
class TmsTsMaster(Node):
    """ROS2 task-scheduler master node.

    Queues incoming task requests ordered by (priority, arrival time) and
    exposes two services: 'tms_ts_master' to enqueue a task and
    'ts_state_control' for scheduler state transitions.
    """

    def __init__(self):
        super().__init__('tms_ts_master')
        self.state_condition = -1
        self.loop_counter = 0
        self.abort = False
        self.task_manager = []
        self.task_list = []
        # Callbacks(service)
        self.create_service(TsReq, 'tms_ts_master', self.tms_ts_master_callback)
        self.create_service(TsStateControl, 'ts_state_control', self.ts_state_control_callback)

    def create_service_call(self, rostime, task_id, robot_id, object_id, user_id, place_id, priority, thread_num):
        """Build (but do not run) a `ros2 service call` command line.

        NOTE(review): `object_id` is accepted but never embedded in the
        command -- confirm whether that is intentional.
        """
        print("[create_service_call] >> start")
        service_name = "request"
        command = (
            'ros2 service call /'
            + service_name
            + str(self.task_manager[thread_num].num)
            + ' "'
            + str(rostime)
            + '" "'
            + str(task_id)
            + '" "'
            + str(robot_id)
            + '" "'
            + str(user_id)
            + '" "'
            + str(place_id)
            + '" "'
            + str(priority)
            + '"\n'
        )
        return command

    def create_run_command(self, thread_num):
        """Build the `ros2 run` command for a scheduler thread (stub)."""
        command = (
            'ros2 run --help' # TODO: write run command
        )
        return command

    def execute_command(self, buf):
        """Run a shell command string via subprocess; always returns True."""
        print("[execute_command] >> " + buf)
        subprocess.run(buf.split())
        # TODO: check cannnot run the command
        return True

    def tms_ts_master_callback(self, request, response):
        """Service handler: enqueue one task and keep the queue sorted."""
        print("[tms_ts_master_callback] >> start")
        task = Task()
        task.task_id = request.task_id
        task.robot_id = request.robot_id
        task.object_id = request.object_id
        task.user_id = request.user_id
        task.place_id = request.place_id
        task.priority = request.priority
        task.rostime = int(time.time() * 1000000)  # microseconds (was mislabelled "nsec")
        self.task_list.append(task)
        self.task_list.sort()
        # print all tasks in task_list
        for task in self.task_list:
            print("task_id: " + str(task.task_id))
            print("priority: " + str(task.priority))
        response.result = 1
        return response

    def ts_state_control_callback(self, request, response):
        """Service handler for scheduler state control.

        BUG FIX: an rclpy service callback must return the response object
        on every path; previously only the illegal-subtasks branch returned
        it and all other paths fell through returning None.
        """
        print("[ts_state_control_callback] >> start")
        if request.type == 0: # judge segment(from TS)
            if request.cc_subtasks == 0:
                pass
            elif request.cc_subtasks >= 2:
                pass
            else:
                self.get_logger().info("Illegal subtasks number.")
                self.state_condition = -1
                self.loop_counter = 0
                response.result = 0
                return response
        elif request.type == 1:
            pass
        elif request.type == 2:
            pass
        else:
            pass
        return response

    def add_thread(self, thread_num, arg1, arg2):
        """Stub: spawn a scheduler worker thread."""
        pass
def main(args=None):
    """Spin the task-scheduler master node until shutdown."""
    rclpy.init(args=args)
    node = TmsTsMaster()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
|
from time_dependent_ssa import SSA
from math import *
from scipy.stats import norm
dnorm = norm.rvs
from utils import *
from collections import defaultdict
import mpmath
# mRNA degradation rate derived from a 15-minute half-life: ln(2)/15.
gamma_m_rate = log(2)/15
gamma_p_rate = 0 #log(2)/20 #???
# Rate samplers: normal draws truncated at zero so propensities stay non-negative.
sample_kr2_rate = lambda :max(0,dnorm(0.5,0.1))
sample_kr3_rate = lambda :max(0,dnorm(2,0.4))
sample_forward_kinase_independent_rate = lambda : max(0,dnorm(1,(0.2))) # mean, sd
sample_backward_kinase_independent_rate = lambda : max(0,dnorm(2,(0.4))) # mean, sd
# alpha/beta samplers for kinase-dependent rates (may be negative by design;
# propensities are clamped at zero where they are used).
sample_forward_alpha = lambda :dnorm(-1,0.2)
sample_backward_alpha = lambda :dnorm(1,0.2)
sample_forward_beta = lambda :dnorm(2,0.5)
sample_backward_beta = lambda :dnorm(-2,0.5)
k_t_rate = 0 # for now
# Sentinel "effectively instantaneous" rate.
FAST = 10**6
def kinase(t):
    """Step input: kinase level is 0 before t=120, then 10."""
    return 0 if t < 120 else 10
def kinase2(t):
    """Staircase input: 0 before t=120, 0.5 until 240, 1 until 360, then 2."""
    if t < 120:
        return 0
    if t < 240:
        return 0.5
    if t < 360:
        return 1
    return 2
# NOTE(review): the lambdas below use Python 2 tuple-parameter unpacking
# (`lambda (s1,s2,s3,m),t: ...`); this module only runs under Python 2.
# Each factory returns a propensity function of (state, time) for the SSA.

def f_k12_indep(k):
    # Kinase-independent S1 -> S2 propensity: k * [S1].
    #k = sample_forward_kinase_independent_rate()
    return lambda (s1,s2,s3,m),t:k*s1

def f_k12_dep(alpha,beta,kinase):
    # Kinase-dependent S1 -> S2 propensity, clamped at zero.
    # alpha = sample_forward_alpha()
    # beta = sample_forward_beta()
    return lambda (s1,s2,s3,m),t:max(0,alpha + beta*kinase(t))*s1

def f_k21_indep(k):
    # Kinase-independent S2 -> S1 propensity.
    return lambda (s1,s2,s3,m),t:k*s2

def f_k21_dep(alpha,beta,kinase):
    return lambda (s1,s2,s3,m),t:max(0,alpha + beta*kinase(t))*s2

def f_k23_indep(k):
    # Kinase-independent S2 -> S3 propensity.
    return lambda (s1,s2,s3,m),t:k*s2

def f_k23_dep(alpha,beta,kinase):
    return lambda (s1,s2,s3,m),t:max(0,alpha + beta*kinase(t))*s2

def f_k32_indep(k):
    # Kinase-independent S3 -> S2 propensity.
    return lambda (s1,s2,s3,m),t:k*s3

def f_k32_dep(alpha,beta,kinase):
    return lambda (s1,s2,s3,m),t:max(0,alpha + beta*kinase(t))*s3

def f_kr2(kr2):
    # Transcription from state S2: kr2 * [S2].
    return lambda (s1,s2,s3,m),t:kr2*s2

def f_kr3(kr3):
    # Transcription from state S3: kr3 * [S3].
    return lambda (s1,s2,s3,m),t:kr3*s3

def f_gamma_m():
    # First-order mRNA degradation: gamma_m_rate * [M].
    return lambda (s1,s2,s3,m),t:m*gamma_m_rate

# State vector is as follows: S1, S2, S3, M, P, C2, C3, CM
# Candidate model classes: which transition is kinase-dependent.
model_classes = [12,21,23,32]
# Forward vs backward transitions determine which alpha/beta sampler is used.
forwards = [12,23]
backwards = [21,32]
def generate_model(model_class,kinase,init_run_time=0):
    # Build one stochastic 3-state promoter model (S1<->S2<->S3,
    # transcription from S2/S3, mRNA decay), with all rates sampled from
    # their priors and the transition named by `model_class` (12, 21, 23
    # or 32) replaced by a kinase-dependent rate law.  Returns an SSA
    # simulator advanced by `init_run_time`.
    # NOTE(review): Python 2 only -- the exec() calls below depend on
    # exec rebinding function locals (k12..k32, sim attributes), which
    # does not work under Python 3.
    k12_rate = sample_forward_kinase_independent_rate()
    k12 = f_k12_indep(k12_rate)
    k21_rate = sample_backward_kinase_independent_rate()
    k21 = f_k21_indep(k21_rate)
    k23_rate = sample_forward_kinase_independent_rate()
    k23 = f_k23_indep(k23_rate)
    k32_rate = sample_backward_kinase_independent_rate()
    k32 = f_k32_indep(k32_rate)
    # Sample alpha/beta for every class, but only wire the pair matching
    # `model_class` into a kinase-dependent propensity.
    # NOTE(review): alpha_rate/beta_rate keep being overwritten, so the
    # values recorded on the sim below are those of the LAST iteration
    # (mc == 32), not necessarily of `model_class` -- confirm intended.
    for mc in [12,21,23,32]:
        if mc in forwards:
            alpha_rate = sample_forward_alpha()
            beta_rate = sample_forward_beta()
        elif mc in backwards:
            alpha_rate = sample_backward_alpha()
            beta_rate = sample_backward_beta()
        else:
            raise Exception("invalid model class")
        if mc == model_class:
            fmt_str = (mc,mc, alpha_rate,beta_rate)
            exec("k{0} = f_k{1}_dep({2},{3},kinase)".format(*fmt_str))
    kr2_rate = sample_kr2_rate()
    kr3_rate = sample_kr3_rate()
    kr2 = f_kr2(kr2_rate)
    kr3 = f_kr3(kr3_rate)
    gamma_m = f_gamma_m()
    # Start fully in S1 with no mRNA.
    init_state = (1,0,0,0)
    species_names = "S1 S2 S3 M P C2 C3 Cm".split()
    # Stoichiometry vectors are over (S1, S2, S3, M).
    model_reactions = [((-1, 1, 0, 0), k12), # S1 -> S2
                       (( 1,-1, 0, 0), k21), # S2 -> S1
                       (( 0,-1, 1, 0), k23), # S2 -> S3
                       (( 0, 1,-1, 0), k32), # S3 -> S2
                       # S2 -> C2 (transcribe from S2)
                       (( 0,0, 0, 1), kr2),
                       # S3 -> C3 (transcribe from S3)
                       (( 0, 0,0, 1), kr3),
                       # M -> Cm (translate)
                       (( 0, 0, 0,-1), gamma_m), # M -> {}
                       # Null reaction firing at rate s1 (Python 2 lambda).
                       ((0,0,0,0),lambda (s1,s2,s3,m),t:1*s1)
                       ]
    sim = SSA(model_reactions,init_state,species_names=species_names)
    sim.model_class = model_class
    # Record the sampled rates on the simulator for later inspection.
    for param in "k12 k21 k23 k32 kr2 kr3 gamma_m alpha beta".split():
        exec_string = "sim.{0} = {1}_rate".format(param,param)
        exec(exec_string)
    sim.parameters = (sim.k12,
                      sim.k21,
                      sim.k23,
                      sim.k32,
                      sim.kr2,
                      sim.kr3,
                      sim.alpha,
                      sim.beta)
    sim.run(init_run_time)
    return sim
def log_likelihood(p_sample, q_sample):
    """Compute the log-likelihood of `p_sample` under the empirical
    distribution of `q_sample`.

    One copy of each value 0..99 is appended to q_sample as a pseudocount
    so any observed mRNA count in that range has nonzero probability.

    Python 3 fix: ``list(q_sample) + range(100)`` is a TypeError on
    Python 3; the range is now materialised explicitly.
    """
    from collections import Counter  # local import keeps module layout intact
    pseudo_qs = list(q_sample) + list(range(100))
    q_n = float(len(pseudo_qs))
    qs = {k: v / q_n for k, v in Counter(pseudo_qs).items()}  # pseudocount
    return sum(log(qs[p]) for p in p_sample)
def model_class_inference_experiment():
    # Can the kinase-dependent transition be identified from mRNA counts?
    # Simulate reference populations for each model class, then repeatedly
    # draw a random class, simulate trial data, and pick the class whose
    # reference distribution gives the highest log-likelihood.
    # NOTE(review): Python 2 only (print statements); `random` is not
    # imported in the visible header -- presumably via `from utils import *`.
    reference_sims = {}
    num_sims = 1000
    for model_class in model_classes:
        print "generating samples for:",model_class
        sims = [generate_model(model_class,kinase,240) for i in range(num_sims)]
        reference_sims[model_class] = sims
    correct_guesses = 0
    trials = 100
    for trial in range(trials):
        mc = random.choice(model_classes)
        print "beginning trial:",trial
        #trial_sims = [generate_model(mc,kinase,240) for i in range(num_sims)]
        # NOTE(review): trial sims are only run to t=120 here, yet
        # state_at(240) is queried two lines below -- confirm intended.
        trial_sims = [generate_model(mc,kinase,120) for i in range(num_sims)]
        trial_mrnas_120 = [sim.state_at(120)[3] for sim in trial_sims]
        trial_mrnas_240 = [sim.state_at(240)[3] for sim in trial_sims]
        lls = {}
        for model_class in model_classes:
            print "comparing", mc," to: ",model_class
            ref_sims = reference_sims[model_class]
            ref_mrnas_120 = [sim.state_at(120)[3] for sim in ref_sims]
            ref_mrnas_240 = [sim.state_at(240)[3] for sim in ref_sims]
            # Likelihood of the reference samples under the trial data at
            # both time points (treated as independent).
            ll_120 = log_likelihood(ref_mrnas_120, trial_mrnas_120)
            ll_240 = log_likelihood(ref_mrnas_240, trial_mrnas_240)
            print "partial lls:",ll_120,ll_240
            ll = ll_120 + ll_240
            print "total ll:",ll
            lls[model_class] = ll
        best_class = max(lls.keys(),key=lambda k:lls[k])
        if best_class == mc:
            print "Guessed correctly:",lls[best_class], "vs:",[lls[c] for c in model_classes if not c == best_class]
            correct_guesses += 1
        else:
            print "Guessed incorrectly:",lls[best_class],"vs:",lls[mc]
    print "guessed correctly:",correct_guesses/float(trials)
print "loaded"
def likelihood_experiment(mrnas120,mrnas240):
    # Re-simulate 1000 model-class-21 parameter draws, each replayed 100
    # times, and collect mRNA counts at t=120 and t=240 per draw.
    # Returns (all_mrnas120, all_mrnas240, ref_sims).
    # NOTE(review): the mrnas120/mrnas240 parameters are shadowed by the
    # locals below and never read -- confirm they are meant as inputs.
    # Python 2 only (print statements).
    mc = 21 # by previous experiment
    ref_sims = [generate_model(mc,kinase) for i in range(1000)]
    all_mrnas120 = []
    all_mrnas240 = []
    for j,ref_sim in enumerate(ref_sims):
        print "ref_sim:",j
        mrnas120 = []
        mrnas240 = []
        for i in range(100):
            print i
            # Reset the simulator and replay the same parameter set.
            ref_sim.history = []
            ref_sim.time = 0
            ref_sim.run(240)
            mrnas120.append(ref_sim.state_at(120)[3])
            mrnas240.append(ref_sim.state_at(240)[3])
        all_mrnas120.append(mrnas120)
        all_mrnas240.append(mrnas240)
    return all_mrnas120,all_mrnas240,ref_sims
param_estimate = [1.0199423579837563, 2.621820189733821, 1.0811274643284017, 2.621820189733821, 0.5214392227617346, 1.6005073371928782, 0.9140125458538542, -2.3908331183609306]
def likelihood_experiment2(timepoints,data,kinase,samples,runs_per_sample):
    """Given timepoints,data at said time points, and kinase function,
    return a list of parameter values with associated likelihoods"""
    # For `samples` random parameter draws of model class 21, replay each
    # simulator `runs_per_sample` times, record mRNA counts at every
    # timepoint, and score the draw against `data` (indexed in the same
    # order as `timepoints`).  Python 2 only (print statements).
    mc = 21
    # NOTE(review): the `kinase` argument is ignored -- the global
    # kinase2 staircase is used instead.  Confirm which is intended.
    ref_sims = [generate_model(mc,kinase2) for i in range(samples)]
    output = []
    for j,ref_sim in enumerate(ref_sims):
        print "ref_sim:",j
        mrna_dict = defaultdict(list)
        for i in range(runs_per_sample):
            print i
            # Reset and replay with the same parameters.
            ref_sim.history = []
            ref_sim.time = 0
            ref_sim.run(480)
            for tp in timepoints:
                mrna_dict[tp].append(ref_sim.state_at(tp)[3])
        ll = sum(log_likelihood(mrna_dict[tp],data[i])
                 for i,tp in enumerate(timepoints))
        print (ref_sim.parameters,ll)
        output.append((ref_sim.parameters,ll))
    return output
def samples_per_trial_experiment():
    """How many samples are required to reliably estimate the ll?"""
    # Repeats the per-draw likelihood computation 10 times for one model
    # to gauge the variance of the estimate.
    # NOTE(review): this looks like an unfinished adaptation of
    # likelihood_experiment2 -- `runs_per_sample`, `ref_sim`,
    # `timepoints` and `data` are undefined here (the local `sim` is
    # never used), so the function cannot run as written.
    sim = generate_model(21,kinase2)
    lls = []
    for j in range(10):
        mrna_dict = defaultdict(list)
        for i in range(runs_per_sample):
            print i
            ref_sim.history = []
            ref_sim.time = 0
            ref_sim.run(480)
            for tp in timepoints:
                mrna_dict[tp].append(ref_sim.state_at(tp)[3])
        ll = sum(log_likelihood(mrna_dict[tp],data[i])
                 for i,tp in enumerate(timepoints))
        lls.append(ll)
        print "ll:",ll
    return lls
def mean_from_output(output):
    """Given output of the form [params,ll], return mean parameter
    vector"""
    # Likelihood-weighted average of the parameter vectors; mpmath.exp
    # keeps very negative log-likelihoods from underflowing to 0.
    # NOTE(review): `transpose` comes from `utils`; under Python 3 the
    # final map() would need wrapping in list() to return a list.
    param_vectors,lls = transpose(output)
    z = sum(mpmath.exp(ll) for ll in lls)
    return map(sum,transpose([[(mpmath.exp(ll)/z)*p for p in param_vector]
                              for param_vector,ll in output]))
|
from sudachipy import dictionary
from sudachipy import tokenizer
# Segment a Japanese sentence into bunsetsu (phrase chunks) by walking
# the Sudachi tokens in reverse and gluing functional tokens onto the
# content word they follow.
sudachi = dictionary.Dictionary().create()
mode = tokenizer.Tokenizer.SplitMode.C
line = 'ゼロ様の言うとおりでいしたわ'
doc = sudachi.tokenize(line, mode)
doc = list(reversed(doc))
bunsetu = ''
bunsetu_list = []
for i, token in enumerate(doc):
    print(token.surface(), token.part_of_speech())
    pos = token.part_of_speech()
    # Tokens that attach to the preceding content word: particles,
    # auxiliary verbs, symbols, and nominal suffixes.
    attaches = (
        '助詞' in pos[0]
        or '助動詞' in pos[0]
        or '記号' in pos[0]
        or ('接尾辞' in pos[0] and '名詞的' in pos[1])
    )
    # Walking in reverse, every token is prepended to the current chunk;
    # a non-attaching (content) token closes the chunk.
    bunsetu = token.surface() + bunsetu
    if not attaches:
        # Pull a directly preceding prefix token into this chunk too.
        if i + 1 <= len(doc) - 1 and doc[i + 1].part_of_speech()[0] == '接頭辞':
            bunsetu = doc[i + 1].surface() + bunsetu
        bunsetu_list.append(bunsetu)
        bunsetu = ''
bunsetu_list.reverse()
print(bunsetu_list)
|
# This module exists for reporting only.
# The APIs here should not be used by normal application code.
# Every API must use the require_api_key_auth decorator, and
# every URL must be prefixed with /reports.
|
import numpy as np
import requests
from urllib.parse import urlencode
import hashlib
import hmac
import time
import json
from const import ORDERBOOK_DEPTH
from keys import API_KEY, SECRET_KEY
# Binance spot REST API v3 endpoints.
BINANCE_API_V3 = 'https://api.binance.com/api/v3/'
SYMBOLS_URL = BINANCE_API_V3 + 'exchangeInfo'
TICKERS_URL = BINANCE_API_V3 + 'ticker/24hr'
ORDERBOOK_URL = BINANCE_API_V3 + 'depth'
ACCOUNT_URL = BINANCE_API_V3 + 'account'
ORDER_URL = BINANCE_API_V3 + 'order'
# Known quote assets, used by split_symbol to cut 'ETHBTC' into
# ('ETH', 'BTC').  All entries are 3 or 4 characters long.
QUOTE_ASSETS = ['AUD', 'BIDR', 'BKRW', 'BNB', 'BRL', 'BTC', 'BUSD', 'DAI', 'ETH', 'EUR', 'GBP',
                'IDRT', 'NGN', 'PAX', 'RUB', 'TRX', 'TRY', 'TUSD', 'UAH', 'USDC', 'USDS', 'USDT',
                'XRP', 'ZAR']
def get_quote_assets():
    """Fetch all distinct quote assets currently listed on Binance.

    Returns a sorted numpy array of asset codes from exchangeInfo.
    """
    symbols = requests.get(SYMBOLS_URL).json()['symbols']
    return np.unique(np.array([entry['quoteAsset'] for entry in symbols]))
def split_symbol(symbol):
    """Split a Binance symbol into (base, quote).

    Tries a 3-character quote asset first; otherwise assumes the quote
    asset takes the last 4 characters.
    """
    quote_len = 3 if symbol[-3:] in QUOTE_ASSETS else 4
    return symbol[:-quote_len], symbol[-quote_len:]
def fetch_tickers():
    """Fetch 24h tickers; returns [[(base, quote), ask, bid], ...]."""
    raw = requests.get(TICKERS_URL).json()
    return [
        [split_symbol(entry['symbol']), float(entry['askPrice']), float(entry['bidPrice'])]
        for entry in raw
    ]
def fetch_orderbook(symbol):
    """Fetch the order book for `symbol`, limited to ORDERBOOK_DEPTH levels."""
    query = {'symbol': symbol, 'limit': ORDERBOOK_DEPTH}
    return requests.get(ORDERBOOK_URL, query).json()
def get_timestamp():
    """Current Unix time in milliseconds, as Binance's `timestamp` expects."""
    millis = time.time() * 1000
    return int(millis)
def hashing(query_string):
    """HMAC-SHA256 signature of `query_string`, keyed with the API secret."""
    mac = hmac.new(SECRET_KEY.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
    return mac.hexdigest()
def dispatch_request(http_method):
    """Return a requests session method (get/delete/put/post) for `http_method`.

    The session carries the JSON content type and the API key header.

    Bug fix: the fallback for an unknown method used to be the *string*
    'GET', which is not callable; it now falls back to session.get.
    """
    session = requests.Session()
    session.headers.update({
        'Content-Type': 'application/json;charset=utf-8',
        'X-MBX-APIKEY': API_KEY
    })
    return {
        'GET': session.get,
        'DELETE': session.delete,
        'PUT': session.put,
        'POST': session.post,
    }.get(http_method, session.get)
# used for sending request requires the signature
# used for sending request requires the signature
def send_signed_request(http_method, url_path, payload=None):
    """Send a signed Binance request and return the decoded JSON response.

    Appends the mandatory `timestamp` parameter and an HMAC-SHA256
    `signature` over the whole query string.

    Bug fixes: the query-string separator had been corrupted to
    '{}×tamp={}' (HTML-entity mangling of '&timestamp'), which produced
    an invalid signed query; and the mutable default `payload={}` is
    replaced with None.
    """
    query_string = urlencode(payload or {}, True)
    if query_string:
        query_string = "{}&timestamp={}".format(query_string, get_timestamp())
    else:
        query_string = 'timestamp={}'.format(get_timestamp())
    url = url_path + '?' + query_string + '&signature=' + hashing(query_string)
    params = {'url': url, 'params': {}}
    response = dispatch_request(http_method)(**params)
    return response.json()
def find_usdt(assets):
    """Return the balance entry whose asset is USDT, or None if absent."""
    return next((entry for entry in assets if entry['asset'] == 'USDT'), None)
def fetch_balance():
    """Return the account's free USDT balance as a float."""
    balances = send_signed_request('GET', ACCOUNT_URL)['balances']
    usdt_entry = find_usdt(balances)
    return float(usdt_entry['free'])
def limit_order(symbol, side, volume, price):
    """Place a good-till-cancelled limit order.

    `side` must be upper case ('BUY'/'SELL'); `price` is stringified for
    the API.
    """
    order = {
        "symbol": symbol,
        "side": side,  # In Capital Letters, e.g. BUY
        "type": "LIMIT",
        "timeInForce": "GTC",
        "quantity": volume,
        "price": str(price),
    }
    send_signed_request('POST', ORDER_URL, order)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-01 07:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ApplicationActivity model.

    Drops its unique_together constraint and its FK to `application`
    first, so the model itself can be deleted cleanly.
    """
    dependencies = [
        ('wildlifecompliance', '0138_auto_20190301_1513'),
    ]
    operations = [
        # Clear the composite uniqueness constraint first.
        migrations.AlterUniqueTogether(
            name='applicationactivity',
            unique_together=set([]),
        ),
        # Then drop the FK column ...
        migrations.RemoveField(
            model_name='applicationactivity',
            name='application',
        ),
        # ... and finally the model itself.
        migrations.DeleteModel(
            name='ApplicationActivity',
        ),
    ]
|
def bisection(func, low, high, k, epsilon):
    """Find a root of func(x, k) on [low, high] by bisection.

    Assumes func is increasing in x over the bracket (negative below the
    root, positive above), e.g. func(x, k) = x**2 - k for square roots.
    Returns (root_estimate, number_of_iterations).

    Bug fix: the bracket update used the hard-coded test ``ans**2 < k``,
    which only worked for the square-root case and ignored `func`
    entirely; it now uses the sign of func itself.
    """
    ans = (high + low) / 2.0
    numGuesses = 0
    while abs(func(ans, k)) >= epsilon:
        numGuesses += 1
        if func(ans, k) < 0:
            low = ans
        else:
            high = ans
        ans = (high + low) / 2.0
    return ans, numGuesses
|
# -*- coding: utf-8 -*-
import logging as log
import time
import findIt
import re
from ga import Ga
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from file import saveStats
from file import readText
from rouge_score import rouge_scorer
# Constants
# Reference (gold) summary used for ROUGE evaluation.
reference = "Senators McClure (R) and Metzenbaum (D) have sponsored bills to prevent plastic guns from slipping through airport security. The gun, not yet manufactured, is intended for military and police use. Metzenbaum's bill would require some detectable metal content; McClure's would require more sensitive detection equipment at airports, almost certainly causing passenger delays. The NRA opposes the first federal gun ban bill in America, and warns members their guns will soon be inspected and weighed by government agents. However, on this issue they will compromise, not to ban the gun, but to increase airport security. Handgun control advocates and law enforcement officials back Metzenbaum's proposal."
print("Start ...")
print("Reading document ...")
text = readText("training/AP880310-0257")
sentences = sent_tokenize(text)
# Convert genomes to normal summary text
# Sentence indices forming the candidate summary (hard-coded output of
# an earlier GA run -- presumably from the Ga module; verify).
indexs = [9,12, 13, 18, 24, 25, 26, 34, 35, 36 ,40, 42]
hypothesis = ""
for i in indexs:
    hypothesis+=(sentences[i])
print("\nЛучший набор предложении")
print(indexs)
# calculate Rouge
# NOTE(review): RougeScorer.score signature is score(target, prediction);
# `hypothesis` is passed in the target slot here -- confirm the order.
scores = rouge_scorer.RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
scores = scores.score(hypothesis,reference)
# save results to file
print(scores)
print("Finish!")
Successfully counted the number of .shp files:
>>> import os
... import glob
... from arcpy import env
... env.workspace = "G:/7gaodetraffic/"
... # Open one of the files,
... folder_path = 'G:/7gaodetraffic/4delete duplicate/'
... road= glob.glob(r'G:/7gaodetraffic/4delete duplicate/*.shp')
... print len(road)
...
96
Successfully counted the number of fields:
import arcpy.da
... import time,os
... import arcpy
... arcpy.env.workspace = "G:/7gaodetraffic/7poitsjoin"
... inFeatures = "2422.shp"
... fieldObjList = arcpy.ListFields(inFeatures)
... print fieldObjList
... print len(fieldObjList)
...
...
List fields:
import arcpy.da
... import time,os
... import arcpy
... arcpy.env.workspace = "G:/7gaodetraffic/7poitsjoin"
... fields = arcpy.ListFields("2422.shp")
... print fields
...
...
import arcpy.da
... import time,os
... import arcpy
... def getFieldNames(shp):
... fieldnames = [f.name for f in arcpy.ListFields(shp)]
... return fieldnames
... fieldnames = getFieldNames("country.shp")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 14:19:26 2019
@author: vener
"""
import pandas as pd
from bisect import bisect_left
def take_closest(myList, myNumber):
    """
    Assumes myList is sorted.  Returns the *index* of the entry closest
    to myNumber: 0 when myNumber precedes every entry, -1 (i.e. the last
    index) when it follows every entry.  If two entries are equally
    close, the lower index is returned.

    (Docstring corrected: the original said "closest value", but the
    function returns a position, which is how the caller below uses it.)
    """
    pos = bisect_left(myList, myNumber)
    if pos == 0:
        return 0
    if pos == len(myList):
        return - 1
    before = myList[pos - 1]
    after = myList[pos]
    if after - myNumber < myNumber - before:
        return pos
    else:
        return pos - 1
# Copy-number segment tables for the two tissues.
# NOTE(review): ldata is loaded but never used below -- confirm.
kdata = pd.read_csv("kidney_data.csv")
ldata = pd.read_csv("lung_data.csv")
CHRs = list(kdata['Chromosome'].unique())
# For every chromosome, project each sample's copy-number segments onto
# the fixed genomic bins of the per-chromosome template dataframe.
for chromosome in CHRs:
    df = pd.read_csv("data_chr\emptydf_chr%s.csv" %chromosome)
    start_v = df[df.columns[0]].tolist()  # bin start coordinates
    end_v = df[df.columns[1]].tolist()    # bin end coordinates
    chr1 = pd.read_csv("data_chr\chr%s.csv" %chromosome)
    for index, row in chr1.iterrows():
        # Closest bin indices spanning this segment (see take_closest).
        pos_s = take_closest(start_v, row.Start)
        pos_e = take_closest(end_v, row.End)
        # Write the segment mean into this sample's column over the span.
        df.loc[pos_s:pos_e, row.GDC_Aliquot] = row.Segment_mean
    df.to_csv(r"D:\vener\Documents\Bioinformatica\progetto\Bioinfo-master\data_chr\df_chr%s.csv" %chromosome, index=True, header=True)
|
"""
1970. Last Day Where You Can Still Cross
Hard
367
6
Add to List
Share
There is a 1-based binary matrix where 0 represents land and 1 represents water. You are given integers row and col representing the number of rows and columns in the matrix, respectively.
Initially on day 0, the entire matrix is land. However, each day a new cell becomes flooded with water. You are given a 1-based 2D array cells, where cells[i] = [ri, ci] represents that on the ith day, the cell on the rith row and cith column (1-based coordinates) will be covered with water (i.e., changed to 1).
You want to find the last day that it is possible to walk from the top to the bottom by only walking on land cells. You can start from any cell in the top row and end at any cell in the bottom row. You can only travel in the four cardinal directions (left, right, up, and down).
Return the last day where it is possible to walk from the top to the bottom by only walking on land cells.
Example 1:
Input: row = 2, col = 2, cells = [[1,1],[2,1],[1,2],[2,2]]
Output: 2
Explanation: The above image depicts how the matrix changes each day starting from day 0.
The last day where it is possible to cross from top to bottom is on day 2.
Example 2:
Input: row = 2, col = 2, cells = [[1,1],[1,2],[2,1],[2,2]]
Output: 1
Explanation: The above image depicts how the matrix changes each day starting from day 0.
The last day where it is possible to cross from top to bottom is on day 1.
Example 3:
Input: row = 3, col = 3, cells = [[1,2],[2,1],[3,3],[2,2],[1,1],[1,3],[2,3],[3,2],[3,1]]
Output: 3
Explanation: The above image depicts how the matrix changes each day starting from day 0.
The last day where it is possible to cross from top to bottom is on day 3.
Constraints:
2 <= row, col <= 2 * 104
4 <= row * col <= 2 * 104
cells.length == row * col
1 <= ri <= row
1 <= ci <= col
All the values of cells are unique.
Accepted
7,319
Submissions
15,206
"""
class Solution:
    def latestDayToCross(self, row: int, col: int, cells: list[list[int]]) -> int:
        """Binary-search for the last day a top-to-bottom land path exists.

        cells[i] gives the (1-based) cell flooded on day i+1; a day count
        d is feasible when a DFS over land cells reaches past the bottom
        row.  (Annotation fixed from the un-imported typing.List to the
        builtin generic list, Python 3.9+.)
        """
        def get_array(d):
            # Grid after the first d floods: 1 = water, 0 = land.
            arr = [ [0] * col for i in range(row)]
            for i in range(d):
                arr[cells[i][0] - 1][cells[i][1] - 1] = 1
            return arr
        def can_cross(arr, r, c, visited):
            # Depth-first search; True once the walk steps past the last row.
            # NOTE(review): recursion depth can approach row*col (up to
            # 2*10**4), which may exceed Python's default recursion limit.
            key = (r,c)
            if key in visited:
                return False
            visited.add(key)
            if c == col or c < 0 or r < 0:
                return False
            elif r == row:
                return True
            elif arr[r][c] == 1:
                # Water: dead end -- except on the top row, where the scan
                # slides right looking for a dry starting column.
                if r > 0:
                    return False
                else:
                    return can_cross(arr, r, c + 1, visited)
            if can_cross(arr, r + 1, c, visited) or can_cross(arr, r, c - 1, visited) or can_cross(arr, r, c + 1, visited) or can_cross(arr, r - 1, c, visited):
                return True
            else:
                return False
        #binary search
        # Crossable day counts shrink `low` upward, blocked ones shrink
        # `high`; low converges on the first blocked count, so high - 1
        # is the last crossable day.
        low = 0
        high = len(cells)
        while high > low:
            mid = int((high + low) / 2)
            arr = get_array(mid)
            res = can_cross(arr, 0, 0, set())
            if res:
                low = mid + 1
            else:
                high = mid
        return high - 1
|
from assignment3_3 import *
from sklearn_Kmeans_3 import *
# Cluster Fox News health tweets with the hand-rolled K_Means class and
# write the labelled rows back out.  preprocess/vectorize/K_Means/results
# come from the star-imports above.
header = ['id','date','tweet']
# '|'-separated dump: id|date|tweet, no header row.
df=pd.read_csv(open('foxnewshealth.txt'), sep = '|', header=None, names=header, engine='c')
# NOTE(review): skdf is preprocessed but only used by the commented-out
# sklearn comparison below.
skdf = preprocess(df)
df = preprocess(df)
# sklearnKMeans(skdf)
x = vectorize(df)
clf = K_Means()
clf.fit(x)
predictions = results(x, clf)
df['labels'] = predictions
df.to_csv('scratchKMeans.csv', sep=',', index = None, header=None)
# print(df)
|
import os
# Edit this directory for each batch of photos.
d = r'D:\友高工作\发布内容要求\城堡\14'
lst = os.listdir(d)
# Rename every file in the directory to 1.JPG, 2.JPG, ... in listing order.
# Bug fix: the original called lst.index(a) on every iteration -- an O(n)
# scan per file; enumerate supplies the position directly.
# NOTE(review): a rename can still collide if a target name like '3.JPG'
# already exists in the directory -- confirm inputs never look like that.
for seq, name in enumerate(lst, start=1):
    os.rename(os.path.join(d, name), os.path.join(d, "{}.JPG".format(seq)))
print("ok")
|
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
class MySQLAdapter(BaseAdapter):
    """pyDAL adapter for MySQL (MySQLdb, pymysql or mysql-connector drivers)."""
    drivers = ('MySQLdb','pymysql', 'mysqlconnector')
    # MySQL commits implicitly on ALTER TABLE.
    commit_on_alter_table = True
    # Distributed transactions are supported via the XA protocol (below).
    support_distributed_transaction = True
    # pyDAL field type -> MySQL column DDL template.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT %(null)s %(unique)s, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT %(null)s %(unique)s, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
    QUOTE_TEMPLATE = "`%s`"
    def varquote(self,name):
        # Backtick-quote an identifier only when it needs quoting.
        return varquote_aux(name,'`%s`')
    def RANDOM(self):
        # SQL expression for random ordering.
        return 'RAND()'
    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])
    def EPOCH(self, first):
        # Seconds since the Unix epoch.
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)
    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
    def REGEXP(self,first,second):
        # MySQL-native regular-expression match.
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))
    def CAST(self, first, second):
        # MySQL cannot CAST to LONGTEXT; CHAR is the closest target type.
        if second=='LONGTEXT': second = 'CHAR'
        return 'CAST(%s AS %s)' % (first, second)
    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        table_rname = table.sqlsafe
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
                'SET FOREIGN_KEY_CHECKS=1;']
    def _insert_empty(self, table):
        # Insert a row consisting solely of column defaults.
        return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)
    # Two-phase (XA) distributed transaction hooks.
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')
    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")
    def commit_prepared(self,key):
        self.execute("XA COMMIT;")
    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")
    # Connection URI shape: user:password@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Parse the mysql:// URI, assemble driver keyword arguments and
        # (optionally) open the first pooled connection.
        # NOTE(review): driver_args/adapter_args are mutable defaults and
        # driver_args is mutated via update() below -- calls that omit it
        # share one dict.
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)
        def connector(driver_args=driver_args):
            # Bind driver_args at definition time so reconnects reuse them.
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
    def after_connection(self):
        # Run on every fresh connection: enforce FK checks and disable
        # backslash escaping so pyDAL's quoting is interpreted correctly.
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
|
import struct
# Wire-protocol message type codes.  The mapping is deliberately
# bidirectional: name -> code and code -> name share one dict.
messageTypes = {
    'Accept' : 0x1,
    'Reject' : 0x2,
    'Join' : 0x3,
    'Status' : 0x4,
    'Task' : 0x5,
    'Start' : 0x6,
    'Stop' : 0x7,
    'Finished' : 0x8,
    'Disconnect' : 0x9,
    #reverse
    0x1 : 'Accept',
    0x2 : 'Reject',
    0x3 : 'Join',
    0x4 : 'Status',
    0x5 : 'Task',
    0x6 : 'Start',
    0x7 : 'Stop',
    0x8 : 'Finished',
    0x9 : 'Disconnect'
}
def intToBEByteStr(number):
    """Pack `number` as a 4-byte big-endian unsigned integer."""
    return struct.pack('!I', number)
def longintToBEByteStr(number):
    """Pack `number` as an 8-byte big-endian unsigned integer."""
    return struct.pack('!Q', number)
def BEByteStrToInt(str):
    """Decode a 4-byte big-endian byte string into an unsigned integer."""
    (value,) = struct.unpack('!I', str)
    return value
def createMessage(messTypeName, messData = ''):
    """Serialize one protocol message to bytes.

    Every message starts with its one-byte type code.  'Join', 'Reject'
    and 'Disconnect' carry a length-prefixed UTF-8 string; 'Finished'
    carries length-prefixed raw bytes; 'Accept' optionally carries a
    64-bit unix time; 'Status' carries one status byte; 'Start' carries
    raw bytes.  Types without a serializer here yield False.
    NB: length prefixes count characters of messData, not encoded bytes.
    """
    result = False
    if messTypeName in ('Join', 'Reject', 'Disconnect'):
        # Length-prefixed UTF-8 text payload.
        result = (bytes([messageTypes[messTypeName]])
                  + intToBEByteStr(len(messData))
                  + messData.encode('utf-8'))
    elif messTypeName == 'Accept':
        #accept message data is 64bit unix time
        if (messData == ''):
            result = bytes([messageTypes[messTypeName]])
        else:
            result = bytes([messageTypes[messTypeName]]) + longintToBEByteStr(messData)
    elif messTypeName == 'Status':
        # Single status byte.
        result = bytes([messageTypes[messTypeName]]) + bytes( [messData] )
    elif messTypeName == 'Finished':
        # Length-prefixed raw bytes payload.
        result = (bytes([messageTypes[messTypeName]])
                  + intToBEByteStr(len(messData))
                  + messData)
    elif messTypeName == 'Start':
        # Raw bytes payload, no length prefix.
        result = bytes([messageTypes[messTypeName]]) + messData
    return result
def getTypeOfMessage(msg):
    """Return the numeric type code stored in the first byte of `msg`."""
    first_byte = msg[0]
    return int(first_byte)
def GetMsgLenData(msg):
    """Split one length-prefixed chunk off `msg`.

    `msg` is laid out as a 4-byte big-endian length, then `length`
    payload bytes, then anything.  Returns (length, payload); a short
    payload is reported on stdout but still returned truncated.
    """
    length = struct.unpack('>I', msg[:4])[0]
    msgdata = msg[4:4 + length]
    if len(msgdata) != length:
        print('ERROR: len(msgdata) != length in message')
    return length, msgdata
def readMessage(msg):
    """Parse a serialized message into a dict.

    The result always has 'type' (the numeric type code); 'Reject' adds
    'length' and 'reason', 'Task' adds 'parametrs' and 'code'.

    Bug fixes: the type was compared against the *names* 'Reject'/'Task'
    although getTypeOfMessage returns the numeric code, so neither
    branch could ever run; and the 'Task' branch advanced past the first
    chunk without accounting for its 4-byte length prefix.
    """
    result = {}
    result.update( {'type': getTypeOfMessage(msg) })
    msg = msg[1:]
    if result['type'] == messageTypes['Reject']:
        length, msgdata = GetMsgLenData(msg)
        result.update( {'length': length, 'reason': msgdata} )
    if result['type'] == messageTypes['Task']:
        parlength, pardata = GetMsgLenData(msg)
        # Skip the 4-byte length prefix plus the parameter bytes.
        msg = msg[4 + parlength:]
        codelength, codedata = GetMsgLenData(msg)
        result.update ({'parametrs' : pardata, 'code' : codedata })
    return result
# Python 2 console script: collect marks per subject and echo them back.
subs = int(raw_input("Enter No. of Subjects: "));
marksList = []
for i in range(0,int(subs)):
    sub = raw_input("Enter Subject Name: ");
    marks = int(raw_input("Enter Marks: "));
    # Each entry is a one-key dict mapping subject name -> marks.
    marksList.append({sub:marks})
print marksList
print "-----------------------"
for j in range(0,len(marksList)):
    for key,value in marksList[j].items():
        print key+":"+str(value)
|
class Node():
    """A graph vertex: its label, an optional stored value, and the
    labels of vertices with an edge into it."""
    def __init__(self, vertex):
        self.vertex = vertex
        self.value = None
        self.edge = []
class Graph():
    """Directed graph over Node objects.

    Edges are stored on the *destination* node: add_edge(x, y) records x
    in y's edge list, mirroring the original layout.

    Bug fixes relative to the original: every method was missing `self`
    (so none could be called on an instance); remove_vertex used
    ``del self.vertexes[vertice]`` with a Node object where an index is
    required; remove_edge used ``del vertice.edge[x]`` with a vertex
    label where an index is required.
    """
    def __init__(self):
        self.vertexes = []

    def add_vertex(self, x):
        """Add a new vertex labelled x."""
        self.vertexes.append(Node(x))

    def remove_vertex(self, x):
        """Remove every vertex labelled x."""
        for vertice in list(self.vertexes):
            if vertice.vertex == x:
                self.vertexes.remove(vertice)

    def add_edge(self, x, y):
        """Record an edge from x to y (stored on y); True when y exists."""
        for vertice in self.vertexes:
            if vertice.vertex == y:
                if x not in vertice.edge:
                    vertice.edge.append(x)
                return True

    def remove_edge(self, x, y):
        """Delete the edge from x to y, if present."""
        for vertice in self.vertexes:
            if vertice.vertex == y:
                if x in vertice.edge:
                    vertice.edge.remove(x)

    def get_vertex_value(self, x):
        """Return the value stored at vertex x (None if unset or missing)."""
        for vertice in self.vertexes:
            if vertice.vertex == x:
                return vertice.value

    def set_vertex_value(self, x, v):
        """Store value v at vertex x."""
        for vertice in self.vertexes:
            if vertice.vertex == x:
                vertice.value = v

    def adjacent(self, x, y):
        """True when an edge from y into x is recorded; None when x is missing."""
        for vertice in self.vertexes:
            if vertice.vertex == x:
                return y in vertice.edge

    def neighbors(self, x):
        """Return x's recorded edge labels (None when x is missing)."""
        for vertice in self.vertexes:
            if vertice.vertex == x:
                return vertice.edge
|
# Imports here
%matplotlib inline
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
from torchvision import transforms, models
# Load the Data
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# Build and train
from collections import OrderedDict
from torch import nn
import torch.optim as optim
# Sanity Checking
import seaborn as sns
# Load the data
data_dir = './flower_data'          # root of the flower dataset
train_dir = data_dir + '/train'     # training images, one folder per class
valid_dir = data_dir + '/valid'     # validation images, one folder per class
# TODO: Define your transforms for the training and validation sets
data_transforms = {
    # Training: random augmentation, then ImageNet normalization.
    'train': transforms.Compose([transforms.RandomRotation(30),
                                 transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(p=0.5),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]),
    # Validation/test: deterministic resize + 224 center crop.
    # Bug fix: these pipelines used RandomResizedCrop(224) followed by
    # CenterCrop(32) -- a random (non-reproducible) eval crop and a
    # 32x32 input that VGG19's 25088-feature classifier head (512*7*7,
    # i.e. a 224x224 input) cannot accept.
    'valid': transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]),
    'test': transforms.Compose([transforms.Resize(256),
                                transforms.CenterCrop(224),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
}
# TODO: Load the datasets with ImageFolder
image_datasets = {
    'train' : datasets.ImageFolder(train_dir,transform=data_transforms['train']),
    'valid' : datasets.ImageFolder(valid_dir,transform=data_transforms['valid'])
}
# TODO: Using the image datasets and the trainforms, define the dataloaders
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 30
# percentage of training set to use as validation
valid_size = 0.2
# obtain training indices that will be used for validation
num_train = len(image_datasets['train'])
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# prepare data loaders (combine dataset and sampler)
# NOTE(review): 'train' and 'valid' loaders both draw from the training
# ImageFolder (via disjoint index samplers); the held-out valid_dir
# folder is served by the 'test' loader.  Confirm this split is intended.
dataloaders = {
    'train' : torch.utils.data.DataLoader(image_datasets['train'], batch_size=batch_size, sampler=train_sampler, num_workers=num_workers),
    'valid' : torch.utils.data.DataLoader(image_datasets['train'], batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers),
    'test' : torch.utils.data.DataLoader(image_datasets['valid'], batch_size=batch_size, num_workers=num_workers)
}
# Label mapping
import json
# Map category id -> human-readable flower name.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
# Building and training the classifier
# TODO: Build and train your network
# Pretrained VGG19 backbone; only the new classifier head will be trained.
model = models.vgg19(pretrained = True)
# freeze all VGG parameters since we're only optimizing the target image
for param in model.parameters():
    param.requires_grad_(False)
model.classifier  # notebook cell echo of the stock classifier
# Create classifier using Sequential with OrderedDict
# New head: 25088 (= 512*7*7 VGG features) -> 1024 -> 102 flower
# classes, emitting log-probabilities.
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(25088, 1024)),
    ('relu', nn.ReLU()),
    ('fc2', nn.Linear(1024, 102)),
    ('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.classifier  # notebook cell echo of the replacement head
# specify loss function
# Bug fix: the classifier ends in LogSoftmax, so the matching loss is
# NLLLoss (NLL over log-probabilities == categorical cross-entropy);
# CrossEntropyLoss would apply log-softmax a second time.
criterion = nn.NLLLoss()
# specify optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01)
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')
    model.cuda()
# move the model to GPU, if available
device = torch.device("cuda" if train_on_gpu else "cpu")
# model.to(device)
## Train the Network
# number of epochs to train the model
n_epochs = 2 #30
valid_loss_min = np.Inf # track change in validation loss
# Train/validate for n_epochs, checkpointing whenever validation loss improves.
for epoch in range(1, n_epochs+1):
    # keep track of training and validation loss
    train_loss = 0.0
    train_acc = 0.0
    valid_loss = 0.0
    valid_acc = 0.0
    ###################
    # train the model #
    ###################
    model.train()
    train_correct = 0.0
    for data, target in dataloaders['train']:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        _, preds = torch.max(output,1)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss (weighted by batch size)
        train_loss += loss.item()*data.size(0)
        train_correct += torch.sum(preds == target.data)
    ######################
    # validate the model #
    ######################
    # NOTE(review): evaluation runs without torch.no_grad(), so autograd
    # bookkeeping is still built here -- consider wrapping this loop.
    model.eval()
    validate_correct = 0.0
    for data, target in dataloaders['valid']:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        equal = (output.max(dim=1)[1] == target.data)
        # update average validation loss
        valid_loss += loss.item()*data.size(0)
        validate_correct += torch.sum(equal)#type(torch.FloatTensor)
    # calculate average losses
    # NOTE(review): both averages divide by the full training dataset
    # length even though each sampler only covers its own split of the
    # indices -- confirm this normalization is intended.
    train_loss = train_loss/len(dataloaders['train'].dataset)
    train_acc = train_correct.double()/len(dataloaders['train'].dataset)
    valid_loss = valid_loss/len(dataloaders['valid'].dataset)
    valid_acc = validate_correct.double()/len(dataloaders['valid'].dataset)
    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tAcc: {:.6f} \n\t\tValidation Loss: {:.6f} \tAcc: {:.6f}'.format(
        epoch, train_loss, train_acc, valid_loss, valid_acc))
    # TODO: Save the checkpoint
    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        model.class_to_idx = image_datasets['train'].class_to_idx
        # Persist everything needed to rebuild and resume this model.
        checkpoint = {'arch': 'vgg19',
                      'model_state': model.state_dict(),
                      'criterion_state': criterion.state_dict(),
                      'optimizer_state': optimizer.state_dict(),
                      'class_to_idx': model.class_to_idx,
                      'epochs': n_epochs,
                      'best_train_loss': train_loss,
                      # 'Best train accuracy': epoch_train_accuracy,
                      'best_validation_loss': valid_loss,
                      # 'Best Validation accuracy': epoch_val_acc
                      }
        torch.save(checkpoint, 'model_imgclassifier.pt')
        valid_loss_min = valid_loss
# Save the checkpoint
# Done above
# Loading the checkpoint
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load_model(checkpoint_path):
    """Rebuild the VGG19 flower classifier from a saved checkpoint.

    Restores the classifier weights, the class_to_idx mapping, and (as a
    side effect) the state of the surrounding script's criterion and
    optimizer.  Returns the reconstructed model.

    Bug fix: the `checkpoint_path` argument was ignored and the filename
    hard-coded; the checkpoint is now loaded from the given path.
    """
    checkpoint = torch.load(checkpoint_path)
    if checkpoint['arch'] == 'vgg19':
        model = models.vgg19(pretrained=True)
        # Freeze the convolutional feature extractor, as during training.
        for param in model.parameters():
            param.requires_grad_(False)
    # Recreate the same classifier head that was trained above.
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(25088, 1024)),
        ('relu', nn.ReLU()),
        ('fc2', nn.Linear(1024, 102)),
        ('output', nn.LogSoftmax(dim=1))
    ]))
    model.classifier = classifier
    model.load_state_dict(checkpoint['model_state'])
    # NOTE(review): criterion/optimizer are module-level globals here;
    # restoring their state inside load_model is fragile -- confirm.
    criterion.load_state_dict(checkpoint['criterion_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    model.class_to_idx = checkpoint['class_to_idx']
    n_epochs = checkpoint['epochs']
    train_loss = checkpoint['best_train_loss']
    valid_loss = checkpoint['best_validation_loss']
    return model
model_load = load_model('model_imgclassifier.pt')
# Image Preprocessing
# TODO: Process a PIL image for use in a PyTorch model
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array in C x H x W order.
    '''
    from PIL import Image

    pil_img = Image.open(image)
    # Shrink so the shorter side becomes 256 px, preserving aspect ratio.
    if pil_img.size[0] > pil_img.size[1]:
        pil_img.thumbnail((10000, 256))
    else:
        pil_img.thumbnail((256, 10000))
    # Center-crop to 224 x 224.
    left = (pil_img.width - 224) / 2
    bottom = (pil_img.height - 224) / 2
    pil_img = pil_img.crop((left, bottom, left + 224, bottom + 224))
    # Scale channels to [0, 1] then normalize with the ImageNet statistics.
    arr = np.array(pil_img) / 255
    mean = np.array([0.485, 0.456, 0.406])  # provided mean
    std = np.array([0.229, 0.224, 0.225])   # provided std
    arr = (arr - mean) / std
    # PyTorch expects the color channel first.
    return arr.transpose((2, 0, 1))
def imshow(image, ax=None, title=None):
    """Imshow for Tensor: display a normalized C x H x W array."""
    if ax is None:
        _, ax = plt.subplots()
    if title:
        plt.title(title)
    # matplotlib wants H x W x C while PyTorch-style arrays are C x H x W.
    image = image.transpose((1, 2, 0))
    # Undo the ImageNet normalization applied during preprocessing.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = image * std + mean
    # Clip into [0, 1]; otherwise the plot looks like noise.
    ax.imshow(np.clip(image, 0, 1))
    return ax
# Quick visual check of the preprocessing pipeline on a sample image.
_= imshow(process_image('image_sample.jpg'))
# Class Prediction
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns (top_probs, top_labels, top_flowers) for the topk most likely classes.
    '''
    # Preprocess the file into a normalized C x H x W array.
    img = process_image(image_path)
    # Numpy -> Tensor with a batch dimension of 1.
    image_tensor = torch.from_numpy(img).type(torch.FloatTensor)
    model_input = image_tensor.unsqueeze(0)
    # Inference only: disable autograd bookkeeping, and call the model
    # directly instead of model.forward() so hooks are honored.
    with torch.no_grad():
        probs = torch.exp(model(model_input))  # model emits log-probabilities
    # Highest topk probabilities and their class indices.
    top_probs, top_labs = probs.topk(topk)
    top_probs = top_probs.detach().numpy().tolist()[0]
    top_labs = top_labs.detach().numpy().tolist()[0]
    # Convert model output indices back to dataset class labels and names.
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}
    top_labels = [idx_to_class[lab] for lab in top_labs]
    # NOTE: relies on the module-level cat_to_name mapping.
    top_flowers = [cat_to_name[idx_to_class[lab]] for lab in top_labs]
    return top_probs, top_labels, top_flowers
# Sanity Checking
# TODO: Display an image along with the top 5 classes
def plot_solution(image_path, model):
    """Show the image alongside a bar chart of its top-5 predicted classes."""
    plt.figure(figsize = (6,10))
    axis = plt.subplot(2,1,1)
    # Display the (denormalized) input image on the top panel.
    preprocessed = process_image(image_path)
    imshow(preprocessed, axis)
    # Run the classifier and chart the top probabilities on the bottom panel.
    probs, labs, flowers = predict(image_path, model)
    plt.subplot(2,1,2)
    sns.barplot(x=probs, y=flowers, color=sns.color_palette()[0])
    plt.show()
# Sanity check: show the sample image with its top-5 predicted classes.
plot_solution('image_sample.jpg', model_load)
# Generated by Django 2.2.13 on 2020-10-23 17:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: updates default orderings and verbose
    names for the shared-models org hierarchy (branch, division, region,
    section). Metadata only — no schema change."""

    dependencies = [
        ('shared_models', '0009_auto_20201023_0836'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='branch',
            options={'ordering': ['name'], 'verbose_name': 'Branch - Directorate (NCR)', 'verbose_name_plural': 'Branches - Directorates (NCR)'},
        ),
        migrations.AlterModelOptions(
            name='division',
            options={'ordering': ['name'], 'verbose_name': 'Division - Branch (NCR)', 'verbose_name_plural': 'Divisions - Branches (NCR)'},
        ),
        migrations.AlterModelOptions(
            name='region',
            options={'ordering': ['name'], 'verbose_name': 'Region - Sector (NCR)', 'verbose_name_plural': 'Regions - Sectors (NCR)'},
        ),
        migrations.AlterModelOptions(
            name='section',
            options={'ordering': ['division__branch__region', 'division__branch', 'division', 'name'], 'verbose_name': 'Section - Team (NCR)', 'verbose_name_plural': 'Sections - Teams (NCR)'},
        ),
    ]
|
# Generated by Django 3.1.1 on 2021-04-06 18:03
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the 'connection' app: staff
    models (Administrator, Meteorologist, Technician), the Station /
    Department / Region hierarchy, RainData readings and Comments."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Administrator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=255)),
                ('lastname', models.CharField(max_length=255)),
                ('address', models.TextField()),
                ('tel', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254, unique=True)),
                # NOTE(review): a plain-text default password baked into the
                # schema — confirm this is hashed/replaced at account creation.
                ('password', models.CharField(blank=True, default='slndxfwq', max_length=50)),
                ('role', models.CharField(choices=[('admin', 'admin'), ('technicien', 'technicien'), ('meteorologue', 'meteorologue')], default='technicien', max_length=50)),
                ('theme', models.CharField(blank=True, default='secondary', max_length=50, null=True)),
                ('profile', models.ImageField(blank=True, null=True, upload_to='images/')),
                ('last_connection', models.DateTimeField(blank=True, null=True)),
                ('is_online', models.BooleanField(blank=True, default=False, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('update_at', models.DateField(auto_now=True)),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Meteorologist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=255)),
                ('lastname', models.CharField(max_length=255)),
                ('address', models.TextField()),
                ('tel', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('password', models.CharField(blank=True, default='slndxfwq', max_length=50)),
                ('role', models.CharField(choices=[('admin', 'admin'), ('technicien', 'technicien'), ('meteorologue', 'meteorologue')], default='technicien', max_length=50)),
                ('theme', models.CharField(blank=True, default='secondary', max_length=50, null=True)),
                ('profile', models.ImageField(blank=True, null=True, upload_to='images/')),
                ('last_connection', models.DateTimeField(blank=True, null=True)),
                ('is_online', models.BooleanField(blank=True, default=False, null=True)),
                ('statut', models.BooleanField(default=True)),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('trash_at', models.DateField(blank=True, null=True)),
                ('activate_at', models.DateTimeField(blank=True, null=True)),
                ('trash', models.BooleanField(default=False)),
                ('statut', models.BooleanField(default=True)),
                ('map', models.TextField(blank=True, null=True)),
                ('is_valid', models.BooleanField(default=True)),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.department')),
                ('meteorologist', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='connection.meteorologist')),
            ],
        ),
        migrations.CreateModel(
            name='Technician',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=255)),
                ('lastname', models.CharField(max_length=255)),
                ('address', models.TextField()),
                ('tel', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('password', models.CharField(blank=True, default='slndxfwq', max_length=50)),
                ('role', models.CharField(choices=[('admin', 'admin'), ('technicien', 'technicien'), ('meteorologue', 'meteorologue')], default='technicien', max_length=50)),
                ('theme', models.CharField(blank=True, default='secondary', max_length=50, null=True)),
                ('profile', models.ImageField(blank=True, null=True, upload_to='images/')),
                ('last_connection', models.DateTimeField(blank=True, null=True)),
                ('is_online', models.BooleanField(blank=True, default=False, null=True)),
                ('statut', models.BooleanField(default=True)),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
                ('is_valid', models.BooleanField(default=True)),
                ('meteorologist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.meteorologist')),
                ('station', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='connection.station')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('created_at', models.DateField(auto_now_add=True)),
                ('update_at', models.DateField(auto_now=True)),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
                ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.administrator')),
            ],
        ),
        migrations.CreateModel(
            name='RainData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
                # NOTE(review): this default is a datetime frozen at the moment
                # the migration was generated, not auto_now_add — every row
                # created without an explicit value gets 2021-04-06. Confirm the
                # model intended auto_now_add=True.
                ('created_at', models.DateField(default=datetime.datetime(2021, 4, 6, 18, 3, 2, 914820))),
                ('update_at', models.DateField(auto_now=True)),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
                ('station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.station')),
                ('technician', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.technician')),
            ],
        ),
        migrations.AddField(
            model_name='department',
            name='region',
            # NOTE(review): blank=True without null=True — forms allow the
            # field to be empty but the DB column is NOT NULL; confirm.
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='connection.region'),
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_note', models.DateField(auto_now_add=True)),
                ('content', models.TextField()),
                ('trash', models.BooleanField(default=False)),
                ('trash_at', models.DateField(blank=True, null=True)),
                ('station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.station')),
                ('technician', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='connection.technician')),
            ],
        ),
    ]
|
import os
import numpy as np
import torch.utils.data
from torchvision.datasets import ImageFolder
from torchvision import transforms
import functools
import PIL
class StoryDataset(torch.utils.data.Dataset):
    """Dataset of 4-frame CLEVR 'stories'.

    Each item yields the 4 frame images, the per-frame attribute vectors
    and labels accumulated (OR-ed) over time.
    """
    def __init__(self, image_path,
                 transform, is_train = True):
        # Root folder containing images/ and CLEVR_dict.npy.
        self.dir_path = image_path
        # Maps image filename -> per-frame attribute vector.
        self.descriptions = np.load(image_path +'CLEVR_dict.npy', allow_pickle=True, encoding = 'latin1' ).item()
        self.transforms = transform
        # Train uses items starting at 0; eval items start at offset 10000.
        self.srt = 0
        self.edn = 10000
        if not is_train:
            self.srt = 10000 # offset?
            self.edn = 13000
        # Frames per story.
        self.video_len = 4
    def __getitem__(self, item):
        label = []
        super_label = []
        image = []
        des = []
        item = item + self.srt
        for i in range(self.video_len):
            # Frame files are 1-indexed: CLEVR_new_<item>_<frame>.png.
            v = '%simages/CLEVR_new_%06d_%d.png' % (self.dir_path, item, i+1)
            image_pos = v.split('/')[-1]
            im = np.array(PIL.Image.open(v))
            # Keep RGB only (drops an alpha channel if present).
            image.append( np.expand_dims(im[...,:3], axis = 0) )
            des.append(np.expand_dims(self.descriptions[image_pos].astype(np.float32), axis = 0))
            l = des[-1].reshape(-1)
            # Assumes an 18-wide attribute slot per frame: [3:11] are the
            # label bits, [0:15] the super-label bits — TODO confirm against
            # the CLEVR_dict.npy layout.
            label.append(l[i*18 + 3: i*18 + 11])
            super_label.append(l[i*18:i*18+15])
        label[0] = np.expand_dims(label[0], axis = 0)
        super_label[0] = np.expand_dims(super_label[0], axis = 0)
        # Accumulate labels over time and clamp to a binary mask.
        for i in range(1,4):
            label[i] = label[i] + label[i-1]
            super_label[i] = super_label[i] + super_label[i-1]
            temp = label[i].reshape(-1)
            super_temp = super_label[i].reshape(-1)
            temp[temp>1] = 1
            super_temp[super_temp>1] = 1
            label[i] = np.expand_dims(temp, axis = 0)
            super_label[i] = np.expand_dims(super_temp, axis = 0)
        des = np.concatenate(des, axis = 0)
        image_numpy = np.concatenate(image, axis = 0)
        image = self.transforms(image_numpy)
        label = np.concatenate(label, axis = 0)
        super_label = np.concatenate(super_label, axis = 0)
        # image is T x H x W x C
        # After transform, image is C x T x H x W
        des = torch.tensor(des)
        ## des is attribute, subs is encoded text description
        return {'images': image,
                'description': des,
                'labels': super_label}
    def __len__(self):
        # NOTE(review): edn - srt + 1 looks off by one — train would span
        # indices 0..10000 while eval starts at 10000, so index 10000 is in
        # both splits and the last index may not exist on disk; confirm.
        return self.edn - self.srt + 1
class ImageDataset(torch.utils.data.Dataset):
    """Single-frame CLEVR dataset: a randomly sampled frame per story plus
    attribute labels accumulated over all four frames."""
    # make sure hyperparameters are same as pororoSV
    def __init__(self, image_path, transform,
                 segment_transform=None, use_segment=False, segment_name='img_segment',
                 is_train = True):
        self.dir_path = image_path
        self.transforms = transform
        # Transform applied to segmentation masks when use_segment is set.
        self.segment_transform = segment_transform
        self.descriptions = np.load(image_path +'CLEVR_dict.npy', allow_pickle=True, encoding = 'latin1').item()
        self.transforms = transform
        self.use_segment = use_segment
        # Train uses items starting at 0; eval items start at offset 10000.
        self.srt = 0
        self.edn = 10000
        if not is_train:
            self.srt = 10000 # offset?
            self.edn = 13000
        self.video_len = 4
    def __getitem__(self, item):
        item = item + self.srt
        # Pick one of the four frames at random.
        # NOTE(review): se is a 1-element ndarray, not an int; %d formatting
        # accepts it, but an int would be cleaner.
        se = np.random.randint(1,self.video_len+1, 1)
        path = '%simages/CLEVR_new_%06d_%d.png' % (self.dir_path, item, se)
        im = PIL.Image.open(path)
        image = np.array(im)[...,:3]
        image = self.transforms(image)
        img_pos = path.split('/')[-1]
        des = self.descriptions[img_pos].astype(np.float32)
        # Assumes an 18-wide attribute slot per frame — TODO confirm against
        # the CLEVR_dict.npy layout.
        label = des[3:11]
        super_label = des[:15]
        content = []
        for i in range(self.video_len):
            v = '%simages/CLEVR_new_%06d_%d.png' % (self.dir_path, item, i+1)
            img_pos = v.split('/')[-1]
            content.append(np.expand_dims(self.descriptions[img_pos].astype(np.float32), axis = 0))
        # Accumulate labels across the remaining frames, then clamp to binary.
        for i in range(1,4):
            label = label + des[i*18 + 3: i*18 + 11]
            super_label = super_label + des[i*18:i*18+15]
        label = label.reshape(-1)
        super_label = super_label.reshape(-1)
        label[label>1] = 1
        super_label[super_label>1] = 1
        content = np.concatenate(content, 0)
        content = torch.tensor(content)
        ## des is attribute, subs is encoded text description
        output = {'images': image,
                  'description': des,
                  'labels':super_label,
                  'content': content
                  }
        # load segment image label
        if self.use_segment:
            # NOTE(review): i here is the leftover loop variable (3), so the
            # mask is always taken from frame 4 rather than the sampled frame
            # se — confirm whether that is intentional.
            mask_name = '%simages/CLEVR_new_%06d_%d_mask.png' % (self.dir_path, item, i+1)
            mask_im = PIL.Image.open(mask_name).convert('L')
            mask_im = self.segment_transform( np.array(mask_im) )
            output['images_seg'] = mask_im
        return output
    def __len__(self):
        # NOTE(review): edn - srt + 1 looks off by one; index 10000 would
        # appear in both the train and eval splits — confirm.
        return self.edn - self.srt + 1
if __name__ == "__main__":
    from datasets.utils import video_transform
    n_channels = 3
    # Per-frame image pipeline: resize, tensorize, keep RGB, normalize to [-1, 1].
    image_transforms = transforms.Compose([
        PIL.Image.fromarray,
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        lambda x: x[:n_channels, ::],
        transforms.Normalize((0.5, 0.5, .5), (0.5, 0.5, 0.5)),
    ])
    # Grayscale pipeline for segmentation masks.
    image_transforms_seg = transforms.Compose([
        PIL.Image.fromarray,
        transforms.Resize((64, 64) ),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])])
    video_transforms = functools.partial(video_transform, image_transform=image_transforms)
    # BUG FIX: ImageDataset takes separate transform / segment_transform
    # arguments — passing a tuple made self.transforms(image) uncallable.
    imagedataset = ImageDataset('./CLEVR/', image_transforms,
                                segment_transform=image_transforms_seg)
    storydataset = StoryDataset('./CLEVR/', video_transforms)
    storyloader = torch.utils.data.DataLoader(
        imagedataset, batch_size=13,
        drop_last=True, shuffle=True, num_workers=8)
    # Smoke test: pull one batch and report tensor shapes.
    for batch in storyloader:
        print(batch['description'].shape, batch['content'].shape, batch['labels'].shape)
        break
from flask import Flask, render_template
import util
app = Flask(__name__)
# Connection settings for the local dvdrental PostgreSQL instance.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before deploying.
username='raywu1990'
password='test'
host='127.0.0.1'
port='5432'
database='dvdrental'
@app.route('/')
def index():
    """Render the first five rows of basket_a as an HTML table."""
    cursor, connection = util.connect_to_db(username,password,host,port,database)
    rows = util.run_and_fetch_sql(cursor, "SELECT * from basket_a;")
    # Column headers come from the cursor metadata.
    headers = [desc[0] for desc in cursor.description]
    preview = rows[:5]
    util.disconnect_from_db(connection,cursor)
    return render_template('index.html', sql_table = preview, table_title=headers)
@app.route('/api/update_basket_a')
def index2():
    """Insert a fixed demo row into basket_a and report the outcome."""
    cursor, connection = util.connect_to_db(username,password,host,port,database)
    outcome = util.run_and_commit_sql(cursor, connection, "INSERT INTO basket_a (a, fruit_a) VALUES(5, 'Cherry');")
    # run_and_commit_sql returns 1 on success, otherwise an error value.
    loght = 'Success!' if outcome == 1 else outcome
    util.disconnect_from_db(connection,cursor)
    return render_template('index.html', log_html = loght)
@app.route('/api/unique')
def index3():
    """Show rows unique to basket_a or basket_b (symmetric difference)."""
    cursor, connection = util.connect_to_db(username,password,host,port,database)
    record = util.run_and_fetch_sql(cursor, "SELECT * FROM basket_a WHERE fruit_a NOT IN (SELECT fruit_b FROM basket_b) UNION (SELECT * FROM basket_b WHERE fruit_b NOT IN (SELECT fruit_a FROM basket_a))")
    # BUG FIX: run_and_fetch_sql returns 1 on failure; the old code still
    # sliced record[:5] and crashed. Render an empty table instead.
    if record == 1:
        util.disconnect_from_db(connection,cursor)
        return render_template('index.html', sql_table = [], table_title=[])
    col_names = [desc[0] for desc in cursor.description]
    log = record[:5]
    util.disconnect_from_db(connection,cursor)
    return render_template('index.html', sql_table = log, table_title=col_names)
if __name__ == '__main__':
    # set debug mode (auto-reload + debugger; never enable in production)
    app.debug = True
    # your local machine ip
    ip = '127.0.0.1'
    # Serve on localhost only, default Flask port 5000.
    app.run(host=ip)
|
import pandas as pd
import numpy as np
from decisiontree import DTree
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
# Widen pandas/numpy display limits so debug prints show full frames.
pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.width', 1000)
np.set_printoptions(linewidth=1000)
np.set_printoptions(suppress=True)
base_file_name = './train/base_info.csv'
label_file_name = './train/entprise_info.csv'
test_file_name = './entprise_evaluate.csv'
# Read the input files
base = pd.read_csv(base_file_name)
label = pd.read_csv(label_file_name)
# print(base.head(1))
# Handle missing values
missingDF = base.isnull().sum().sort_values(ascending=False).reset_index()
missingDF.columns = ['feature', 'miss_num']
# print(missingDF)
missingDF['miss_percentage'] = missingDF['miss_num'] / base.shape[0]
# Print each column's missing count and missing rate
# print(missingDF)
# print(base.shape)
# Threshold for dropping columns with too many missing values
thr = (1 - 0.2) * base.shape[0]
# Drop features with a high missing rate (keep columns >= 80% non-null)
base = base.dropna(thresh=thr, axis=1)
# print(base.columns)
# Drop unused columns: business address (dom), premises (oploc), scope (opscope)
del base['dom'], base['oploc'], base['opscope']
# print(base.columns)
# Merge the base table with the label table
base1 = pd.merge(base, label, on='id', how='left')
# Drop id-like columns: industry code, date, orgid (org id), jobid (job id)
del base1['industryphy'], base1['opfrom'], base1['orgid'], base1['jobid']
# Frame used to hold the predicted labels
temp_test_set = base1[base1['label'].isnull()]
final_test_set = pd.DataFrame(columns=['id', 'label'])
final_test_set['id'] = temp_test_set['id']
final_test_set['label'] = temp_test_set['label']
del base1['id']
# Split out the training set (rows that have a label)
train_set = base1[base1['label'].notnull()]
y_train_label = train_set['label']
# Training-set column names
train_label_list = list(train_set.columns)
# print(train_label_list)
has_nan = list(train_set.isnull().sum() > 0)
# Features that still contain a few missing values
nan_list = []
for i in range(len(has_nan)):
    if has_nan[i]:
        nan_list.append(train_label_list[i])
# Fill missing values with the column median
for i in nan_list:
    # print(train_set[i].median())
    train_set[i].fillna(train_set[i].median(), inplace=True)
# print(train_set.isnull().sum()>0)
del train_set['label']
# print(train_set)
# print(y_train_label)
# print(train_label_list)
test_set = base1[base1['label'].isnull()]
del test_set['label']
test_columns_names = list(test_set.columns)
test_nan_list = list(test_set.isnull().sum() > 0)
# Columns that need filling
columns_to_fill = []
for i in range(len(test_columns_names)):
    if test_nan_list[i]:
        columns_to_fill.append(test_columns_names[i])
# Fill missing values in the test set
for i in columns_to_fill:
    test_set[i].fillna(test_set[i].median(), inplace=True)
# print(test_set)
test_set_array = test_set.to_numpy()
# Build a binary decision tree; class counts in the training set:
# num_0: 13884
# num_1: 981
dt = DecisionTreeClassifier()
dt.fit(train_set, y_train_label)
# ans = []
# for i in range(test_set_array.shape[0]):
#     print(i)
#     ans.append(dt.predict([test_set_array[i, :]]))
test_label = dt.predict(test_set)
# Save the predictions and write them to a file
# print(test_label.count(0))
final_test_set['label'] = test_label
# Row order required for submission
test_submit = pd.read_csv(test_file_name)
del test_submit['score']
# Merge the submission template with the predictions
test_submit = pd.merge(test_submit, final_test_set, on='id', how='left')
test_submit.rename(columns={'label': 'score'}, inplace=True)
# print(test_submit)
save_test_res_name = './entprise_evaluate1_C4.5.csv'
test_submit.to_csv(save_test_res_name, sep=',', header=True, index=False)
|
def get_summ(first, second, delimiter="&"):
    """Join the upper-cased string forms of two values with a delimiter."""
    parts = (str(first).upper(), str(second).upper())
    return delimiter.join(parts)
# Demo: both words are upper-cased and joined with '&' -> "LEARN&PYTHON".
to_print = get_summ("Learn", "python", delimiter='&')
print(to_print)
|
# Check that the local script can be opened at all.
try:
    file = open("while.py")
    file.close()
except OSError:
    print("打开文件失败")
# Dump /etc/passwd to stdout; report failure instead of crashing.
try:
    file = open("/etc/passwd")
    print('文件已经打开')
    # BUG FIX: was file.reading(), which raises AttributeError (and was not
    # caught by the IOError handler).
    s = file.read()
    print(s, end='')
    file.close()
except IOError:
    print('打开文件失败!')
from fbchat import log, Client
import sys
import subprocess
import os, signal
from subprocess import check_output
# An FBChat "Echobot", set up to pass string values in/out of a local shell using Python's subprocess module
class EchoBot(Client):
    def onMessage(self, author_id, message, thread_id, thread_type, **kwargs):
        """Run each incoming chat message as a shell command and reply with its stdout.

        SECURITY WARNING: shell=True executes arbitrary text from any chat
        participant as a local shell command — this is remote code execution
        by design. Never run under an account that others can message.
        """
        self.markAsDelivered(author_id, thread_id)
        self.markAsRead(author_id)
        log.info("Message from {} in {} ({}): {}".format(author_id, thread_id, thread_type.name, message))
        # Ignore our own outgoing messages to avoid a feedback loop.
        if author_id != self.uid:
            process = subprocess.Popen(message, shell=True, stdout=subprocess.PIPE)
            process.wait()
            # Raw bytes from the command's stdout are sent back as the reply.
            cmd_output = process.stdout.read()
            self.sendMessage(cmd_output, thread_id=thread_id, thread_type=thread_type)
#replace "YourFBLoginEmail@example.com", "YourFBPasswordHere" with a real Facebook login
client = EchoBot("YourFBLoginEmail@example.com", "YourFBPasswordHere")
# Blocks and processes incoming events until the connection drops.
client.listen()
#if it crashes, just start over
# NOTE(review): assumes this script is named FBcmd.py and is on PATH/executable.
os.system("FBcmd.py")
|
from turtle import *
# Draw an escape-time fractal with turtle graphics: a dot is drawn at every
# grid point whose orbit under z -> z^2 + c diverges within `depth` steps.
mode('logo')
clearscreen()
speed(0)
size = 4 # size of dots
scaler = 100 # how much to zoom in
screen = 200 # screen of sight in fractal
depth = 10 # recursion for f(z)
res = 5 # how many pixels to jump for next recursion
pu()
for a in range(-screen,screen,res):
    for b in range(-screen,screen,res):
        old = 0
        new = 0
        # Does it diverge?
        flag = False
        for z in range(depth):
            old = new
            new = (old)**2 - (a/scaler)+((b/scaler)*1j)
            if abs(new) >= 2: # diverges once |z| reaches 2
                flag = True
                break
        pu()
        goto((a,b))
        if flag:
            pd()
            begin_fill()
            circle(size)
            end_fill()
            pu()
# BUG FIX: keep the window open with the event loop instead of the previous
# `while True: goto((0,0))`, which busy-looped and pinned a CPU core.
done()
|
from parking_project.requests.models import Request
from django.contrib import admin
# Expose the Request model in the Django admin with default options.
admin.site.register(Request)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Data model holding a per-city
    integer metric tagged by type (residual/deal/see/newcustomer/newhouse)."""

    dependencies = [
        ('main', '0002_auto_20170321_1255'),
    ]

    operations = [
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('city', models.CharField(max_length=20)),
                ('vtype', models.CharField(max_length=20, choices=[(b'r', b'residual'), (b'd', b'deal'), (b's', b'see'), (b'nc', b'newcustomer'), (b'nh', b'newhouse')])),
                ('value', models.IntegerField()),
                # NOTE(review): auto_now=True updates on every save; if a
                # creation timestamp was intended, the model should use
                # auto_now_add=True — confirm against main.models.
                ('created_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
from .books.list import book_list
from .books.details import book_details
from .librarians.list import list_librarians
from .libraries.list import list_libraries
from .home import home
from .auth.logout import logout_user
from .books.form import book_form, book_edit_form
from .libraries.form import library_form
from .librarians.details import librarian_details
from .libraries.details import library_details
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 19:15:13 2018
@author: atul
"""
def permute(s):
    """Return every permutation of the string s as a list of strings.

    Permutations are produced in the order obtained by fixing each
    character in turn and recursing on the remainder.
    """
    # Base case: a single character has exactly one permutation.
    # (Leftover debug prints from development have been removed.)
    if len(s) == 1:
        return [s]
    out = []
    for i, letter in enumerate(s):
        # Remove the i-th character, permute the rest, and prefix it back.
        for perm in permute(s[:i] + s[i+1:]):
            out.append(letter + perm)
    return out
print(permute("123"))
my_list = ['apple', 'banana', 'grapes', 'pear']
for c, value in enumerate(my_list, 1):
print(c, value)
my_list = ['apple', 'banana', 'grapes', 'pear']
counter_list = list(enumerate(my_list, 1))
print(counter_list)
# Output: [(1, 'apple'), (2, 'banana'), (3, 'grapes'), (4, 'pear')]
def details():
    """Print each character of a sample string with 0-, 1- and 2-based indexes."""
    sample = 'apple banana'
    for position, ch in enumerate(sample):
        print(position, ch)
    sample = 'apple banana'
    for position, ch in enumerate(sample, 1):
        print(position, ch)
    sample = 'apple banana'
    for position, ch in enumerate(sample, 2):
        print(position, ch)
#print(details()) |
import unittest
from bin.song import Song
class TestSongAlbum(unittest.TestCase):
    """Validate Song.set_album type checking, value checking and success."""

    def setUp(self) -> None:
        self.song = Song()

    def tearDown(self) -> None:
        del self.song

    def test_set_album_type(self):
        # Non-string albums must be rejected with TypeError.
        for bad in (3, True, None):
            with self.assertRaises(TypeError):
                self.song.set_album(album=bad)

    def test_set_album_value(self):
        # Empty album names must be rejected with ValueError.
        for empty in ("", ''):
            with self.assertRaises(ValueError):
                self.song.set_album(album=empty)

    def test_set_album_correct(self):
        # Any non-empty string is accepted, even number-like or "None".
        for ok in ("Name", "3", "None"):
            self.assertTrue(self.song.set_album(ok))
class TestSongArtist(unittest.TestCase):
    """Validate Song.set_artist type checking, value checking and success."""

    def setUp(self) -> None:
        self.song = Song()

    def tearDown(self) -> None:
        del self.song

    def test_set_artist_type(self):
        # Non-string artists must be rejected with TypeError.
        for bad in (3, True, None):
            with self.assertRaises(TypeError):
                self.song.set_artist(artist=bad)

    def test_set_artist_value(self):
        # Empty artist names must be rejected with ValueError.
        for empty in ("", ''):
            with self.assertRaises(ValueError):
                self.song.set_artist(artist=empty)

    def test_set_artist_correct(self):
        # Any non-empty string is accepted, even number-like or "None".
        for ok in ("Name", "3", "None"):
            self.assertTrue(self.song.set_artist(ok))
class TestSongLength(unittest.TestCase):
    """Validate Song.set_length type checking, value checking and success."""

    def setUp(self) -> None:
        self.song = Song()

    def tearDown(self) -> None:
        del self.song

    def test_set_length_type(self):
        # Booleans and arbitrary objects are not valid lengths.
        for bad in (True, object):
            with self.assertRaises(TypeError):
                self.song.set_length(length=bad)

    def test_set_length_value(self):
        # Negative lengths (string or numeric) must be rejected.
        for negative in ("-3", -19.8, -16):
            with self.assertRaises(ValueError):
                self.song.set_length(length=negative)

    def test_set_length_correct(self):
        # Positive ints, floats and numeric strings are all accepted.
        for ok in ("3", "5.14", 34, 3.14):
            self.assertTrue(self.song.set_length(ok))
class TestSongLengthPretty(unittest.TestCase):
    """Check length_pretty formatting for several stored lengths."""

    def setUp(self) -> None:
        self.song = Song()
        self.song.set_length(180)

    def tearDown(self) -> None:
        del self.song

    def _check(self, seconds, full, mins_secs, secs_only, mins_only, hours_only):
        # Shared assertions for one stored length value.
        self.song.set_length(seconds)
        self.assertEqual(self.song.length_pretty(hours=True, minutes=True, seconds=True), full)
        self.assertEqual(self.song.length_pretty(minutes=True, seconds=True), mins_secs)
        self.assertEqual(self.song.length_pretty(seconds=True), secs_only)
        self.assertEqual(self.song.length_pretty(minutes=True), mins_only)
        self.assertEqual(self.song.length_pretty(hours=True), hours_only)

    def test_length_pretty_180(self):
        self._check(180, "0:03:00", "03:00", ":00", "03", "0:")

    def test_length_pretty_221(self):
        self._check(221, "0:03:41", "03:41", ":41", "03", "0:")

    def test_length_pretty_4502(self):
        self._check(4502, "1:15:02", "15:02", ":02", "15", "1:")
class TestSongPath(unittest.TestCase):
    """Validate Song.set_path type checking and rejection of bad paths."""

    def setUp(self) -> None:
        self.song = Song()

    def tearDown(self) -> None:
        del self.song

    def test_set_path_type(self):
        # Non-string paths must be rejected with TypeError.
        for bad in (True, -8.6, object):
            with self.assertRaises(TypeError):
                self.song.set_path(path=bad)

    def test_set_path_value(self):
        # Strings that are not usable song files must raise ValueError.
        for bad_path in ("True", "../data/songs", "../data/songs/empty.mp3"):
            with self.assertRaises(ValueError):
                self.song.set_path(path=bad_path)
class TestSongTitle(unittest.TestCase):
    """Validate Song.set_title type checking, value checking and success."""

    def setUp(self) -> None:
        self.song = Song()

    def tearDown(self) -> None:
        del self.song

    def test_set_title_type(self):
        # Non-string titles must be rejected with TypeError.
        for bad in (3, True, None):
            with self.assertRaises(TypeError):
                self.song.set_title(title=bad)

    def test_set_title_value(self):
        # Empty titles must be rejected with ValueError.
        for empty in ("", ''):
            with self.assertRaises(ValueError):
                self.song.set_title(title=empty)

    def test_set_title_correct(self):
        # Any non-empty string is accepted, even number-like or "None".
        for ok in ("Name", "3", "None"):
            self.assertTrue(self.song.set_title(ok))
# Run all test cases via unittest's auto-discovery when executed directly.
# (Stale commented-out manual-suite experiments removed.)
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Dict, Optional
import jinja2
from matter_idl.matter_idl_types import Idl
from .filters import RegisterCommonFilters
class GeneratorStorage:
    """
    Handles file operations for generator output. Specifically can create
    required files for output.
    Is overridden for unit tests.
    """

    def __init__(self):
        # Relative paths of every output file reported so far.
        self._generated_paths = set()

    @property
    def generated_paths(self):
        return self._generated_paths

    def report_output_file(self, relative_path: str):
        """Record that relative_path was produced by the generator."""
        self._generated_paths.add(relative_path)

    def get_existing_data(self, relative_path: str):
        """Gets the existing data at the given path.
        If such data does not exist, will return None.
        """
        raise NotImplementedError()

    def write_new_data(self, relative_path: str, content: str):
        """Write new data to the given path."""
        raise NotImplementedError()
class FileSystemGeneratorStorage(GeneratorStorage):
    """
    A storage generator which will physically write files to disk into
    a given output folder.
    """

    def __init__(self, output_dir: str):
        super().__init__()
        self.output_dir = output_dir

    def get_existing_data(self, relative_path: str):
        """Gets the existing data at the given path.
        If such data does not exist, will return None.
        """
        path = os.path.join(self.output_dir, relative_path)
        if not os.path.exists(path):
            return None
        logging.info("Checking existing data in %s" % path)
        with open(path, 'rt') as f:
            return f.read()

    def write_new_data(self, relative_path: str, content: str):
        """Write new data to the given path."""
        path = os.path.join(self.output_dir, relative_path)
        parent = os.path.dirname(path)
        # Create missing parent directories before writing.
        if not os.path.exists(parent):
            logging.info("Creating output directory: %s" % parent)
            os.makedirs(parent)
        logging.info("Writing new data to: %s" % path)
        with open(path, "wt") as f:
            f.write(content)
class CodeGenerator:
    """
    Defines the general interface for things that can generate code output.

    A CodeGenerator takes a AST as input (a `Idl` type) and generates files
    as output (like java/cpp/mm/other).

    Its public interface surface is reasonably small:
    'storage' init argument specifies where generated code goes
    'idl' is the input AST to generate
    'render' will perform a rendering of all files.

    As special optimizations, CodeGenerators generally will try to read
    existing data and will not re-write content if not changed (so that
    write time of files do not change and rebuilds are not triggered).
    """
    def __init__(self, storage: GeneratorStorage, idl: Idl, loader: Optional[jinja2.BaseLoader] = None, fs_loader_searchpath: Optional[str] = None):
        """
        A code generator will render a parsed IDL (a AST) into a given storage.

        Args:
            storage: Storage to use to read/save data
            idl: the parsed AST that will be rendered
            loader: if given, use a custom loader for templates
            fs_loader_searchpath: if a loader is NOT given, this controls the search path
                of a default FileSystemLoader that will be used
        """
        if not loader:
            # Fall back to loading templates from this module's directory
            # unless the caller specified a different search path.
            if not fs_loader_searchpath:
                fs_loader_searchpath = os.path.dirname(__file__)
            loader = jinja2.FileSystemLoader(searchpath=fs_loader_searchpath)
        self.storage = storage
        self.idl = idl
        # keep_trailing_newline so generated files keep their final newline.
        self.jinja_env = jinja2.Environment(
            loader=loader, keep_trailing_newline=True)
        # When True, internal_render_one_output only logs; set per render() call.
        self.dry_run = False
        RegisterCommonFilters(self.jinja_env.filters)
    def render(self, dry_run: bool = False):
        """
        Renders all required files given the idl contained in the code generator.
        Reset the list of generated outputs.

        Args:
            dry_run: if true, outputs are not actually written to disk.
                if false, outputs are actually written to disk.
        """
        self.dry_run = dry_run
        self.internal_render_all()
    def internal_render_all(self):
        """This method is to be implemented by subclasses to run all generation
        as needed.
        """
        raise NotImplementedError("Method should be implemented by subclasses")
    def internal_render_one_output(self, template_path: str, output_file_name: str, vars: Dict):
        """
        Method to be called by subclasses to mark that a template is to be generated.

        File will either actually do a jinja2 generation or just log things
        if dry-run was requested during `render`.

        NOTE: to make this method suitable for rebuilds, this file will NOT alter
        the timestamp of the output file if the file content would not
        change (i.e. no write will be invoked in that case.)

        Args:
            template_path - the path to the template to be loaded for file generation.
                Template MUST be a jinja2 template.
            output_file_name - File name that the template is to be generated to.
            vars - variables used for template generation
        """
        logging.info("File to be generated: %s" % output_file_name)
        if self.dry_run:
            return
        rendered = self.jinja_env.get_template(template_path).render(vars)
        # Report regardless if it has changed or not. This is because even if
        # files are unchanged, validation of what the correct output is should
        # still be done.
        self.storage.report_output_file(output_file_name)
        if rendered == self.storage.get_existing_data(output_file_name):
            # Skip the write so the file mtime stays unchanged and build
            # systems do not trigger a rebuild.
            logging.info("File content not changed")
        else:
            self.storage.write_new_data(output_file_name, rendered)
|
import math
import numpy as np
import ray
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest import ConcurrencyLimiter
import unittest
def loss(config, reporter):
    """Report the square of config['x'] — a simple function to optimize."""
    reporter(loss=config.get("x") ** 2)
class ConvergenceTest(unittest.TestCase):
    """Test convergence in gaussian process."""

    def tearDown(self):
        # FIX: this hook was previously named `shutDown`, which unittest never
        # calls — so the Ray instance started by the test was never shut down.
        # `tearDown` runs after every test method.
        ray.shutdown()

    def test_convergence_gaussian_process(self):
        """BayesOpt over f(x) = x**2 on [0, 20] should converge to x ~= 0."""
        np.random.seed(0)
        ray.init(local_mode=True, num_cpus=1, num_gpus=1)
        # This is the space of parameters to explore
        space = {"x": tune.uniform(0, 20)}
        resources_per_trial = {"cpu": 1, "gpu": 0}
        # Following bayesian optimization
        gp = BayesOptSearch(random_search_steps=10)
        gp.repeat_float_precision = 5
        # Serialize trials: BayesOpt needs each result before suggesting more.
        gp = ConcurrencyLimiter(gp, 1)
        # Execution of the BO.
        analysis = tune.run(
            loss,
            metric="loss",
            mode="min",
            # stop=EarlyStopping("loss", mode="min", patience=5),
            search_alg=gp,
            config=space,
            num_samples=100,  # Number of iterations
            resources_per_trial=resources_per_trial,
            raise_on_failed_trial=False,
            fail_fast=True,
            verbose=1)
        # Trial count varies with the execution environment; observed values.
        assert len(analysis.trials) in {13, 40, 43}  # it is 43 on the cluster?
        assert math.isclose(analysis.best_config["x"], 0, abs_tol=1e-5)
if __name__ == "__main__":
    # Run this module's tests under pytest when executed directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from odoo import api, fields, models
class commission_invoice(models.AbstractModel):
    # Abstract report model: Odoo resolves it by this _name when rendering
    # the commission QWeb report template.
    _name = 'report.property_commission.commission_report_template2'
    @api.multi
    def get_datas(self, start_date, end_date):
        """
        This method is used to get data from commission invoice lines
        between the two selected dates.
        ------------------------------------------------------------
        @param self: The object pointer
        """
        # NOTE(review): this initial empty list is immediately overwritten by
        # the comprehension below — dead assignment kept for byte-compatibility.
        datas = []
        invoice_ids = self.env['commission.invoice'].search(
            [('date', '<=', end_date),
             ('date', '>=', start_date),
             ('inv', '=', True)])
        # One row per commission invoice: property, tenancy, amount, agent.
        datas = [{'property': value.property_id.name,
                  'tenancy': value.tenancy.name,
                  'commission': value.amount_total,
                  'agent': value.agent.name}
                 for value in invoice_ids]
        return datas
    @api.model
    def render_html(self, docids, data=None):
        # Render the commission report for the records selected in the wizard.
        self.model = self.env.context.get('active_model')
        docs = self.env[self.model].browse(
            self.env.context.get('active_ids', []))
        # Defaults: start = today, end = last day of next month.
        start_date = data['form'].get('start_date', fields.Date.today())
        end_date = data['form'].get(
            'end_date', str(
                datetime.now() + relativedelta
                (months=+1, day=1, days=-1))[:10])
        data_res = self.with_context(
            data['form'].get('used_context', {})).get_datas(
            start_date, end_date)
        docargs = {
            'doc_ids': docids,
            'doc_model': self.model,
            'data': data['form'],
            'docs': docs,
            'time': time,
            'get_datas': data_res,
        }
        # NOTE(review): datetime.strftime is applied to the wizard's date
        # values; this assumes they are date/datetime objects rather than
        # '%Y-%m-%d' strings — TODO confirm against the wizard building
        # data['form'].
        docargs['data'].update({
            'end_date': datetime.strftime(
                docargs.get('data').get('end_date'), '%d/%m/%Y'),
            'start_date': datetime.strftime(
                docargs.get('data').get('start_date'), '%d/%m/%Y')})
        return self.env['report'].render(
            "property_commission.commission_report_template2", docargs)
|
#https://leetcode.com/problems/divide-two-integers/discuss/837822/Python-clean-solution
def divideWithExtraSteps(dividend, divisor):
    """Integer-divide `dividend` by `divisor`, truncating toward zero, using
    only subtraction and bit shifts; the result is clamped to the signed
    32-bit range [-2**31, 2**31 - 1] (LeetCode 29 semantics).
    """
    # The result is negative exactly when the operands' signs differ.
    sign = +1 if (dividend ^ divisor) >= 0 else -1
    dividend, divisor = abs(dividend), abs(divisor)
    ans = 0
    # Greedily subtract divisor * 2**power for the largest powers that fit.
    for power in range(31, -1, -1):
        if (divisor << power) <= dividend:
            ans += (1 << power)
            dividend -= (divisor << power)
    ans = 0 - ans if sign == -1 else ans
    # FIX: the lower bound must be inclusive. The previous strict comparison
    # (-2**31 < ans) wrongly clamped the valid result -2**31
    # (e.g. divide(-2**31, 1)) to 2**31 - 1.
    if not (-2**31 <= ans <= 2**31 - 1):
        return 2**31 - 1
    else:
        return ans

if __name__ == '__main__':
    print(divideWithExtraSteps(10, 3))
import datetime
# Module-level registers for the three budget categories; every Expenses
# instance created through add_element() is appended to one of these.
list_expenses = []
list_incomes = []
list_savings = []
class Expenses:
    """One budget entry: a category, a user label, an amount and a date."""

    def __init__(self, cat, subcat, amount, date):
        self.cat = cat        # top-level category: 'Expense', 'Income' or 'Savings'
        self.subcat = subcat  # user-supplied label for the entry
        self.amount = amount
        self.date = date      # yyyymmdd integer as typed by the user

    def add_amount(self, new_amount):
        """Accumulate `new_amount` onto this entry's amount."""
        self.amount += new_amount
def add_element(selection):
    """Prompt the user for one budget entry and append it to the right list.

    selection: 'E' for an expense, 'I' for an income, 'S' for a saving.
    Any other value is silently ignored (matches the original behaviour).
    """
    # Per-selection configuration: (category, item prompt, amount prompt,
    # date prompt, destination list). Prompts are kept verbatim.
    branches = {
        'E': ('Expense',
              "Which expense to add? ",
              "What is the amount of your expense? ",
              "When did you make the expense? (yyyymmdd) ",
              list_expenses),
        'I': ('Income',
              "Which income to add? ",
              "What is the amount of your income? ",
              "When did you receive the income? (yyyymmdd) ",
              list_incomes),
        'S': ('Savings',
              "Which saving to add? ",
              "What is the amount of your saving? ",
              "When did you make the saving? (yyyymmdd) ",
              list_savings),
    }
    if selection in branches:
        cat, item_prompt, amount_prompt, date_prompt, target = branches[selection]
        label = input(item_prompt)
        amount = int(input(amount_prompt))
        date = int(input(date_prompt))
        target.append(Expenses(cat, label, amount, date))
def netflow():
    """Return total incomes minus total expenses (savings are not counted)."""
    incomes = sum(entry.amount for entry in list_incomes)
    expenses = sum(entry.amount for entry in list_expenses)
    return incomes - expenses
def table(currency):
    """Print the whole household budget, one section per category."""
    row_fmt = "{:<15} \t {:<4}{} | {:8}"
    print("Household budget:")
    print("Incomes \t Amount | Date")
    for entry in list_incomes:
        print(row_fmt.format(entry.subcat, entry.amount, currency, entry.date))
    print("\n")
    print("Savings \t Amount | Date")
    for entry in list_savings:
        print(row_fmt.format(entry.subcat, entry.amount, currency, entry.date))
    print("\n")
    print("Expenses \t Amount | Date")
    for entry in list_expenses:
        print(row_fmt.format(entry.subcat, entry.amount, currency, entry.date))
    print("Total net flow (Incomes - Expenses) \t {}{}".format(netflow(), currency))
# Interactive entry loop: keep adding entries until the user declines.
complete = True
currency = input("What is your currency? ")
while complete:
    selection = input("What do you want to add in your budget? An expense (E), an income (I) or a saving (S): ")
    add_element(selection)
    # FIX: corrected user-facing typo "bugdet" -> "budget".
    display_table = input("Do you want to see your entire household budget? Yes (Y) or no (N): ")
    if display_table == 'Y':
        table(currency)
    keepgoing = input("Do you want to add something else in your budget? Yes (Y) or no (N): ")
    if keepgoing != 'Y':
        complete = False
|
#coding=utf-8
__author__ = 'JinyouHU'
# Define a function and invoke it (also through an alias).
def sum(a,b):
    # NOTE(review): shadows the builtin sum() for the rest of this module.
    return a+b
func = sum  # functions are first-class objects: bind another name to the same one
r = func(5,6)
print r
# Define a function with a default argument.
def add(a,b=2):  # b falls back to its default value (2) when not supplied
    return a+b
r = add(1)
print r
r = add(1,5)
print r
# the range() function
a = range(5, 10)
print a
a = range(-2, -7)  # empty: the default step is +1 but start > stop
print a
a = range(-7, -2)
print a
a = range(-2, -11, -3)  # counting down with a negative step
print a
def printinfo(name,age=35):
    "Print the given name and age (age defaults to 35)."
    print 'name ',name
    print 'age ',age
printinfo(age=50,name='miki')  # keyword arguments may be given in any order
printinfo('lucy',55)
printinfo('miky')  # age falls back to its default
'''
加了星号(*)的变量名会存放所有未命名的变量参数。选择不多传参数也可。如下实例:
'''
# (The note above: a *-prefixed parameter collects all extra positional
# arguments into a tuple; passing none of them is also allowed.)
def printUnSureFunc(arg1, *vartuple):
    "Print the first argument, then every extra positional argument."
    print 'output: '
    print arg1
    for var in vartuple:
        print var
printUnSureFunc(10)
printUnSureFunc(70, 60, 50, 40)
printUnSureFunc(100, {'name':'miki','age':24})
'''
匿名函数
用lambda关键词能创建小型匿名函数。这种函数得名于省略了用def声明函数的标准步骤。
Lambda函数能接收任何数量的参数但只能返回一个表达式的值,同时只能不能包含命令或多个表达式。
匿名函数不能直接调用print,因为lambda需要一个表达式。
lambda函数拥有自己的名字空间,且不能访问自有参数列表之外或全局名字空间里的参数。
虽然lambda函数看起来只能写一行,却不等同于C或C++的内联函数,后者的目的是调用小函数时不占用栈内存从而增加运行效率。
lambda [arg1 [,arg2,.....argn]]:expression
'''
# (The note above: lambda builds a small anonymous function limited to a
# single expression, with its own namespace.)
mysum = lambda arg1,arg2:arg1 + arg2
print 'value of total: ',mysum(10, 20)
print 'value of total: ',mysum(20, 20)
def nth_number_with_666(n):
    """Return the n-th positive integer whose decimal digits contain '666'.

    Counting starts at 1: nth_number_with_666(1) == 666.
    """
    count = 0
    i = 0
    while count < n:
        i += 1
        if '666' in str(i):
            count += 1
    return i

if __name__ == '__main__':
    # Read n from stdin and print the n-th such number, as before.
    n = int(input())
    print(nth_number_with_666(n))
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import requests
from distutils.version import LooseVersion
from pydashie.dashie_sampler import DashieSampler
class BackstageStoreVersions(DashieSampler):
    """Dashie sampler reporting deployed store versions (QA/prod) plus the
    newest package versions published in artifactory."""

    def name(self):
        # Widget identifier the dashboard binds this sampler to.
        return 'backstage-store-versions'

    def sample(self):
        """Return {'items': [{'label': ..., 'value': <version or ''>}, ...]}.

        Every lookup is best-effort: any failure yields an empty version
        string instead of breaking the whole sample.
        """
        deployments = [
            ('QA-API', 'http://api.store-qa.backstage.globoi.com/api/version/'),
            ('QA-Web', 'http://store-qa.backstage.globoi.com/version'),
            ('Prod-API', 'http://api.store.backstage.globoi.com/api/version/'),
            ('Prod-Web', 'http://store.backstage.globoi.com/version'),
        ]
        items = []
        for label, url in deployments:
            try:
                resp = requests.get(url)
                version = resp.json().get('version', '')
            except Exception:  # FIX: bare except also swallowed SystemExit/KeyboardInterrupt
                version = ''
            items.append({'label': label, 'value': version})
        packages = [
            ('OmniStore', True, 'http://artifactory.globoi.com/artifactory/api/storage/pypi-local/omnistore'),
            ('OmniInstaller', False, 'http://artifactory.globoi.com/artifactory/api/storage/pypi-local/omni-installer'),
            ('StoreClient', False, 'http://artifactory.globoi.com/artifactory/api/storage/pypi-local/store-client'),
        ]
        for label, report_04_branch, url in packages:
            try:
                resp = requests.get(url)
                versions = resp.json().get('children')
                versions = [x['uri'].replace('/', '') for x in versions]
                aux_version = ''
                if report_04_branch:
                    # Additionally report the newest 0.4.x release.
                    branch_version = [x for x in versions if x.startswith('0.4')]
                    # FIX: guard against an empty 0.4 list — previously
                    # branch_version[-1] raised IndexError and the whole
                    # package lookup was silently discarded.
                    if branch_version:
                        branch_version.sort(key=LooseVersion)
                        items.append({'label': 'OmniStore 0.4', 'value': branch_version[-1]})
                versions.sort(key=LooseVersion)
                version = "%s%s" % (versions[-1], aux_version)
            except Exception:  # FIX: narrowed from a bare except
                version = ''
            items.append({'label': label, 'value': version})
        return {'items': items}
|
# Grades kept both as a plain list and keyed by student name.
# NOTE(review): `grades` is never used below; kept for compatibility.
grades = [12, 4.4, 56, 45.33]
student_grades = {"praveen": 12, "chandrika": 4.4, "myra": 56, "pinkey": 45.33}

# Arithmetic mean over all students' grades.
mysum = sum(student_grades.values())
mycount = len(student_grades)
mean = mysum / mycount
print(mean)
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mnist
import os
# raise EOFError("Compressed file ended before the "
# import tempfile
# print(tempfile.gettempdir())
# Then go to that directory and delete train-images-idx3-ubyte.gz.
# rm /tmp/train-images-*
# Route all HTTP(S) traffic through a local proxy so the `mnist` package
# can download the dataset archives.
proxy = 'http://127.0.0.1:8123'
os.environ['http_proxy'] = proxy
os.environ['HTTP_PROXY'] = proxy
os.environ['https_proxy'] = proxy
os.environ['HTTPS_PROXY'] = proxy
# Fixed seed so the randomly sampled test image below is reproducible.
np.random.seed(42)
X_train, y_train = mnist.train_images(), mnist.train_labels()
X_test, y_test = mnist.test_images(), mnist.test_labels()
num_classes = 10 # classes are the digits from 0 to 9
#
# X_train, X_test = X_train.reshape(-1, 28 * 28), X_test.reshape(-1, 28 * 28)
#
# y_train = np.eye(num_classes)[y_train]
# Show one random test digit and print its label.
img_idx = np.random.randint(0, X_test.shape[0])
plt.imshow(X_test[img_idx], cmap=matplotlib.cm.binary)
plt.axis("off")
plt.show()
print(y_test[img_idx])
# Flatten each 28x28 image into a 784-element row vector.
X_train, X_test = X_train.reshape(-1, 28 * 28), X_test.reshape(-1, 28 * 28)
print('Pixel value between {} and {}'.format(X_train.min(), X_train.max()))
# Scale pixels into [0, 1] (assumes uint8 input in [0, 255] - TODO confirm).
X_train, X_test = X_train / 255., X_test / 255
print('Normalized pixel value between {} and {}'.format(X_train.min(), X_train.max()))
# One-hot encode the training labels via an identity-matrix row lookup.
y_train = np.eye(num_classes)[y_train]
print(y_train)
|
import logging
from aatest.summation import represent_result
from oic.utils.http_util import Response, NotFound
from aatest.check import ERROR
from aatest.check import OK
from aatest.check import WARNING
from aatest.check import INCOMPLETE
from aatest.io import IO
from saml2test.idp_test.webio import get_test_info
__author__ = 'roland'
logger = logging.getLogger(__name__)
TEST_RESULTS = {OK: "OK", ERROR: "ERROR", WARNING: "WARNING",
INCOMPLETE: "INCOMPLETE"}
class WebIO(IO):
    """IO implementation that renders test state as web pages via mako
    templates and WSGI."""

    def __init__(self, conf, flows, desc, profile_handler, profile, lookup,
                 cache=None, environ=None, start_response=None, session=None,
                 **kwargs):
        IO.__init__(self, flows, profile, desc, profile_handler, cache,
                    session=session, **kwargs)
        # WSGI plumbing and template machinery used by the render methods.
        self.conf = conf
        self.lookup = lookup
        self.environ = environ
        self.start_response = start_response

    def flow_list(self):
        """Render the overview page listing all test flows and results."""
        page = Response(mako_template="flowlist.mako",
                        template_lookup=self.lookup,
                        headers=[])
        template_args = {
            "tests": self.session["tests"],
            "profile": self.session["profile"],
            "test_info": list(self.session["test_info"].keys()),
            "base": self.conf.BASE,
            "headlines": self.desc,
            "testresults": TEST_RESULTS,
        }
        return page(self.environ, self.start_response, **template_args)

    def test_info(self, testid):
        """Render the detail page for a single test run."""
        page = Response(mako_template="testinfo.mako",
                        template_lookup=self.lookup,
                        headers=[])
        conversation = self.session["conv"]
        info = get_test_info(self.session, testid)
        template_args = {
            "profile": info["profile_info"],
            "trace": info["trace"],
            "events": info["events"],
            "result": represent_result(conversation.events).replace("\n", "<br>\n"),
        }
        return page(self.environ, self.start_response, **template_args)

    def not_found(self):
        """Called if no URL matches."""
        return NotFound()(self.environ, self.start_response)
|
## Jason Balkenbush 28 December 2017
## For xls files: code to group rows based on duplicate values in the first column
## groups are converted to new sheets in the output xls file
## rows containing "Domain" in the first cell are excluded from the output
## xlwt does not work with xlsx files, only xls
##
import xlrd
import xlwt
# input excel file
fname = r"C:\temp\test\domain_specs.xls"
# output excel file
foname = r"C:\temp\test\domain_specs_modified.xls"

wb = xlrd.open_workbook(fname)
sheets = [sheet.name for sheet in wb.sheets()]
for sheet in sheets:
    sh = wb.sheet_by_name(sheet)
    print("{} has {} rows, {} cols".format(sheet, sh.nrows, sh.ncols))

# Group the second sheet's rows by the value in their first column.
sh1 = wb.sheet_by_name(sheets[1])
vallist = [sh1.cell_value(i, 0) for i in range(sh1.nrows)]
vdict = {}
for row, val in enumerate(vallist):
    vdict.setdefault(val, []).append(row)
# Header rows ("Domain" in the first cell) are excluded from the output.
# FIX: pop with a default so a sheet without a header no longer raises KeyError.
vdict.pop('Domain', None)

# sdict: key -> [[cell1, cell2], ...] taken from columns 1 and 2 of every
# row where the key occurs.
sdict = {}
for key, rows in vdict.items():
    sdict[key] = [[sh1.cell_value(i, 1), sh1.cell_value(i, 2)] for i in rows]

# Create one output sheet per group and write header + rows.
# FIX: xlwt.Workbook()'s first positional argument is the *encoding*; the
# output path belongs only in save().
wtwb = xlwt.Workbook()
for s in sdict:
    # FIX: cell_overwrite_ok must be a keyword argument, not the literal
    # string 'cell_overwrite_ok=True' (which was silently used as a style arg).
    wtsh = wtwb.add_sheet(s, cell_overwrite_ok=True)
    wtsh.write(0, 0, u"code")
    wtsh.write(0, 1, u"description")
    for i, (v1, v2) in enumerate(sdict[s]):
        wtsh.write(i + 1, 0, v1)
        wtsh.write(i + 1, 1, v2)
wtwb.save(foname)
|
Suppose we're working with 8 bit quantities (for simplicity's sake) and suppose we want to find how -28 would be expressed in two's complement notation. First we write out 28 in binary form.
00011100
Then we invert the digits. 0 becomes 1, 1 becomes 0.
11100011
Then we add 1.
11100100
That is how one would write -28 in 8 bit binary.
Conversion from Two's Complement
Use the number 0xFFFFFFFF as an example. In binary, that is:
1111 1111 1111 1111 1111 1111 1111 1111
#reference
https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html
# x & 0xFFFFFFFF == x
# will return True if x doesn't overflow and x is larger than 0.
# 1.Why carry is a&b:
# If a and b are both 1 at the same digit, it creates one carry.
# Because you can only use 0 and 1 in binary, if you add 1+1 together, it will roll that over to the next digit, and the value will be 0 at this digit.
# if they are both 0 or only one is 1, it doesn't need to carry.
# 2. Use the ^ operation between a and b to find the bits where they differ.
# In effect, a ^ b adds a and b together while ignoring the digits where both a and b are 1,
# because those carry positions were already taken care of in step 1.
def getSum(self, a: int, b: int) -> int:
    """Add two 32-bit signed integers without +/-, via bitwise carry propagation."""
    mask = 0xffffffff  # all ones == MAX uint32 == 2**32 - 1
    while b & mask:
        carry = (a & b) << 1  # positions where both bits are 1 carry out
        a ^= b                # partial sum, ignoring carries
        b = carry
    # If a leftover carry escaped the 32-bit window, the true result is a
    # negative 32-bit value: truncate a to 32 bits (e.g. -1 + 1).
    return a & mask if b > mask else a
#more explanation
https://leetcode.com/problems/sum-of-two-integers/discuss/167931/Solution-with-ACTUAL-explanation-(how-you-would-work-this-out)
|
import numpy as np
class Expression:
    """Demo: evaluate 3*A + 5*B for two random 3x3 integer matrices and
    print the determinant of the result."""

    def MainMethod(self):
        """Generate the matrices, combine them, and print the determinant."""
        first = np.random.randint(0, 20, (3, 3))
        second = np.random.randint(0, 20, (3, 3))
        self.PrintMatrix(first)
        self.PrintMatrix(second)
        combined = self.ExpressionMatrix(first, second)
        self.PrintMatrix(combined)
        print(str(self.det(combined)))

    def PrintMatrix(self, matrix):
        """Print the matrix's string representation."""
        print(str(matrix))

    def ExpressionMatrix(self, a, b):
        """Return 3*a + 5*b, printing each scaled intermediate."""
        scaled_a = 3 * a
        self.PrintMatrix(scaled_a)
        scaled_b = 5 * b
        self.PrintMatrix(scaled_b)
        return scaled_a + scaled_b

    def det(self, matrix):
        """Return the determinant of `matrix` via numpy."""
        return np.linalg.det(matrix)
def main():
    # Entry point: build the demo object and run the full computation.
    expression = Expression()
    expression.MainMethod()
# Runs unconditionally on import/execution (script-style module).
main()
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 DevIntelle Consulting Service Pvt.Ltd (<http://www.devintellecs.com>).
#
# For Module Support : devintelle@gmail.com or Skype : devintelle
#
##############################################################################
from . import product_tags
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
#generatore di livelli: da una bitmap si ottiene un file di testo come una matrice di numeri
#legend.txt: corrispondenza tra valori a 24 bit ed elementi di gioco
#formato del file di output:
# - numero righe
# - numero colonne
# - matrice
from PIL import Image
import sys
# Legend built by hand for now; TODO: read it from legend.txt as the header
# comment intends. Each entry maps a 24-bit RGB colour to a game element id.
legenda = []
default = 0
legenda.append(((0, 0, 0), 1))

im = Image.open(sys.argv[1])
# Output file: same basename as the input bitmap, with a .txt extension.
nomeFileOut = sys.argv[1][0:sys.argv[1].rindex(".")] + ".txt"
out = open(nomeFileOut, "w")
out.write(str(im.size[0]) + "," + str(im.size[1]) + "\n")
for i in list(im.getdata()):
    elemento = default
    for element in legenda:
        # FIX: compare the full RGB triple; the previous check compared only
        # the red channel (i[0]) against the whole colour tuple's first value,
        # so any colour with a matching red component was accepted.
        # (Assumes an RGB(A) image so each pixel is a tuple - TODO confirm.)
        if tuple(i[:3]) == element[0]:
            elemento = element[1]
            break
    out.write(str(elemento) + ",")
# FIX: the output file was never closed, risking lost buffered data.
out.close()
|
from itertools import accumulate
from sys import stdin
# Fast input: rebinding input() to stdin.readline skips the builtin's
# prompt handling (competitive-programming idiom).
input = stdin.readline
# First line: n numbers and m range-sum queries.
n, m = map(int, input().split())
arr = list(map(int, input().split()))
# acc[i] = sum of the first i elements, so sum(arr[l-1:r]) == acc[r] - acc[l-1].
acc = [0] + list(accumulate(arr))
for _ in range(m):
    # Each query is a 1-based inclusive range [l, r].
    l, r = map(int, input().split())
    print(acc[r]-acc[l-1])
|
import json
import os
import time
from seeder import Seeder
class CourseCreationException(Exception):
    """Signals that creating a course through the studio API failed."""
class CourseSeeder(Seeder):
    """
    Class for course creation actions

    Will create a course in the provided studio link and then import a tarfile
    for a very basic course structure.
    """
    def __init__(self, studio_url):
        # Seeder sets up the authenticated HTTP session (self.sess) to studio.
        super(CourseSeeder, self).__init__(studio_url=studio_url)
    def create_course(self, course_data):
        """
        Creates a course with given data and then returns the course key

        Arguments:
            course_data (dict): Org, course, run, and display_name

        Returns:
            course_id for successful course creation

        Raises:
            CourseCreationException when a course creation failure
        """
        print "Creating course with this course data: {}".format(course_data)
        url = '{}/course/'.format(self.studio_url)
        response = self.sess.post(url, json=course_data, headers=self.get_post_headers(self.studio_url))
        if response.status_code != 200:
            raise CourseCreationException("{}: {}".format(response.status_code, response.content))
        elif "course_key" not in response.content:
            # Studio can answer 200 with an error payload; surface a snippet.
            raise CourseCreationException(response.content[:100])
        return json.loads(response.content)["course_key"]
    def import_tarfile(self, course_id, tarfile):
        """Upload `tarfile` into `course_id` in chunks, then poll the import status.

        Arguments:
            course_id (str): key of the course to import into
            tarfile (str): tarball path relative to this file's directory
        """
        url = '{}/import/{}'.format(self.studio_url, course_id)
        print 'Importing {} to {} from {}'.format(course_id, url, tarfile)
        print 'Upload may take a while depending on size of the course'
        headers = self.get_post_headers(url)
        # Let requests generate the multipart Content-Type (with boundary).
        headers.pop("Content-Type")
        with open(os.path.join(os.path.dirname(__file__), tarfile), 'rb') as upload:
            filename = os.path.basename(tarfile)
            # Measure total size by seeking to the end once, then rewind.
            upload.seek(0, 2)
            end = upload.tell()
            upload.seek(0, 0)
            # Upload in 20 MB chunks, announcing each slice via Content-Range.
            while 1:
                start = upload.tell()
                data = upload.read(2 * (10 ** 7))
                if not data:
                    break
                stop = upload.tell() - 1
                files = [
                    ('course-data', (filename, data, 'application/x-gzip'))
                ]
                headers['Content-Range'] = '%d-%d/%d' % (start, stop, end)
                self.sess.post(url, files=files, headers=headers)
        # now check import status
        # NOTE(review): presumably ImportStatus == 4 means "finished"; there is
        # no timeout or failure handling, so a failed import polls forever —
        # confirm against studio's import_status API.
        import_status_url = '{}/import_status/{}/{}'.format(
            self.studio_url, course_id, filename)
        status = 0
        while status != 4:
            status = self.sess.get(import_status_url).json()['ImportStatus']
            time.sleep(3)
        print 'Uploaded!'
|
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from json import dumps, loads
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url, render_json, chunks
class ReplicaClient(BaseClient):
"""Replica client class for working with replicas"""
REPLICAS_BASEURL = 'replicas'
REPLICAS_CHUNK_SIZE = 1000
    def quarantine_replicas(self, replicas, rse=None, rse_id=None):
        """
        Add quarantined replicas for RSE.

        :param replicas: List of replica infos: {'scope': <scope> (optional), 'name': <name> (optional), 'path':<path> (required)}.
        :param rse: RSE name.
        :param rse_id: RSE id. Either RSE name or RSE id must be specified, but not both
        :raises: the exception mapped from the server response on any non-OK status.
        """
        # Exactly one of rse / rse_id must be provided (XOR check).
        if (rse is None) == (rse_id is None):
            raise ValueError("Either RSE name or RSE id must be specified, but not both")
        url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'quarantine']))
        headers = {}
        # Send the list in bounded chunks so large inputs do not produce
        # oversized request bodies.
        for chunk in chunks(replicas, self.REPLICAS_CHUNK_SIZE):
            data = {'rse': rse, 'rse_id': rse_id, 'replicas': chunk}
            r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
            if r.status_code != codes.ok:
                exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
                raise exc_cls(exc_msg)
    def declare_bad_file_replicas(self, replicas, reason, force=False):
        """
        Declare a list of bad replicas.

        :param replicas: Either a list of PFNs (string) or a list of dicts {'scope': <scope>, 'name': <name>, 'rse_id': <rse_id> or 'rse': <rse_name>}
        :param reason: The reason of the loss.
        :param force: boolean, tell the server to ignore existing replica status in the bad_replicas table. Default: False
        :returns: Dictionary {"rse_name": ["did: error",...]} - list of strings for DIDs failed to declare, by RSE
        """
        out = {}  # {rse: ["did: error text",...]}
        url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'bad']))
        headers = {}
        # Declare in bounded chunks; each chunk's failures are merged below.
        for chunk in chunks(replicas, self.REPLICAS_CHUNK_SIZE):
            data = {'reason': reason, 'replicas': chunk, 'force': force}
            r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
            # Both 201 (created) and 200 (ok) count as success here.
            if r.status_code not in (codes.created, codes.ok):
                exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
                raise exc_cls(exc_msg)
            # Merge the per-chunk failure lists into one dict keyed by RSE.
            chunk_result = loads(r.text)
            if chunk_result:
                for rse, lst in chunk_result.items():
                    out.setdefault(rse, []).extend(lst)
        return out
def declare_bad_did_replicas(self, rse, dids, reason):
"""
Declare a list of bad replicas.
:param rse: The RSE where the bad replicas reside
:param dids: The DIDs of the bad replicas
:param reason: The reason of the loss.
"""
data = {'reason': reason, 'rse': rse, 'dids': dids}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'bad/dids']))
headers = {}
r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
if r.status_code == codes.created:
return loads(r.text)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def declare_suspicious_file_replicas(self, pfns, reason):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
"""
data = {'reason': reason, 'pfns': pfns}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'suspicious']))
headers = {}
r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
if r.status_code == codes.created:
return loads(r.text)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_did_from_pfns(self, pfns, rse=None):
"""
Get the DIDs associated to a PFN on one given RSE
:param pfns: The list of PFNs.
:param rse: The RSE name.
:returns: A list of dictionaries {pfn: {'scope': scope, 'name': name}}
"""
data = {'rse': rse, 'pfns': pfns}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'dids']))
headers = {}
r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def list_replicas(self, dids, schemes=None, ignore_availability=True,
                      all_states=False, metalink=False, rse_expression=None,
                      client_location=None, sort=None, domain=None,
                      signature_lifetime=None, nrandom=None,
                      resolve_archives=True, resolve_parents=False,
                      updated_after=None):
        """
        List file replicas for a list of data identifiers (DIDs).

        :param dids: The list of data identifiers (DIDs) like :
            [{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
        :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
        :param ignore_availability: Also include replicas from blocked RSEs into the list
        :param metalink: ``False`` (default) retrieves as JSON,
                         ``True`` retrieves as metalink4+xml.
        :param rse_expression: The RSE expression to restrict replicas on a set of RSEs.
        :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
        :param sort: Sort the replicas: ``geoip`` - based on src/dst IP topographical distance
                     ``closeness`` - based on src/dst closeness
                     ``dynamic`` - Rucio Dynamic Smart Sort (tm)
        :param domain: Define the domain. None is fallback to 'wan', otherwise 'wan, 'lan', or 'all'
        :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
        :param nrandom: pick N random replicas. If the initial number of replicas is smaller than N, returns all replicas.
        :param resolve_archives: When set to True, find archives which contain the replicas.
        :param resolve_parents: When set to True, find all parent datasets which contain the replicas.
        :param updated_after: epoch timestamp or datetime object (UTC time), only return replicas updated after this time
        :returns: A list of dictionaries with replica information.
        """
        data = {'dids': dids,
                'domain': domain}
        # Optional filters are only added when set, so the server applies its
        # own defaults otherwise.
        if schemes:
            data['schemes'] = schemes
        if ignore_availability is not None:
            data['ignore_availability'] = ignore_availability
        data['all_states'] = all_states
        if rse_expression:
            data['rse_expression'] = rse_expression
        if client_location:
            data['client_location'] = client_location
        if sort:
            data['sort'] = sort
        if updated_after:
            if isinstance(updated_after, datetime):
                # encode in UTC string with format '%Y-%m-%dT%H:%M:%S' e.g. '2020-03-02T12:01:38'
                data['updated_after'] = updated_after.strftime('%Y-%m-%dT%H:%M:%S')
            else:
                data['updated_after'] = updated_after
        if signature_lifetime:
            data['signature_lifetime'] = signature_lifetime
        if nrandom:
            data['nrandom'] = nrandom
        data['resolve_archives'] = resolve_archives
        data['resolve_parents'] = resolve_parents
        url = build_url(choice(self.list_hosts),
                        path='/'.join([self.REPLICAS_BASEURL, 'list']))
        headers = {}
        if metalink:
            # Request metalink4+xml instead of the default JSON stream.
            headers['Accept'] = 'application/metalink4+xml'
        # pass json dict in querystring
        r = self._send_request(url, headers=headers, type_='POST', data=dumps(data), stream=True)
        if r.status_code == codes.ok:
            if not metalink:
                return self._load_json_data(r)
            # Metalink responses are returned as raw XML text.
            return r.text
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
    def list_suspicious_replicas(self, rse_expression=None, younger_than=None, nattempts=None):
        """
        List file replicas tagged as suspicious.

        :param rse_expression: The RSE expression to restrict replicas on a set of RSEs.
        :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.
        :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.
        """
        # Only forward the filters that were actually provided.
        params = {}
        if rse_expression:
            params['rse_expression'] = rse_expression
        if younger_than:
            params['younger_than'] = younger_than
        if nattempts:
            params['nattempts'] = nattempts
        url = build_url(choice(self.list_hosts),
                        path='/'.join([self.REPLICAS_BASEURL, 'suspicious']))
        r = self._send_request(url, type_='GET', params=params)
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def add_replica(self, rse, scope, name, bytes_, adler32, pfn=None, md5=None, meta={}):
"""
Add file replicas to a RSE.
:param rse: the RSE name.
:param scope: The scope of the file.
:param name: The name of the file.
:param bytes_: The size in bytes.
:param adler32: adler32 checksum.
:param pfn: PFN of the file for non deterministic RSE.
:param md5: md5 checksum.
:param meta: Metadata attributes.
:return: True if files were created successfully.
"""
dict_ = {'scope': scope, 'name': name, 'bytes': bytes_, 'meta': meta, 'adler32': adler32}
if md5:
dict_['md5'] = md5
if pfn:
dict_['pfn'] = pfn
return self.add_replicas(rse=rse, files=[dict_])
    def add_replicas(self, rse, files, ignore_availability=True):
        """
        Bulk add file replicas to a RSE.

        :param rse: the RSE name.
        :param files: The list of files. This is a list of DIDs like :
            [{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
        :param ignore_availability: Ignore the RSE blocklist.
        :return: True if files were created successfully.
        """
        url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
        data = {'rse': rse, 'files': files, 'ignore_availability': ignore_availability}
        r = self._send_request(url, type_='POST', data=render_json(**data))
        # 201 CREATED is the success status for replica creation.
        if r.status_code == codes.created:
            return True
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def delete_replicas(self, rse, files, ignore_availability=True):
"""
Bulk delete file replicas from a RSE.
:param rse: the RSE name.
:param files: The list of files. This is a list of DIDs like :
[{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
:param ignore_availability: Ignore the RSE blocklist.
:return: True if files have been deleted successfully.
"""
url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
data = {'rse': rse, 'files': files, 'ignore_availability': ignore_availability}
r = self._send_request(url, type_='DEL', data=render_json(**data))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def update_replicas_states(self, rse, files):
"""
Bulk update the file replicas states from a RSE.
:param rse: the RSE name.
:param files: The list of files. This is a list of DIDs like :
[{'scope': <scope1>, 'name': <name1>, 'state': <state1>}, {'scope': <scope2>, 'name': <name2>, 'state': <state2>}, ...],
where a state value can be either of:
'A' (AVAILABLE)
'U' (UNAVAILABLE)
'C' (COPYING)
'B' (BEING_DELETED)
'D' (BAD)
'T' (TEMPORARY_UNAVAILABLE)
:return: True if replica states have been updated successfully, otherwise an exception is raised.
"""
url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
data = {'rse': rse, 'files': files}
r = self._send_request(url, type_='PUT', data=render_json(**data))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_dataset_replicas(self, scope, name, deep=False):
"""
List dataset replicas for a did (scope:name).
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:returns: A list of dict dataset replicas.
"""
payload = {}
if deep:
payload = {'deep': True}
url = build_url(self.host,
path='/'.join([self.REPLICAS_BASEURL, quote_plus(scope), quote_plus(name), 'datasets']),
params=payload)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_dataset_replicas_bulk(self, dids):
"""
List dataset replicas for a did (scope:name).
:param dids: The list of DIDs of the datasets.
:returns: A list of dict dataset replicas.
"""
payload = {'dids': list(dids)}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'datasets_bulk']))
r = self._send_request(url, type_='POST', data=dumps(payload))
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def list_dataset_replicas_vp(self, scope, name, deep=False):
        """
        List dataset replicas for a DID (scope:name) using the
        Virtual Placement service.

        NOTICE: This is an RnD function and might change or go away at any time.

        :param scope: The scope of the dataset.
        :param name: The name of the dataset.
        :param deep: Lookup at the file level.
        :returns: If VP exists a list of dicts of sites
        """
        payload = {}
        if deep:
            payload = {'deep': True}
        # 'datasets_vp' is the Virtual Placement variant of the 'datasets' endpoint.
        url = build_url(self.host,
                        path='/'.join([self.REPLICAS_BASEURL, quote_plus(scope), quote_plus(name), 'datasets_vp']),
                        params=payload)
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
    def list_datasets_per_rse(self, rse, filters=None, limit=None):
        """
        List datasets at a RSE.

        :param rse: the rse name.
        :param filters: dictionary of attributes by which the results should be filtered.
        :param limit: limit number.
        :returns: A list of dict dataset replicas.

        NOTE(review): `filters` and `limit` are accepted but never sent to
        the server below, so the result is currently unfiltered and
        unlimited — confirm whether they should be forwarded as query
        parameters.
        """
        url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'rse', rse]))
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def add_bad_pfns(self, pfns, reason, state, expires_at):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param state: The state of the replica. Either BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:return: True if PFNs were created successfully.
"""
data = {'reason': reason, 'pfns': pfns, 'state': state, 'expires_at': expires_at}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'bad/pfns']))
headers = {}
r = self._send_request(url, headers=headers, type_='POST', data=dumps(data))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_tombstone(self, replicas):
"""
Set a tombstone on a list of replicas.
:param replicas: list of replicas.
"""
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'tombstone']))
data = {'replicas': replicas}
r = self._send_request(url, type_='POST', data=render_json(**data))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
|
#!/usr/bin/env python
import flask
import flaskext.script
import database
# Built-in application defaults; the instance folder's settings.py
# (loaded in create_app) can override any of these keys.
default_config = {
    'DATABASE_URI': 'postgresql://localhost/reportdb',
    'TESTING_DATABASE_URI': 'postgresql://localhost/reportdb_test',
    'HTTP_PROXIED': False,  # set True when served behind a reverse proxy
    'FRAME_URL': None,
}
def create_app():
    """Create and configure the Flask application.

    Order matters: defaults are applied first, the instance folder's
    ``settings.py`` (if any) overrides them, and only then are the
    database and views wired up.
    """
    # Imported here rather than at module top — presumably to avoid a
    # circular import between views and this module; confirm before moving.
    import views
    app = flask.Flask(__name__, instance_relative_config=True)
    app.config.update(default_config)
    app.config.from_pyfile('settings.py', silent=True)
    # Add the jinja2 "do" extension on top of whatever is already enabled.
    _my_extensions = app.jinja_options["extensions"] + ["jinja2.ext.do"]
    app.jinja_options = dict(app.jinja_options, extensions=_my_extensions)
    database.initialize_app(app)
    views.register_on(app)
    if app.config["HTTP_PROXIED"]:
        # Honor X-Forwarded-* headers when running behind a reverse proxy.
        from revproxy import ReverseProxied
        app.wsgi_app = ReverseProxied(app.wsgi_app)
    return app
# Flask-Script manager; exposes the commands below on the command line.
manager = flaskext.script.Manager(create_app)
@manager.command
def resetdb():
    """Drop all tables of the configured database."""
    database.get_session().drop_all()
@manager.command
def syncdb():
    """Create all tables of the configured database."""
    database.get_session().create_all()
@manager.command
def import_seris():
    """Import legacy SERIS 1 reports into the database.

    Imported fields:
    u'details_original_name'
    u'format_availability_url'
    'details_translated_in_0'
    u'details_publisher'
    'header_country_0'
    u'format_date_of_last_update'
    u'details_english_name'
    u'format_report_type'
    'details_original_language_0'
    u'short_description'

    NOTE(review): references `schema.ReportSchema` below but only does
    `from schema import _load_json`; the bare `schema` name is bound only
    because the __main__ block imports it at module level — this command
    breaks if invoked any other way.
    """
    from seris_old import SERIS_DATA
    from schema import _load_json
    countries_mapping = _load_json("refdata/seris_old_countries_mapping.json")
    countries_list = _load_json("refdata/countries_list.json")
    countries = [pair[0] for pair in countries_list]
    imported = 0
    skipped = 0
    for country, reports in SERIS_DATA.items():
        # Only countries we can map (plus the special 'eea' bucket).
        if country == 'eea' or countries_mapping[country] in countries:
            for form_data in reports:
                report_row = database.ReportRow()
                seris_review_row = database.SerisReviewRow()
                session = database.get_session()
                # Drop empty fields. Safe in Python 2 only: .items() returns
                # a list there, so deleting while iterating is fine.
                for k, v in form_data.items():
                    if v is None:
                        del(form_data[k])
                # Strip out-of-scope header_country_N entries; `found` ends
                # True if at least one in-scope country remains.
                found = False
                for count in range(0, 100):
                    header_country = form_data.get('header_country_%s' % count)
                    if header_country not in countries:
                        if header_country:
                            del(form_data['header_country_%s' % count])
                    else:
                        found = True
                if not found:
                    if country == 'eea':
                        # EEA reports with no in-scope country are skipped.
                        print ('Skipped report %s from eea: '
                               'countries not in scope' %
                               form_data.get('details_original_name'))
                        skipped += 1
                        continue
                    else:
                        form_data['header_country_0'] = countries_mapping[
                            country]
                if country == 'eea':
                    form_data['header_region_0'] = (
                        'European Environment Agency')
                if form_data.get('category') == 'National portal':
                    form_data['format_report_type'] = 'portal (dynamic source)'
                # Warn about (but still import) non-numeric years; a missing
                # year (None -> TypeError) is silently accepted.
                try:
                    int(form_data.get('format_date_of_publication'))
                except ValueError:
                    print 'Report %s in country %s - invalid year "%s"' % (
                        form_data.get('details_original_name'), country,
                        form_data.get('format_date_of_publication'))
                except TypeError:
                    pass
                report_schema = schema.ReportSchema.from_flat(form_data)
                seris_review_schema = schema.SerisReviewSchema.from_flat(
                    form_data)
                report_row.update(report_schema.flatten())
                uploader = 'Imported from SERIS 1'
                report_row['header_uploader'] = uploader
                report_row['header_upload_date'] = '01 Jan 1999, 00:00'
                session.save(report_row)
                # Link the review row to the freshly saved report.
                seris_review_schema['report_id'].set(report_row.id)
                # Review part
                seris_review_row.clear()
                seris_review_row.update(seris_review_schema.flatten())
                session.save(seris_review_row)
                imported += 1
                session.commit()
    print '%s reports imported' % imported
    print '%s reports skipped' % skipped
def _debug_log(name):
import logging
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
if __name__ == '__main__':
    # Imported here so `schema` is also available as a module-level global
    # for import_seris (which references schema.ReportSchema).
    import schema
    schema.register_handler_for_empty()
    manager.run()
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'gridGame' function below.
#
# The function is expected to return a 2D_INTEGER_ARRAY.
# The function accepts following parameters:
# 1. 2D_INTEGER_ARRAY grid
# 2. INTEGER k
# 3. STRING_ARRAY rules
#
def gridGame(grid, k, rules):
    """Run k generations of the automaton defined by `rules` over `grid`.

    rules[n] says whether a cell with n live neighbours is 'alive' (1)
    or 'dead' (0) in the next generation.
    """
    RULES = {'alive': 1, 'dead': 0}
    # Neighbour counts that produce a live cell in the next generation.
    alive_counts = [n for n, rule in enumerate(rules) if RULES[rule]]
    for _ in range(k):
        counts = counter(grid)
        grid = apply_rule(grid, counts, alive_counts)
    return grid
def apply_rule(grid, neighbour, idx):
    """Overwrite `grid` in place: a cell becomes 1 iff its neighbour count
    appears in `idx` (the live-producing counts). Returns `grid`.
    """
    rows, cols = len(grid), len(grid[0])
    for r in range(rows):
        for c in range(cols):
            grid[r][c] = 1 if neighbour[r][c] in idx else 0
    return grid
def find_nn(x, y, m, n):
    """Return the Moore-neighbourhood coordinates of (x, y) inside an
    m x n grid, in row-major order; empty if (x, y) is out of bounds.
    """
    if not (0 <= x < m and 0 <= y < n):
        return []
    neighbours = []
    for nx in range(x - 1, x + 2):
        for ny in range(y - 1, y + 2):
            if 0 <= nx < m and 0 <= ny < n and (nx, ny) != (x, y):
                neighbours.append((nx, ny))
    return neighbours
def counter(grid):
    """Return a same-shaped matrix of live-neighbour counts for `grid`."""
    rows, cols = len(grid), len(grid[0])
    neighbour = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            neighbour[r][c] = sum(1 for (nr, nc) in find_nn(r, c, rows, cols)
                                  if grid[nr][nc] == 1)
    return neighbour
if __name__ == '__main__':
    # HackerRank-style harness: reads the grid, step count and rules from
    # stdin and writes the final grid to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    grid_rows = int(input().strip())
    grid_columns = int(input().strip())
    grid = []
    for _ in range(grid_rows):
        grid.append(list(map(int, input().rstrip().split())))
    k = int(input().strip())
    rules_count = int(input().strip())
    rules = []
    for _ in range(rules_count):
        rules_item = input()
        rules.append(rules_item)
    result = gridGame(grid, k, rules)
    fptr.write('\n'.join([' '.join(map(str, x)) for x in result]))
    fptr.write('\n')
    fptr.close()
# NOTE(review): second __main__ guard — dead code on import and broken when
# run: targetsum_set / targetsum_sort are not defined anywhere in this file
# (this looks like a fragment pasted from another script), so reaching it
# raises NameError. The stray trailing '|' below is extraction residue.
if __name__ == "__main__":
    k = 15
    arr = [12, 3, 4, 5, 5, 7, 8, 9, 7]
    print(targetsum_set(arr, k))
    print(targetsum_sort(arr, k)) |
# Time complexity:  O(n log n)  (dominated by the initial sort)
# Space complexity: O(n)        (output list)
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge overlapping intervals; result is sorted by start point."""
        ordered = sorted(intervals, key=lambda iv: iv[0])
        merged = []
        for iv in ordered:
            # Overlap with the most recent merged interval -> extend it.
            if merged and merged[-1][1] >= iv[0]:
                prev = merged[-1]
                merged[-1] = [prev[0], max(prev[1], iv[1])]
            else:
                merged.append(iv)
        return merged
|
from time import sleep
import requests
from bs4 import BeautifulSoup
URL = 'http://cs.ouc.edu.cn/news-list.aspx?nc=14'
if __name__ == '__main__':
    # Poll the news list page forever, printing time + title of every entry
    # each cycle. NOTE(review): no error handling — any network hiccup or
    # page-structure change (ul/li/span lookups returning None) kills the loop.
    while True:
        html = requests.get(URL).text
        soup = BeautifulSoup(html, 'html.parser')
        ul = soup.find('ul', {'class': 'border-dotted'})
        li = ul.find_all('li')
        for l in li:
            print(l.find('span', {'class': 'news-time'}).text, end=' ')
            print(l.find('span', {'class': 'news-title'}).text)
        sleep(15)
|
from base64 import b64decode, b64encode
import gevent
import gevent.event
import json
from PIL import Image, ImageDraw
import StringIO
import zlib
import logging
from ajenti.api import *
from ajenti.api.http import HttpPlugin, url, SocketPlugin
from ajenti.plugins.configurator.api import ClassConfigEditor
from ajenti.plugins.main.api import SectionPlugin, intent
from ajenti.ui import UIElement, p, on
from ajenti.users import PermissionProvider, restrict
from terminal import Terminal
@plugin
class TerminalClassConfigEditor (ClassConfigEditor):
    """Config editor UI for the Terminals plugin's class configuration."""
    title = _('Terminal')
    icon = 'list-alt'
    def init(self):
        # Load the editor layout from the XML UI template.
        self.append(self.ui.inflate('terminal:config'))
@plugin
class Terminals (SectionPlugin):
    """Ajenti section plugin managing a set of Terminal sessions."""
    # Shell launched for new terminals unless overridden in class config.
    default_classconfig = {'shell': 'sh -c $SHELL || bash'}
    classconfig_editor = TerminalClassConfigEditor
    def init(self):
        """Set up the section UI and the per-session terminal registry."""
        self.title = _('Terminal')
        self.icon = 'list-alt'
        self.category = _('Tools')
        self.append(self.ui.inflate('terminal:main'))
        # Maps integer terminal id -> Terminal instance; shared via the
        # session so TerminalSocket/TerminalHttp can reach the same dict.
        self.terminals = {}
        self.context.session.terminals = self.terminals
    def on_page_load(self):
        self.refresh()
    @intent('terminals:refresh')
    def refresh(self):
        """Rebuild the thumbnail list, dropping dead auto-close terminals."""
        ulist = self.find('list')
        ulist.empty()
        self.find('empty').visible = len(self.terminals) == 0
        # Python 2 idiom: list(...iteritems()) snapshots the entries so we
        # can pop from the dict while iterating.
        for k, v in list(self.terminals.iteritems()):
            if v.autoclose and v.dead():
                self.terminals.pop(k)
        for k in sorted(self.terminals.keys()):
            thumb = TerminalThumbnail(self.ui)
            thumb.tid = k
            thumb.on('close', self.on_close, k)
            ulist.append(thumb)
    def run_shell(self, command=None, autoopen=False, autoclose=True, callback=None, **kwargs):
        """Start a new terminal running `command` (default: configured shell).

        :param autoopen: open a browser tab for the terminal immediately.
        :param autoclose: close the tab when the command exits cleanly.
        :param callback: invoked when the command finishes.
        :return: integer id of the new terminal.
        """
        if not command:
            command = self.classconfig['shell']
        # Next id = highest existing id + 1; ids start at 0.
        if self.terminals:
            key = sorted(self.terminals.keys())[-1] + 1
        else:
            key = 0
        url = '/ajenti:terminal/%i' % key
        def _callback(exitcode=None):
            if callback:
                callback()
            # Auto-close the tab only on a clean (exit code 0) termination.
            if autoclose and exitcode == 0:
                self.context.endpoint.send_close_tab(url)
        self.terminals[key] = Terminal(command, autoclose=autoclose, callback=_callback, **kwargs)
        self.refresh()
        if autoopen:
            self.context.endpoint.send_open_tab(url, 'Terminal %i' % key)
        return key
    @intent('terminal')
    def launch(self, command=None, callback=None):
        """Intent entry point: run `command` in a new auto-opened terminal."""
        self.run_shell(command, autoclose=True, autoopen=True, callback=callback)
    @on('new-button', 'click')
    @restrict('terminal:shell')
    def on_new(self):
        """UI: open a persistent interactive shell (no tab auto-open/close)."""
        self.run_shell(command=None, autoopen=False, autoclose=False)
    @on('run-button', 'click')
    @restrict('terminal:custom')
    def on_run(self):
        """UI: run the user-typed command in a transient terminal."""
        self.run_shell(self.find('command').value, autoclose=True, autoopen=True)
    def on_close(self, k):
        """Kill and forget terminal `k`, then refresh the thumbnails."""
        self.terminals[k].kill()
        self.terminals.pop(k)
        self.refresh()
@plugin
class TerminalHttp (BasePlugin, HttpPlugin):
    """HTTP endpoints serving the terminal page and live thumbnails."""
    # Palette used to map named terminal colors to RGB hex values.
    colors = {
        'green': '#859900',
        'white': '#eee8d5',
        'yellow': '#b58900',
        'red': '#dc322f',
        'magenta': '#d33682',
        'violet': '#6c71c4',
        'blue': '#268bd2',
        'cyan': '#2aa198',
    }
    @url('/ajenti:terminal/(?P<id>\d+)')
    def get_page(self, context, id):
        """Serve the terminal page shell (static HTML)."""
        # NOTE(review): there is no `return` after the redirect — execution
        # falls through and still serves the page for anonymous sessions;
        # confirm whether respond_redirect aborts internally.
        if context.session.identity is None:
            context.respond_redirect('/')
        context.add_header('Content-Type', 'text/html')
        context.respond_ok()
        return self.open_content('static/index.html').read()
    @url('/ajenti:terminal/(?P<id>\d+)/thumbnail')
    def get_thumbnail(self, context, id):
        """Render the terminal screen buffer as a small PNG preview.

        Each character cell becomes a 1x2-pixel stripe: the top pixel is
        the background color, the bottom pixel the foreground when the
        cell holds a printable character.
        """
        terminal = context.session.terminals[int(id)]
        img = Image.new("RGB", (terminal.width, terminal.height * 2 + 20))
        draw = ImageDraw.Draw(img)
        draw.rectangle([0, 0, terminal.width, terminal.height], fill=(0, 0, 0))
        for y in range(0, terminal.height):
            for x in range(0, terminal.width):
                # buffer cell layout: (character, foreground, background)
                fc = terminal.screen.buffer[y][x][1]
                if fc == 'default':
                    fc = 'lightgray'
                if fc in self.colors:
                    fc = self.colors[fc]
                fc = ImageDraw.ImageColor.getcolor(fc, 'RGB')
                bc = terminal.screen.buffer[y][x][2]
                if bc == 'default':
                    bc = 'black'
                if bc in self.colors:
                    bc = self.colors[bc]
                bc = ImageDraw.ImageColor.getcolor(bc, 'RGB')
                ch = terminal.screen.buffer[y][x][0]
                # Foreground only for non-blank characters (> space).
                draw.point((x, 10 + y * 2 + 1), fill=(fc if ord(ch) > 32 else bc))
                draw.point((x, 10 + y * 2), fill=bc)
        sio = StringIO.StringIO()
        img.save(sio, 'PNG')
        context.add_header('Content-type', 'image/png')
        context.respond_ok()
        return sio.getvalue()
@p('tid', default=0, type=int)
@plugin
class TerminalThumbnail (UIElement):
    """UI element showing a live thumbnail of one terminal (id in `tid`)."""
    typeid = 'terminal:thumbnail'
@plugin
class TerminalSocket (SocketPlugin):
    """Socket endpoint streaming terminal I/O between pty and browser."""
    name = '/terminal'
    def on_connect(self):
        """Reset per-connection state and ask the client to (re)select a tid."""
        self.emit('re-select')
        self.terminal = None
        # ready_to_send: client acknowledged the previous frame;
        # have_data: the terminal produced new output.
        # The sender greenlet waits on both before shipping a frame.
        self.ready_to_send = gevent.event.Event()
        self.have_data = gevent.event.Event()
    def on_message(self, message):
        """Dispatch client messages: select / key / input / read."""
        if message['type'] == 'select':
            self.id = int(message['tid'])
            try:
                self.terminal = self.context.session.terminals.get(self.id)
            except AttributeError:
                # Session has no terminals registry (e.g. expired session).
                logging.error('Cannot assign terminal')
                self.terminal = None
            if self.terminal is None:
                # Unknown terminal id: close the stale browser tab.
                url = '/ajenti:terminal/%i' % self.id
                self.context.endpoint.send_close_tab(url)
                return
            # Replay the scrollback, then start the I/O greenlets.
            self.send_data(self.terminal.protocol.history())
            self.spawn(self.worker)
            self.spawn(self.sender)
        if message['type'] == 'key':
            if self.terminal:
                # Single keystroke, base64-encoded by the client.
                ch = b64decode(message['key'])
                self.terminal.write(ch)
                self.ready_to_send.set()
        if message['type'] == 'input':
            if self.terminal:
                data = message['content']
                self.terminal.write(data)
                self.ready_to_send.set()
        if message['type'] == 'read':
            self.ready_to_send.set()
    def worker(self):
        """Greenlet: pump the pty, flag new output, reap dead terminals."""
        while True:
            self.terminal.protocol.read(timeout=1)
            if self.terminal.protocol.has_updates():
                self.have_data.set()
            if self.terminal.dead():
                del self.context.session.terminals[self.id]
                self.context.launch('terminals:refresh')
                return
    def sender(self):
        """Greenlet: ship screen updates once the client is ready for them."""
        while True:
            self.ready_to_send.wait()
            self.have_data.wait()
            data = self.terminal.protocol.format()
            self.have_data.clear()
            self.send_data(data)
            self.ready_to_send.clear()
            if self.terminal.dead():
                return
    def send_data(self, data):
        """JSON-encode, compress and base64 a frame before emitting it.

        The [2:-4] slice strips the zlib header and checksum, leaving a
        raw deflate stream for the client-side inflater.
        """
        data = b64encode(zlib.compress(json.dumps(data))[2:-4])
        self.emit('set', data)
@plugin
class TerminalPermissionsProvider (PermissionProvider):
    """Registers the permissions gating the terminal plugin's actions."""
    def get_name(self):
        # Category label shown in the permissions UI.
        return _('Terminal')
    def get_permissions(self):
        # (permission id, human readable label) pairs.
        return [
            ('terminal:shell', _('Run shell')),
            ('terminal:custom', _('Run custom commands')),
        ]
|
from django.db import models
from django.conf import settings
class Note(models.Model):
    """A user-owned note with an optional link.

    NOTE(review): the creation-timestamp field is (mis)spelled `timestand`;
    renaming it would require a DB migration, so it is documented as-is.
    """
    title = models.CharField(max_length=100)
    #image = models.ImageField(blank= True, null= True)
    url = models.URLField(blank= True, null= True)
    timestand = models.DateTimeField(auto_now_add=True)  # set once, on creation
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
    def get_delete_url(self):
        """Path of the delete view for this note."""
        return f"/notes/{self.pk}/delete"
    def get_update_url(self):
        """Path of the update view for this note."""
        return f"/notes/{self.pk}/update"
|
#!/usr/bin/env python3
import time
import torch
import torch.nn.functional as F
from dataloader2 import get_loaders
from model2 import DynamicAttention
# set seed for reproducibility
seed = 0
torch.manual_seed(seed)
if torch.cuda.is_available():
    # also seed every CUDA device so GPU runs are reproducible
    torch.cuda.manual_seed_all(seed)
def main():
    """Run the trained DynamicAttention net over the validation set and
    dump per-timestep softmax outputs and attention weights to outputs/.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 1
    num_workers = 0
    sequence_len = 100
    # get DataLoader object
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, num_workers, shuffle=False)
    print('Dataset size:', dataset_sizes)
    # Bug fix: removed `it = iter(dataloader)` — `dataloader` was undefined
    # (the variable is `dataloaders`) and `it` was never used, so that line
    # raised NameError before the loop even started.
    # Build and load the network once: it is loop-invariant (the original
    # recreated and reloaded it for every batch with identical weights).
    hidden_size = 512
    rnn_layers = 2
    pretrained = True
    net = DynamicAttention(hidden_size, rnn_layers, pretrained).to(device)
    net.load_state_dict(torch.load('data/net_params.pkl'))
    net = net.eval()  # evaluation mode: disables dropout/batchnorm updates
    for i, batch in enumerate(dataloaders['Valid']):
        # extract data; transpose to (sequence, batch, ...) layout
        X_frames, X_objs, y = batch
        X_frames = X_frames.transpose(0, 1).to(device)
        X_objs = X_objs.transpose(0, 1).to(device)
        y = y.transpose(0, 1).to(device)
        # fresh hidden states per sequence
        states = net.init_states(batch_size, device)
        out_path = 'outputs/{}.txt'.format(i + 1)
        with open(out_path, 'w') as f:
            # for each timestep
            for t in range(sequence_len):
                start_time = time.time()
                frame = X_frames[t]
                objs = X_objs[t]
                print('objs:', objs[0, :, 0, 100, 100])
                output, states, attn = net.forward(frame, objs, states)
                output = F.softmax(output.squeeze(), dim=0)
                attn = attn.squeeze()
                output = output.tolist()
                attn = attn.tolist()
                print('output:', output)
                print('attn:', attn)
                # one line per timestep: outputs then attention weights
                for item in output:
                    f.write(str(item) + ' ')
                for item in attn:
                    f.write(str(item) + ' ')
                f.write('\n')
                # include ~1/30 s frame time so FPS reflects real-time playback
                fps = 1 / ((time.time() - start_time) + 1/30)
                print('FPS:', fps)
                print()
if __name__ == '__main__':
    main()
|
"""Controller do Grupo."""
class Grupo(object):
"""Classe responsavel por gerenciar a View e Model relacionados a Grupo.
Attributes:
view (View:obj): Objeto root View
model (Model:obj): Objeto root Model
"""
def __init__(self, controller: object) -> None:
"""Construtor padrao, define os atributos view e model."""
self.view = controller.view
self.model = controller.model
def carregar_grupos(self) -> None:
"""Busca os grupos no Model e carrega a listagem dos grupos na View."""
for grupo in self.model.grupo.grupos:
self.view.grupo.lista_de_grupos.adicionar(grupo=grupo)
def evento_sortear(self, valor: dict) -> None:
self.model.grupo.grupo = valor
self.view.destruir_container_ativo()
self.view.criar_container_home()
self.view.home.criar_janela_de_cadastro()
def evento_cadastrar(self) -> None:
"""Evento click do botao cadastrar.
- Cria a janela de cadastro
"""
self.view.grupo.criar_janela_de_cadastro()
def evento_cancelar_cadastro(self) -> None:
"""Evento click do botao cancelar no formulario.
- Destroi a janela de cadastro
"""
self.view.grupo.destruir_janela_de_cadastro()
def evento_remover_grupo(self, id_grupo: str) -> None:
"""Evento click do botao remover da lista de grupos.
Args:
id_grupo (str): ID do grupo que sera removido
- Remove o grupo do bd
- Destroi a lista de grupos
- Carrega a lista de grupos
"""
self.model.grupo.remover_grupo(id_grupo=id_grupo)
self.view.grupo.destruir_lista_de_grupos()
self.carregar_grupos()
self.model.apresentacao.obter_apresentacoes()
def evento_confirmar_cadastro(self) -> None:
"""Evento click do botao confirmar do formulario de cadastro.
- Obtem os campos do formulario
- Valida os campos a procura de erros
- se tudo ok -> continua
- se ouver erro -> cria uma janela de erro
- Gera os grupos de acordo com os dados do formulario
- Salva cada grupo gerado
- Destroi janela de cadastro
- Destroi a lista de grupos
- Carrega a lista de grupos
"""
form = self.view.grupo.janela_de_cadastro.obter_campos()
erro = self.model.grupo.validar_campos(formulario=form)
if erro:
self.view.criar_janela_de_erro(erro=erro)
return
grupos = self.model.grupo.gerar_grupos(formulario=form)
for grupo in grupos:
self.model.grupo.cadastrar_grupo(grupo=grupo)
self.view.grupo.destruir_janela_de_cadastro()
self.view.grupo.destruir_lista_de_grupos()
self.carregar_grupos()
def evento_elemento_montado(self) -> None:
"""Evento disparado quando o componente/container Grupo e montado.
- Inicia o componente/container Grupo
- Carrea a lista de grupos
"""
self.view.grupo.iniciar()
self.carregar_grupos()
|
import pickle
import socket
import struct
import threading
import cv2
class ConfigDetectSocket(threading.Thread):
    """Thread that streams JPEG-encoded frames from a queue to a TCP server.

    Wire format: a 4-byte big-endian length prefix followed by the pickled
    JPEG buffer.
    """
    def __init__(self, queue, configure):
        """
        :param queue: frame queue; .get() yields BGR images (or None).
        :param configure: settings object providing config_detect_ip/port.
        """
        super().__init__()
        self.connected = set()  # NOTE(review): never used below; kept for API compat
        self.queue = queue
        self.configure = configure
        self.server_ip = configure.config_detect_ip
        self.port = configure.config_detect_port

    def send(self):
        """Connect to the server and forward frames until an error occurs."""
        try:
            self.queue.queues.clear()  # drop stale frames queued before connect
            client_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
            client_socket.connect((self.server_ip, self.port))
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
            while True:
                try:
                    data = self.queue.get()
                    if data is not None:
                        result, frame = cv2.imencode('.jpg', data, encode_param)
                        payload = pickle.dumps(frame, 0)
                        client_socket.sendall(struct.pack(">L", len(payload)) + payload)
                except Exception as e:
                    print(e)
                    client_socket.close()
                    # Bug fix: the original kept looping after closing the
                    # socket, spinning and printing an error for every
                    # remaining frame; stop the stream instead.
                    break
        except Exception as e:
            print(e)

    def run(self):
        """Thread entry point."""
        self.send()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.