# (dataset-export artifact removed: markdown table header "text stringlengths 38 1.54M")
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from .factory import PlayerFactory, TeamFactory
from .models import PlayerModel, TeamModel
class TestTeam(APITestCase):
    """
    Happy-path test cases for the Team API endpoints:
    list, create, detail, update and delete.
    """
    def setUp(self):
        """
        Setup function used to create temporary records for test cases.
        """
        self.name = "CSK"
        self.club_state = "MP"
        self.team = []
        # Create multiple team records so list/detail tests have data.
        for _ in range(3):
            team = TeamFactory()
            self.team.append(
                {"pk": team.pk, "name": team.name, "club_state": team.club_state}
            )
        self.team_list_url = reverse("team_list")
        self.team_detail_url = reverse("team_detail", args=[self.team[0]["pk"]])
        # Payload shared by the create/update tests.
        self.data = {
            "name": self.name,
            "club_state": self.club_state,
        }
    def test_team_list_all(self):
        """
        Test case for team list.
        """
        response = self.client.get(self.team_list_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_200_OK
        # Check length of response data with created data.
        assert len(response.data) == len(self.team)
    def test_team_create(self):
        """
        Test case for create team.
        """
        response = self.client.post(self.team_list_url, data=self.data, format="json")
        # Assert the status code first: on a failed create this yields a clear
        # assertion error instead of a KeyError when indexing response.data.
        assert response.status_code == status.HTTP_201_CREATED
        # Check returned id is an integer.
        assert isinstance(response.data["id"], int)
        # The persisted record must match the posted payload.
        team = TeamModel.objects.get(pk=response.data["id"])
        assert team.name == self.data["name"]
        assert team.club_state == self.data["club_state"]
    def test_team_detail(self):
        """
        Test case for team get detail.
        """
        response = self.client.get(self.team_detail_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_200_OK
        assert response.data["id"] == self.team[0]["pk"]
    def test_team_update(self):
        """
        Test case for update team.
        """
        response = self.client.put(self.team_detail_url, data=self.data, format="json")
        # Check status code for success url before touching the body.
        assert response.status_code == status.HTTP_200_OK
        # Check returned id is an integer.
        assert isinstance(response.data["id"], int)
        team = TeamModel.objects.get(pk=response.data["id"])
        assert team.name == self.data["name"]
        assert team.club_state == self.data["club_state"]
    def test_team_delete(self):
        """
        Test case for team delete.
        """
        response = self.client.delete(self.team_detail_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_204_NO_CONTENT
class TestPlayer(APITestCase):
    """
    Happy-path test cases for the Player API endpoints:
    list, create, detail, update and delete.
    """
    def setUp(self):
        """
        Setup function used to create temporary records for test cases.
        """
        self.first_name = "Rohit"
        self.last_name = "Thakur"
        self.country = "Indore"
        self.jersey_number = 111
        # Payload shared by the create/update tests.
        self.data = {
            "first_name": self.first_name,
            "last_name": self.last_name,
            "country": self.country,
            "jersey_number": self.jersey_number,
        }
        self.team = TeamFactory()
        # Creating multiple player objects and appending into a list.
        self.player_list = []
        for _ in range(3):
            player = PlayerFactory(team=self.team)
            self.player_list.append(
                {
                    "pk": player.pk,
                    "first_name": player.first_name,
                    "last_name": player.last_name,
                    "country": player.country,
                    "jersey_number": player.jersey_number,
                }
            )
        self.player_list_url = reverse("player_list", args=[self.team.id])
        self.player_detail_url = reverse(
            "player_detail", args=[self.team.id, self.player_list[0]["pk"]],
        )
    def test_player_list_all(self):
        """
        Test case for player list.
        """
        response = self.client.get(self.player_list_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_200_OK
        # Check length of response data with created data.
        assert len(response.data) == len(self.player_list)
    def test_player_create(self):
        """
        Test case for create player.
        """
        response = self.client.post(self.player_list_url, data=self.data, format="json")
        # Assert the status code first: on a failed create this yields a clear
        # assertion error instead of a KeyError when indexing response.data.
        assert response.status_code == status.HTTP_201_CREATED
        # Check returned id is an integer.
        assert isinstance(response.data["PlayerId"], int)
        player = PlayerModel.objects.get(pk=response.data["PlayerId"])
        # Checking response data with created data in setup.
        assert player.first_name == self.data["first_name"]
        assert player.last_name == self.data["last_name"]
        assert player.country == self.data["country"]
        assert player.jersey_number == self.data["jersey_number"]
    def test_player_detail(self):
        """
        Test case for player get detail.
        """
        response = self.client.get(self.player_detail_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_200_OK
        assert response.data["id"] == self.player_list[0]["pk"]
    def test_player_update(self):
        """
        Test case for update player.
        """
        response = self.client.put(
            self.player_detail_url, data=self.data, format="json"
        )
        # Check status code for success url before touching the body.
        assert response.status_code == status.HTTP_200_OK
        # Check returned id is an integer.
        assert isinstance(response.data["PlayerId"], int)
        player = PlayerModel.objects.get(pk=response.data["PlayerId"])
        assert player.first_name == self.data["first_name"]
        assert player.last_name == self.data["last_name"]
        assert player.country == self.data["country"]
        assert player.jersey_number == self.data["jersey_number"]
    def test_player_delete(self):
        """
        Test case for player delete.
        """
        response = self.client.delete(self.player_detail_url, {}, format="json")
        # Check status code for success url.
        assert response.status_code == status.HTTP_204_NO_CONTENT
# ---- second concatenated file: semantic-similarity script ----
# import all the required libraries
# spacy for lemmatization
import en_core_web_sm
import numpy as np
import pandas as pd
nlp = en_core_web_sm.load()
# plotting tool
# import pyLDAvis
# import pyLDAvis.gensim
import nltk
from nltk import word_tokenize
import re
from nltk.metrics import edit_distance
from nltk.corpus import wordnet as wsn
# Absolute, machine-specific path to the raw breakdown CSV.
filepath = '/home/maitreyee/Development/Dm_develop/code/plotdata1_test.csv'
# NOTE(review): data_brkdwn is loaded here but never referenced anywhere
# below — confirm whether this module-level read is still needed.
data_brkdwn = pd.read_csv(filepath, sep='\t')
def sent_to_wrd(q1, q2):
    """Tokenize both input sentences; return the two token lists."""
    tokens_first = word_tokenize(q1)
    tokens_second = word_tokenize(q2)
    return tokens_first, tokens_second
# perform post tagging and stemming
# perform stemming
class Lesk():
    """
    Simplified Lesk word-sense disambiguation over wordnet glosses.

    ``self.meaning`` maps each word of *sentence* to its best sense name
    found so far (empty string until disambiguated by ``lesk``).
    """
    def __init__(self, sentence):
        self.sentence = sentence
        self.meaning = {}
        for word in sentence:
            self.meaning[word] = ''
    def get_senses(self, words):
        """Return all wordnet synsets for the (lowercased) word."""
        return wsn.synsets(words.lower())
    def gloss(self, senses):
        """Map each sense name to the token list of its definition."""
        gloss = {}
        for sense in senses:
            gloss[sense.name()] = []
        for sense in senses:
            gloss[sense.name()] += word_tokenize(sense.definition())
        return gloss
    def getAllsenses(self, word):
        """Gloss tokens per sense; a word with no senses maps to an empty list."""
        senses = self.get_senses(word)
        if senses == []:
            return {word.lower(): senses}
        return self.gloss(senses)
    def score(self, set1, set2):
        """Count overlapping tokens between two gloss token lists.

        BUG FIX: the original shadowed the loop variable (``for word in
        set1: for word in set2:``) and incremented unconditionally, so the
        score was always len(set1) * len(set2) regardless of content.
        Only matching tokens are counted now.
        """
        overlap = 0
        for token1 in set1:
            for token2 in set2:
                if token1 == token2:
                    overlap += 1
        return overlap
    def overlapScore(self, word1, word2):
        """Best sense of word1 by gloss overlap against word2's glosses."""
        gloss_set1 = self.getAllsenses(word1)
        if self.meaning[word2] == '':
            gloss_set2 = self.getAllsenses(word2)
        else:
            # word2 is already disambiguated: compare against its chosen sense only.
            gloss_set2 = self.gloss([wsn.synset(self.meaning[word2])])
        score = {}
        for i in gloss_set1.keys():
            score[i] = 0
            for j in gloss_set2.keys():
                score[i] += self.score(gloss_set1[i], gloss_set2[j])
        bestSense = None
        max_Score = 0
        for i in gloss_set1.keys():
            if score[i] > max_Score:
                max_Score = score[i]
                bestSense = i
        return bestSense, max_Score
    def lesk(self, word, sentence):
        """Disambiguate *word* against its sentence context.

        Returns (word, best_sense_name, definition), or (word, None, None)
        when the word has no wordnet senses. Caches the chosen sense in
        ``self.meaning[word]``.
        """
        senses = self.get_senses(word)
        # Hoisted early-out: with no senses there is nothing to score,
        # so skip the (previously wasted) pass over the context.
        if senses == []:
            return word, None, None
        meaning = {}
        for sense in senses:
            meaning[sense.name()] = 0
        for word_context in sentence:
            if not word == word_context:
                best = self.overlapScore(word, word_context)
                if best[0] is None:
                    continue
                meaning[best[0]] += best[1]
        self.meaning[word] = max(meaning.keys(), key=lambda x: meaning[x])
        return word, self.meaning[word], wsn.synset(self.meaning[word]).definition()
def path(set1, set2):
    """Wordnet path similarity between two synsets (may be None)."""
    similarity = wsn.path_similarity(set1, set2)
    return similarity
def wup(set1, set2):
    """Wordnet Wu-Palmer similarity between two synsets (may be None)."""
    similarity = wsn.wup_similarity(set1, set2)
    return similarity
def edit(word1, word2):
    """Inverse Levenshtein similarity of two words.

    Returns 0.0 for identical words (instead of dividing by zero),
    otherwise 1 / edit_distance so closer words score higher.
    """
    # Compute the (quadratic-cost) distance once — the original called
    # edit_distance twice for every non-identical pair.
    distance = float(edit_distance(word1, word2))
    if distance == 0.0:
        return 0.0
    return 1.0 / distance
def compute_path(q1, q2):
    """Pairwise path-similarity matrix for two sense-tagged token lists.

    q1/q2 are lists of (word, sense_name, definition) triples as produced
    by Lesk.lesk. Tokens without a wordnet sense fall back to the
    edit-distance similarity.
    """
    R = np.zeros((len(q1), len(q2)))
    for i in range(len(q1)):
        for j in range(len(q2)):
            if q1[i][1] is None or q2[j][1] is None:
                sim = edit(q1[i][0], q2[j][0])
            else:
                # BUG FIX: the original assigned `sim` only in the None
                # branch, leaving it unbound (NameError) or stale from the
                # previous pair whenever both tokens had senses. Mirror
                # computeWUP, using path similarity here.
                sim = path(wsn.synset(q1[i][1]), wsn.synset(q2[j][1]))
                if sim is None:
                    sim = edit(q1[i][0], q2[j][0])
            R[i, j] = sim
    return R
# compute WUP distance
def computeWUP(q1, q2):
    """Pairwise Wu-Palmer similarity matrix for two sense-tagged token lists.

    Tokens without a wordnet sense — and sense pairs for which WUP is
    undefined — fall back to the edit-distance similarity.
    """
    rows, cols = len(q1), len(q2)
    R = np.zeros((rows, cols))
    for i in range(rows):
        word_i, sense_i = q1[i][0], q1[i][1]
        for j in range(cols):
            word_j, sense_j = q2[j][0], q2[j][1]
            if sense_i is None or sense_j is None:
                similarity = edit(word_i, word_j)
            else:
                similarity = wup(wsn.synset(sense_i), wsn.synset(sense_j))
                if similarity is None:
                    similarity = edit(word_i, word_j)
            R[i, j] = similarity
    return R
def overallSim(q1, q2, R):
    """Aggregate the similarity matrix R into a single score.

    Sums the best match of every q1 token (row maxima) and of every q2
    token (column maxima), normalised by the combined sentence lengths.
    Returns 0.0 for two empty inputs.
    """
    sum_X = 0.0
    sum_Y = 0.0
    # BUG FIX: the original never updated max_i inside the inner loop and
    # added it while still 0.0, so sum_X was always 0; and its second loop
    # recomputed row maxima over q1 instead of column maxima over q2.
    for i in range(len(q1)):
        max_i = 0.0
        for j in range(len(q2)):
            if R[i, j] > max_i:
                max_i = R[i, j]
        sum_X += max_i
    for j in range(len(q2)):
        max_j = 0.0
        for i in range(len(q1)):
            if R[i, j] > max_j:
                max_j = R[i, j]
        sum_Y += max_j
    # Guard against division by zero for two empty token lists.
    if (float(len(q1)) + float(len(q2))) == 0.0:
        return 0.0
    overall = (sum_X + sum_Y) / (2 * (float(len(q1)) + float(len(q2))))
    return overall
def semanticSimilarity(q1, q2):
    """Sentence-level semantic similarity of q1 and q2.

    Tokenizes both sentences, keeps noun/adjective-like tokens, sense-tags
    them with Lesk, builds path- and WUP-similarity matrices, averages the
    two element-wise and aggregates with overallSim.
    """
    tokens_q1, tokens_q2 = sent_to_wrd(q1, q2)
    sentences = []
    sentencex = []
    # Each token is run through the spaCy pipeline individually.
    # NOTE(review): token.dep_ holds dependency labels (e.g. 'nsubj') while
    # 'NOUN' is a POS tag — `'NOUN' in token.dep_` looks like it was meant to
    # be token.pos_; confirm before changing, as it alters which tokens are kept.
    for words in tokens_q1:
        tag_q1 = nlp(words)
        for token in tag_q1:
            if 'NOUN' in token.dep_ or 'ADJ' in token.pos_:
                sentences.append(token.text)
    for wordx in tokens_q2:
        tag_q2 = nlp(wordx)
        for token in tag_q2:
            if 'NOUN' in token.dep_ or 'ADJ' in token.pos_:
                sentencex.append(token.text)
    # Sense-tag every kept token against its own sentence's token list.
    sense1 = Lesk(sentences)
    sentence1means = []
    for word in sentences:
        sentence1means.append(sense1.lesk(word, sentences))
    sense2 = Lesk(sentencex)
    sentence2means = []
    for word in sentencex:
        sentence2means.append(sense2.lesk(word, sentencex))
    # Average the two similarity matrices element-wise before aggregating.
    R1 = compute_path(sentence1means, sentence2means)
    R2 = computeWUP(sentence1means, sentence2means)
    R = (R1 + R2) / 2
    return overallSim(sentence1means, sentence2means, R)
# Stop words across all stopword languages (no language argument given);
# consumed by clean_sentence below.
STOP_WORDS = nltk.corpus.stopwords.words()
def clean_sentence(val):
    """Lowercase *val*, strip punctuation/underscores and drop stop words."""
    # Raw string fixes the invalid escape sequences (\s, \w) that raise a
    # SyntaxWarning/DeprecationWarning in a plain string literal.
    regex = re.compile(r'([^\s\w]|_)+')
    sentence = regex.sub('', val).lower()
    # Comprehension filter replaces the original remove-while-iterating
    # loop; the result is identical (all stop-word occurrences removed).
    words = [word for word in sentence.split(" ") if word not in STOP_WORDS]
    return " ".join(words)
# from sklearn.metrics import log_loss
# Load the precomputed similarity training CSV (machine-specific path).
df_sim = pd.read_csv(
'/home/maitreyee/Development/python_notebook/brkdwn_similarity.csv', sep=',')
X_train = df_sim
# Drop rows with any missing utterance before scoring.
X_train = X_train.dropna(how='any')
# y = X_train['is_duplicate']
print('Exported Cleaned train Data, no need for cleaning')
# Normalise both utterance columns in place.
for col in ['utterance1', 'utterance2']:
    X_train[col] = X_train[col].apply(clean_sentence)
y_pred = []
count = 0
print('calculating similarity for the training data, please wait.')
# Score every utterance pair; progress is printed every 1000 rows.
# NOTE(review): row[2]/row[3] assume utterance1/utterance2 are the 2nd and
# 3rd columns of the itertuples record — confirm against the CSV layout.
for row in X_train.itertuples():
    q1 = str(row[2])
    q2 = str(row[3])
    sim = semanticSimilarity(q1, q2)
    count += 1
    if count % 1000 == 0:
        print(str(count) + ", " + str(sim) + ", " + str(row[3]))
    y_pred.append(sim)
# Persist utterance + similarity score as a tab-separated file.
output = pd.DataFrame(list(zip(X_train['utterance1'].tolist(), y_pred)),
columns=['Utterance1', 'similarity'])
output.to_csv('semantic_sim.csv', index=False, sep='\t')
# ---- third concatenated file: RedisTimeSeries client ----
from redis import Redis, DataError
from redis.client import Pipeline
from redis.client import bool_ok
from redis._compat import nativestr
class TSInfo(object):
    """Parsed reply of a TS.INFO command.

    Attributes are declared at class level so fields missing from the
    server reply read as None (or an empty list).
    """
    rules = []
    labels = []
    sourceKey = None
    chunk_count = None
    memory_usage = None
    total_samples = None
    retention_msecs = None
    last_time_stamp = None
    first_time_stamp = None
    # As of RedisTimeseries >= v1.4 max_samples_per_chunk is deprecated in favor of chunk_size
    max_samples_per_chunk = None
    chunk_size = None
    duplicate_policy = None
    def __init__(self, args):
        # TS.INFO replies with a flat [name, value, name, value, ...] array.
        response = dict(zip(map(nativestr, args[::2]), args[1::2]))
        self.rules = response['rules']
        self.sourceKey = response['sourceKey']
        # BUG FIX: the original assigned self.chunkCount / self.lastTimeStamp,
        # leaving the declared snake_case attributes permanently None. Set the
        # snake_case names and keep the camelCase ones as compat aliases.
        self.chunk_count = self.chunkCount = response['chunkCount']
        self.memory_usage = response['memoryUsage']
        self.total_samples = response['totalSamples']
        self.labels = list_to_dict(response['labels'])
        self.retention_msecs = response['retentionTime']
        self.last_time_stamp = self.lastTimeStamp = response['lastTimestamp']
        self.first_time_stamp = response['firstTimestamp']
        if 'maxSamplesPerChunk' in response:
            self.max_samples_per_chunk = response['maxSamplesPerChunk']
            self.chunk_size = self.max_samples_per_chunk * 16  # backward compatible changes
        if 'chunkSize' in response:
            self.chunk_size = response['chunkSize']
        if 'duplicatePolicy' in response:
            self.duplicate_policy = response['duplicatePolicy']
            # The server may return the policy as bytes; normalise to str.
            if type(self.duplicate_policy) == bytes:
                self.duplicate_policy = self.duplicate_policy.decode()
def list_to_dict(aList):
    """Convert a list of [key, value] reply pairs into a dict of native strings."""
    result = {}
    for pair in aList:
        result[nativestr(pair[0])] = nativestr(pair[1])
    return result
def parse_range(response):
    """Parse a TS.RANGE reply into (timestamp, float value) tuples."""
    samples = []
    for sample in response:
        samples.append((sample[0], float(sample[1])))
    return samples
def parse_m_range(response):
    """Parse a TS.MRANGE reply into [{key: [labels_dict, samples]}, ...]."""
    return [
        {nativestr(entry[0]): [list_to_dict(entry[1]), parse_range(entry[2])]}
        for entry in response
    ]
def parse_get(response):
    """Parse a TS.GET reply into (int timestamp, float value); None when empty."""
    if response:
        return int(response[0]), float(response[1])
    return None
def parse_m_get(response):
    """Parse a TS.MGET reply; a series without a sample yields [labels, None, None]."""
    res = []
    for entry in response:
        key = nativestr(entry[0])
        labels = list_to_dict(entry[1])
        sample = entry[2]
        if sample == []:
            res.append({key: [labels, None, None]})
        else:
            res.append({key: [labels, int(sample[0]), float(sample[1])]})
    return res
def parseToList(response):
    """Decode every reply item to a native string."""
    return [nativestr(item) for item in response]
class Client(object): # changed from StrictRedis
"""
This class subclasses redis-py's `Redis` and implements
RedisTimeSeries's commands (prefixed with "ts").
The client allows to interact with RedisTimeSeries and use all of
it's functionality.
"""
CREATE_CMD = 'TS.CREATE'
ALTER_CMD = 'TS.ALTER'
ADD_CMD = 'TS.ADD'
MADD_CMD = 'TS.MADD'
INCRBY_CMD = 'TS.INCRBY'
DECRBY_CMD = 'TS.DECRBY'
DEL_CMD = 'TS.DEL'
CREATERULE_CMD = 'TS.CREATERULE'
DELETERULE_CMD = 'TS.DELETERULE'
RANGE_CMD = 'TS.RANGE'
REVRANGE_CMD = 'TS.REVRANGE'
MRANGE_CMD = 'TS.MRANGE'
MREVRANGE_CMD = 'TS.MREVRANGE'
GET_CMD = 'TS.GET'
MGET_CMD = 'TS.MGET'
INFO_CMD = 'TS.INFO'
QUERYINDEX_CMD = 'TS.QUERYINDEX'
def __init__(self, conn=None, *args, **kwargs):
"""
Creates a new RedisTimeSeries client.
"""
self.redis = conn if conn is not None else Redis(*args, **kwargs)
# Set the module commands' callbacks
MODULE_CALLBACKS = {
self.CREATE_CMD: bool_ok,
self.ALTER_CMD: bool_ok,
self.CREATERULE_CMD: bool_ok,
self.DELETERULE_CMD: bool_ok,
self.RANGE_CMD: parse_range,
self.REVRANGE_CMD: parse_range,
self.MRANGE_CMD: parse_m_range,
self.MREVRANGE_CMD: parse_m_range,
self.GET_CMD: parse_get,
self.MGET_CMD: parse_m_get,
self.INFO_CMD: TSInfo,
self.QUERYINDEX_CMD: parseToList,
}
for k in MODULE_CALLBACKS:
self.redis.set_response_callback(k, MODULE_CALLBACKS[k])
@staticmethod
def appendUncompressed(params, uncompressed):
if uncompressed:
params.extend(['UNCOMPRESSED'])
@staticmethod
def appendWithLabels(params, with_labels, select_labels=None):
if with_labels and select_labels:
raise DataError("with_labels and select_labels cannot be provided together.")
if with_labels:
params.extend(['WITHLABELS'])
if select_labels:
params.extend(['SELECTED_LABELS', *select_labels])
@staticmethod
def appendGroupbyReduce(params, groupby, reduce):
if groupby is not None and reduce is not None:
params.extend(['GROUPBY', groupby, 'REDUCE', reduce.upper()])
@staticmethod
def appendRetention(params, retention):
if retention is not None:
params.extend(['RETENTION', retention])
@staticmethod
def appendLabels(params, labels):
if labels:
params.append('LABELS')
for k, v in labels.items():
params.extend([k, v])
@staticmethod
def appendCount(params, count):
if count is not None:
params.extend(['COUNT', count])
@staticmethod
def appendTimestamp(params, timestamp):
if timestamp is not None:
params.extend(['TIMESTAMP', timestamp])
@staticmethod
def appendAlign(params, align):
if align is not None:
params.extend(['ALIGN', align])
@staticmethod
def appendAggregation(params, aggregation_type,
bucket_size_msec):
params.append('AGGREGATION')
params.extend([aggregation_type, bucket_size_msec])
@staticmethod
def appendChunkSize(params, chunk_size):
if chunk_size is not None:
params.extend(['CHUNK_SIZE', chunk_size])
@staticmethod
def appendDuplicatePolicy(params, command, duplicate_policy):
if duplicate_policy is not None:
if command == 'TS.ADD':
params.extend(['ON_DUPLICATE', duplicate_policy])
else:
params.extend(['DUPLICATE_POLICY', duplicate_policy])
@staticmethod
def appendFilerByTs(params, ts_list):
if ts_list is not None:
params.extend(["FILTER_BY_TS", *ts_list])
@staticmethod
def appendFilerByValue(params, min_value, max_value):
if min_value is not None and max_value is not None:
params.extend(["FILTER_BY_VALUE", min_value, max_value])
def create(self, key, **kwargs):
"""
Create a new time-series.
Args:
key: time-series key
retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
If None or 0 is passed then the series is not trimmed at all.
uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
Adding this flag will keep data in an uncompressed form. Compression not only saves
memory but usually improve performance due to lower number of memory accesses
labels: Set of label-value pairs that represent metadata labels of the key.
chunk_size: Each time-serie uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
duplicate_policy: since RedisTimeSeries v1.4 you can specify the duplicate sample policy ( Configure what to do on duplicate sample. )
Can be one of:
- 'block': an error will occur for any out of order sample
- 'first': ignore the new value
- 'last': override with latest value
- 'min': only override if the value is lower than the existing value
- 'max': only override if the value is higher than the existing value
When this is not set, the server-wide default will be used.
"""
retention_msecs = kwargs.get('retention_msecs', None)
uncompressed = kwargs.get('uncompressed', False)
labels = kwargs.get('labels', {})
chunk_size = kwargs.get('chunk_size', None)
duplicate_policy = kwargs.get('duplicate_policy', None)
params = [key]
self.appendRetention(params, retention_msecs)
self.appendUncompressed(params, uncompressed)
self.appendChunkSize(params, chunk_size)
self.appendDuplicatePolicy(params, self.CREATE_CMD, duplicate_policy)
self.appendLabels(params, labels)
return self.redis.execute_command(self.CREATE_CMD, *params)
def alter(self, key, **kwargs):
"""
Update the retention, labels of an existing key. The parameters
are the same as TS.CREATE.
"""
retention_msecs = kwargs.get('retention_msecs', None)
labels = kwargs.get('labels', {})
duplicate_policy = kwargs.get('duplicate_policy', None)
params = [key]
self.appendRetention(params, retention_msecs)
self.appendDuplicatePolicy(params, self.ALTER_CMD, duplicate_policy)
self.appendLabels(params, labels)
return self.redis.execute_command(self.ALTER_CMD, *params)
def add(self, key, timestamp, value, **kwargs):
"""
Append (or create and append) a new sample to the series.
Args:
key: time-series key
timestamp: timestamp of the sample. * can be used for automatic timestamp (using the system clock).
value: numeric data value of the sample
retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
If None or 0 is passed then the series is not trimmed at all.
uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
Adding this flag will keep data in an uncompressed form. Compression not only saves
memory but usually improve performance due to lower number of memory accesses
labels: Set of label-value pairs that represent metadata labels of the key.
chunk_size: Each time-serie uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
duplicate_policy: since RedisTimeSeries v1.4 you can specify the duplicate sample policy ( Configure what to do on duplicate sample. )
Can be one of:
- 'block': an error will occur for any out of order sample
- 'first': ignore the new value
- 'last': override with latest value
- 'min': only override if the value is lower than the existing value
- 'max': only override if the value is higher than the existing value
When this is not set, the server-wide default will be used.
"""
retention_msecs = kwargs.get('retention_msecs', None)
uncompressed = kwargs.get('uncompressed', False)
labels = kwargs.get('labels', {})
chunk_size = kwargs.get('chunk_size', None)
duplicate_policy = kwargs.get('duplicate_policy', None)
params = [key, timestamp, value]
self.appendRetention(params, retention_msecs)
self.appendUncompressed(params, uncompressed)
self.appendChunkSize(params, chunk_size)
self.appendDuplicatePolicy(params, self.ADD_CMD, duplicate_policy)
self.appendLabels(params, labels)
return self.redis.execute_command(self.ADD_CMD, *params)
def madd(self, ktv_tuples):
"""
Appends (or creates and appends) a new ``value`` to series
``key`` with ``timestamp``. Expects a list of ``tuples`` as
(``key``,``timestamp``, ``value``). Return value is an
array with timestamps of insertions.
"""
params = []
for ktv in ktv_tuples:
for item in ktv:
params.append(item)
return self.redis.execute_command(self.MADD_CMD, *params)
def incrby(self, key, value, **kwargs):
"""
Increment (or create an time-series and increment) the latest sample's of a series.
This command can be used as a counter or gauge that automatically gets history as a time series.
Args:
key: time-series key
value: numeric data value of the sample
timestamp: timestamp of the sample. None can be used for automatic timestamp (using the system clock).
retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
If None or 0 is passed then the series is not trimmed at all.
uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
Adding this flag will keep data in an uncompressed form. Compression not only saves
memory but usually improve performance due to lower number of memory accesses
labels: Set of label-value pairs that represent metadata labels of the key.
chunk_size: Each time-series uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
"""
timestamp = kwargs.get('timestamp', None)
retention_msecs = kwargs.get('retention_msecs', None)
uncompressed = kwargs.get('uncompressed', False)
labels = kwargs.get('labels', {})
chunk_size = kwargs.get('chunk_size', None)
params = [key, value]
self.appendTimestamp(params, timestamp)
self.appendRetention(params, retention_msecs)
self.appendUncompressed(params, uncompressed)
self.appendChunkSize(params, chunk_size)
self.appendLabels(params, labels)
return self.redis.execute_command(self.INCRBY_CMD, *params)
def decrby(self, key, value, **kwargs):
"""
Decrement (or create an time-series and decrement) the latest sample's of a series.
This command can be used as a counter or gauge that automatically gets history as a time series.
Args:
key: time-series key
value: numeric data value of the sample
timestamp: timestamp of the sample. None can be used for automatic timestamp (using the system clock).
retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
If None or 0 is passed then the series is not trimmed at all.
uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
Adding this flag will keep data in an uncompressed form. Compression not only saves
memory but usually improve performance due to lower number of memory accesses
labels: Set of label-value pairs that represent metadata labels of the key.
chunk_size: Each time-serie uses chunks of memory of fixed size for time series samples.
You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
"""
timestamp = kwargs.get('timestamp', None)
retention_msecs = kwargs.get('retention_msecs', None)
uncompressed = kwargs.get('uncompressed', False)
labels = kwargs.get('labels', {})
chunk_size = kwargs.get('chunk_size', None)
params = [key, value]
self.appendTimestamp(params, timestamp)
self.appendRetention(params, retention_msecs)
self.appendUncompressed(params, uncompressed)
self.appendChunkSize(params, chunk_size)
self.appendLabels(params, labels)
return self.redis.execute_command(self.DECRBY_CMD, *params)
def delrange(self, key, from_time, to_time):
"""
Delete data points for a given timeseries and interval range in the form of start and end delete timestamps.
The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted.
Return the count for deleted items.
Args:
key: time-series key.
from_time: Start timestamp for the range deletion.
to_time: End timestamp for the range deletion.
"""
return self.redis.execute_command(self.DEL_CMD, key, from_time, to_time)
def createrule(self, source_key, dest_key,
aggregation_type, bucket_size_msec):
"""
Creates a compaction rule from values added to ``source_key``
into ``dest_key``. Aggregating for ``bucket_size_msec`` where an
``aggregation_type`` can be ['avg', 'sum', 'min', 'max',
'range', 'count', 'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
"""
params = [source_key, dest_key]
self.appendAggregation(params, aggregation_type, bucket_size_msec)
return self.redis.execute_command(self.CREATERULE_CMD, *params)
def deleterule(self, source_key, dest_key):
"""Deletes a compaction rule"""
return self.redis.execute_command(self.DELETERULE_CMD, source_key, dest_key)
    def __range_params(self, key, from_time, to_time, count, aggregation_type, bucket_size_msec,
                       filter_by_ts, filter_by_min_value, filter_by_max_value, align):
        """
        Internal method to create TS.RANGE and TS.REVRANGE arguments
        """
        # Argument order matters to the server: filters first, then COUNT
        # and ALIGN, with AGGREGATION last.
        params = [key, from_time, to_time]
        self.appendFilerByTs(params, filter_by_ts)
        self.appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
        self.appendCount(params, count)
        self.appendAlign(params, align)
        # AGGREGATION is only emitted when a type was requested; the callers
        # default bucket_size_msec to 0.
        if aggregation_type is not None:
            self.appendAggregation(params, aggregation_type, bucket_size_msec)
        return params
def range(self, key, from_time, to_time, count=None, aggregation_type=None,
bucket_size_msec=0, filter_by_ts=None, filter_by_min_value=None,
filter_by_max_value=None, align=None):
"""
Query a range in forward direction for a specific time-serie.
Args:
key: Key name for timeseries.
from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
count: Optional maximum number of returned results.
aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
bucket_size_msec: Time bucket for aggregation in milliseconds.
filter_by_ts: List of timestamps to filter the result by specific timestamps.
filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
align: Timestamp for alignment control for aggregation.
"""
params = self.__range_params(key, from_time, to_time, count, aggregation_type, bucket_size_msec,
filter_by_ts, filter_by_min_value, filter_by_max_value, align)
return self.redis.execute_command(self.RANGE_CMD, *params)
def revrange(self, key, from_time, to_time, count=None, aggregation_type=None,
bucket_size_msec=0, filter_by_ts=None, filter_by_min_value=None,
filter_by_max_value=None, align=None):
"""
Query a range in reverse direction for a specific time-serie.
Note: This command is only available since RedisTimeSeries >= v1.4
Args:
key: Key name for timeseries.
from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
count: Optional maximum number of returned results.
aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
bucket_size_msec: Time bucket for aggregation in milliseconds.
filter_by_ts: List of timestamps to filter the result by specific timestamps.
filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
align: Timestamp for alignment control for aggregation.
"""
params = self.__range_params(key, from_time, to_time, count, aggregation_type, bucket_size_msec,
filter_by_ts, filter_by_min_value, filter_by_max_value, align)
return self.redis.execute_command(self.REVRANGE_CMD, *params)
    def __mrange_params(self, aggregation_type, bucket_size_msec, count, filters, from_time, to_time,
                        with_labels, filter_by_ts, filter_by_min_value, filter_by_max_value, groupby,
                        reduce, select_labels, align):
        """
        Internal method to create TS.MRANGE and TS.MREVRANGE arguments
        """
        # Argument order matters to the server: value/ts filters, COUNT,
        # ALIGN, AGGREGATION, label selection, then FILTER and GROUPBY last.
        params = [from_time, to_time]
        self.appendFilerByTs(params, filter_by_ts)
        self.appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
        self.appendCount(params, count)
        self.appendAlign(params, align)
        if aggregation_type is not None:
            self.appendAggregation(params, aggregation_type, bucket_size_msec)
        self.appendWithLabels(params, with_labels, select_labels)
        # The label filter expressions are mandatory for TS.MRANGE.
        params.extend(['FILTER'])
        params += filters
        self.appendGroupbyReduce(params, groupby, reduce)
        return params
def mrange(self, from_time, to_time, filters, count=None, aggregation_type=None, bucket_size_msec=0,
with_labels=False, filter_by_ts=None, filter_by_min_value=None, filter_by_max_value=None,
groupby=None, reduce=None, select_labels=None, align=None):
"""
Query a range across multiple time-series by filters in forward direction.
Args:
from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
filters: filter to match the time-series labels.
count: Optional maximum number of returned results.
aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
bucket_size_msec: Time bucket for aggregation in milliseconds.
with_labels: Include in the reply the label-value pairs that represent metadata labels of the time-series.
If this argument is not set, by default, an empty Array will be replied on the labels array position.
filter_by_ts: List of timestamps to filter the result by specific timestamps.
filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
groupby: Grouping by fields the results (must mention also reduce).
reduce: Applying reducer functions on each group. Can be one of ['sum', 'min', 'max'].
select_labels: Include in the reply only a subset of the key-value pair labels of a series.
align: Timestamp for alignment control for aggregation.
"""
params = self.__mrange_params(aggregation_type, bucket_size_msec, count, filters, from_time, to_time,
with_labels, filter_by_ts, filter_by_min_value, filter_by_max_value,
groupby, reduce, select_labels, align)
return self.redis.execute_command(self.MRANGE_CMD, *params)
def mrevrange(self, from_time, to_time, filters, count=None, aggregation_type=None, bucket_size_msec=0,
              with_labels=False, filter_by_ts=None, filter_by_min_value=None, filter_by_max_value=None,
              groupby=None, reduce=None, select_labels=None, align=None):
    """Execute TS.MREVRANGE: like :meth:`mrange`, but samples are returned
    newest-first (reverse direction).

    Args:
        from_time: Range start; ``-`` selects the earliest possible timestamp (0).
        to_time: Range end; ``+`` selects the latest possible timestamp.
        filters: Label filters that select which time-series are queried.
        count: Optional cap on the number of returned samples.
        aggregation_type: Optional aggregation, one of ['avg', 'sum', 'min', 'max',
            'range', 'count', 'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s'].
        bucket_size_msec: Aggregation bucket width in milliseconds.
        with_labels: When True, include each series' label/value pairs in the
            reply; otherwise the labels position holds an empty array.
        filter_by_ts: Optional list of exact timestamps to keep.
        filter_by_min_value: Minimum value filter (requires filter_by_max_value).
        filter_by_max_value: Maximum value filter (requires filter_by_min_value).
        groupby: Label to group the results by (requires reduce).
        reduce: Reducer applied per group, one of ['sum', 'min', 'max'].
        select_labels: Restrict the reply to this subset of label/value pairs.
        align: Timestamp used to align aggregation buckets.
    """
    args = self.__mrange_params(aggregation_type, bucket_size_msec, count, filters,
                                from_time, to_time, with_labels, filter_by_ts,
                                filter_by_min_value, filter_by_max_value,
                                groupby, reduce, select_labels, align)
    return self.redis.execute_command(self.MREVRANGE_CMD, *args)
def get(self, key):
    """Return the most recent sample stored in the time-series ``key``."""
    return self.redis.execute_command(self.GET_CMD, key)
def mget(self, filters, with_labels=False):
    """Fetch the latest sample of every time-series matching ``filters``.

    When ``with_labels`` is True the reply also carries each series'
    label/value pairs.
    """
    params = []
    self.appendWithLabels(params, with_labels)
    params.append('FILTER')
    params.extend(filters)
    return self.redis.execute_command(self.MGET_CMD, *params)
def info(self, key):
    """Return metadata and statistics for the time-series ``key``."""
    return self.redis.execute_command(self.INFO_CMD, key)
def queryindex(self, filters):
    """Return every time-series key whose labels match the ``filters`` list."""
    return self.redis.execute_command(self.QUERYINDEX_CMD, *filters)
def pipeline(self, transaction=True, shard_hint=None):
    """
    Return a new pipeline object that can queue multiple commands for
    later execution. ``transaction`` indicates whether all commands
    should be executed atomically. Apart from making a group of operations
    atomic, pipelines are useful for reducing the back-and-forth overhead
    between the client and server.
    Overridden in order to provide the right client through the pipeline.
    """
    p = Pipeline(
        connection_pool=self.redis.connection_pool,
        response_callbacks=self.redis.response_callbacks,
        transaction=transaction,
        shard_hint=shard_hint)
    # Point the pipeline at itself: the TS.* helper methods issue commands
    # via ``self.redis``, so this makes them enqueue onto the pipeline
    # instead of executing immediately on the underlying client.
    p.redis = p
    return p
class Pipeline(Pipeline, Client):
    """Pipeline for the Redis TimeSeries client.

    Combines redis-py's Pipeline command queueing with this module's
    TS.* command methods inherited from Client.
    """
|
from tree.tree_node import TreeNode
def inorder_traversal(root):
    """Return the node values of a binary tree in left-root-right order.

    :type root: TreeNode
    :rtype: List[int]
    """
    result = []
    stack = []
    node = root
    while stack or node is not None:
        # Slide down the left spine, stacking ancestors along the way.
        while node is not None:
            stack.append(node)
            node = node.left
        # Visit the deepest unvisited node, then move into its right subtree.
        node = stack.pop()
        result.append(node.val)
        node = node.right
    return result
|
import abc
import uuid
import base64
import datetime
from dataclasses import dataclass
from typing import Tuple, Any, List
from server_protocol import SignupRequest
class StorageLayerException(Exception):
    """Raised when a storage-layer lookup or operation cannot be completed."""
class StorageLayer(abc.ABC):
    """Abstract persistence interface for users and their queued messages."""

    @abc.abstractmethod
    def get_user_by_id(
        self, identifier: str
    ) -> Tuple[uuid.UUID, str, str, datetime.datetime]:
        """
        :param identifier: user_id
        :return: user_id, name, public_key and last_seen_time
        """
        ...

    @abc.abstractmethod
    def check_if_user_exists(self, identifier: str) -> bool:
        """Return True when a user with ``identifier`` is stored."""
        ...

    @abc.abstractmethod
    def create_new_user(self, name: str, public_key: str) -> str:
        """Persist a new user and return its new identifier."""
        ...

    @abc.abstractmethod
    def get_message_list_for_user(
        self, identifier: str
    ) -> List[Tuple[str, str, str, int, bytes]]:
        """Return (message_id, source, destination, type, content) tuples
        queued for the given user."""
        ...

    @abc.abstractmethod
    def get_user_id_list(self, id_to_ignore: str) -> List[str]:
        """Return all stored user ids except ``id_to_ignore``."""
        ...

    @abc.abstractmethod
    def send_message(self, sender, receiver, message_type, content) -> str:
        """Store a message and return its identifier."""
        ...

    @abc.abstractmethod
    def update_user_last_seen(self, user_id) -> None:
        """Record that ``user_id`` was just seen."""
        ...

    @abc.abstractmethod
    def close_connection(self) -> None:
        """Release any underlying storage resources."""
        ...
@dataclass
class Message:
    """A single queued message between two users."""

    # Storage-layer identifier of the message.
    message_id: str
    # Sender's user id.
    source: uuid.UUID
    # Recipient's user id.
    destination: uuid.UUID
    # Protocol message-type code (semantics defined by the wire protocol).
    message_type: int
    # Opaque message payload.
    content: bytes
class User:
    """In-memory view of one stored user; wraps rows read from a StorageLayer."""

    def __init__(
        self,
        identifier: uuid.UUID,
        name: str,
        public_key: str,
        last_seen: datetime.datetime,
    ) -> None:
        self._id = identifier
        self._name = name
        # Keys are stored base64-encoded; kept decoded (raw bytes) in memory.
        self._public_key = base64.b64decode(public_key)
        self._last_seen = last_seen

    @staticmethod
    def get_user_by_id(storage_layer: StorageLayer, user_id: str) -> "User":
        """Load a user by id; raise StorageLayerException if it is absent."""
        if not storage_layer.check_if_user_exists(user_id):
            raise StorageLayerException(f"User {user_id} does not exist!")
        return User(*storage_layer.get_user_by_id(user_id))

    @staticmethod
    def create_new_user(storage_layer: StorageLayer, request: SignupRequest) -> "User":
        """Persist a new user from a signup request and return the loaded User."""
        new_id = storage_layer.create_new_user(
            name=request.name.decode(),
            public_key=base64.b64encode(request.pub_key).decode(),
        )
        return User.get_user_by_id(storage_layer, new_id)

    def get_all_messages(self, storage_layer: StorageLayer) -> List[Message]:
        """Return every message queued for this user as Message objects."""
        messages: List[Message] = []
        for (
            message_id,
            source,
            destination,
            message_type,
            content,
        ) in storage_layer.get_message_list_for_user(self.id):
            messages.append(
                Message(
                    message_id,
                    # Stored ids are hex strings; normalise back to UUID objects.
                    uuid.UUID(hex=str(source)),
                    uuid.UUID(hex=str(destination)),
                    message_type,
                    content,
                )
            )
        return messages

    def send_message(
        self,
        storage_layer: StorageLayer,
        sender: str,
        message_type: int,
        content: bytes,
    ) -> str:
        """Queue a message from ``sender`` to this user; return the message id."""
        message_id = storage_layer.send_message(sender, self.id, message_type, content)
        return message_id

    @property
    def id(self) -> str:
        # Dash-less hex form, matching how ids are passed to the storage layer.
        return str(self._id).replace("-", "")

    @property
    def name(self) -> str:
        return self._name

    @property
    def public_key(self) -> bytes:
        # Returns the *decoded* key bytes set in __init__ (annotation fixed from str).
        return self._public_key

    @property
    def last_seen(self) -> datetime.datetime:
        # Returns the datetime passed to __init__ (annotation fixed from str).
        return self._last_seen
class UserList:
    """Read-only helpers over the collection of stored users."""

    @staticmethod
    def get_user_list(storage_layer: StorageLayer, user_to_ignore: str) -> List[User]:
        """Load every stored user except ``user_to_ignore``."""
        ids = storage_layer.get_user_id_list(user_to_ignore)
        return [User.get_user_by_id(storage_layer, uid) for uid in ids]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 22:33:07 2018
@author: bruce
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
import os
# set saving path
path_result_freq = "/home/bruce/Dropbox/Project/5.Result/5.Result_Nov/2.freq_domain/"
def correlation_matrix(corr_mx, cm_title):
    """Display ``corr_mx`` as a grayscale matrix: test rows (T*) vs retest columns (R*)."""
    subject_ids = [str(i) for i in
                   list(range(1, 5)) + list(range(6, 10)) + list(range(11, 24)) + [25]]
    ylabels = ['T' + s for s in subject_ids]
    xlabels = ['R' + s for s in subject_ids]
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    cax = ax1.matshow(corr_mx, cmap='gray')
    fig.colorbar(cax)
    ax1.grid(False)
    plt.title(cm_title)
    ax1.set_xticks(np.arange(len(xlabels)))
    ax1.set_yticks(np.arange(len(ylabels)))
    ax1.set_xticklabels(xlabels, fontsize=6)
    ax1.set_yticklabels(ylabels, fontsize=6)
    plt.show()
def correlation_matrix_01(corr_mx, cm_title):
    """Binarise ``corr_mx`` to its row-wise maxima and plot the mask.

    ``corr_mx`` may be a DataFrame; it is converted to an ndarray because the
    row-max broadcast comparison below needs plain array semantics.
    """
    arr = np.asarray(corr_mx.copy())
    row_max_mask = arr == arr.max(axis=1)[:, None]  # True where the row maximum sits
    subject_ids = [str(i) for i in
                   list(range(1, 5)) + list(range(6, 10)) + list(range(11, 24)) + [25]]
    ylabels = ['T' + s for s in subject_ids]
    xlabels = ['R' + s for s in subject_ids]
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.matshow(row_max_mask, cmap='binary')
    ax1.grid(False)
    plt.title(cm_title)
    ax1.set_xticks(np.arange(len(xlabels)))
    ax1.set_yticks(np.arange(len(ylabels)))
    ax1.set_xticklabels(xlabels, fontsize=6)
    ax1.set_yticklabels(ylabels, fontsize=6)
    plt.show()
def correlation_matrix_min_01_comb(corr_mx1, corr_mx2, cm_title1, cm_title2):
    """Plot, side by side, the row-wise-minimum masks of two correlation matrices."""
    subject_ids = [str(i) for i in
                   list(range(1, 5)) + list(range(6, 10)) + list(range(11, 24)) + [25]]
    ylabels = ['T' + s for s in subject_ids]
    xlabels = ['R' + s for s in subject_ids]
    masks = []
    for mx in (corr_mx1, corr_mx2):
        arr = np.asarray(mx)  # DataFrames must become arrays for the broadcast below
        masks.append(arr == arr.min(axis=1)[:, None])  # True at each row minimum
    fig, axes = plt.subplots(1, 2)
    for ax, mask, title in zip(axes, masks, (cm_title1, cm_title2)):
        ax.matshow(mask, cmap='binary')
        ax.grid(False)
        ax.set_title(title)
        ax.set_xticks(np.arange(len(xlabels)))
        ax.set_yticks(np.arange(len(ylabels)))
        ax.set_xticklabels(xlabels, fontsize=6)
        ax.set_yticklabels(ylabels, fontsize=6)
    plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
    """Plot the row-wise-maximum mask of a test-vs-test correlation matrix."""
    arr = np.asarray(corr_mx)  # DataFrame -> ndarray for broadcasting
    mask = arr == arr.max(axis=1)[:, None]
    labels = ['T' + str(i) for i in
              list(range(1, 5)) + list(range(6, 10)) + list(range(11, 24)) + [25]]
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    cax = ax1.matshow(mask, cmap='gray')
    fig.colorbar(cax)
    ax1.grid(False)
    plt.title(cm_title)
    ax1.set_xticks(np.arange(len(labels)))
    ax1.set_yticks(np.arange(len(labels)))
    ax1.set_xticklabels(labels, fontsize=6)
    ax1.set_yticklabels(labels, fontsize=6)
    plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
    """Plot the row-wise-maximum mask of a retest-vs-retest correlation matrix."""
    arr = np.asarray(corr_mx)  # DataFrame -> ndarray for broadcasting
    mask = arr == arr.max(axis=1)[:, None]
    labels = ['R' + str(i) for i in
              list(range(1, 5)) + list(range(6, 10)) + list(range(11, 24)) + [25]]
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    cax = ax1.matshow(mask, cmap='gray')
    fig.colorbar(cax)
    ax1.grid(False)
    plt.title(cm_title)
    ax1.set_xticks(np.arange(len(labels)))
    ax1.set_yticks(np.arange(len(labels)))
    ax1.set_xticklabels(labels, fontsize=6)
    ax1.set_yticklabels(labels, fontsize=6)
    plt.show()
# eg: plot_mag_db(df_as_85_vsc, 1, "Subject")
def fig_mag_db(signal_in, subject_number = 'subject_number', title = 'title', filename = 'filename'):
    """Plot one subject's retest/test magnitude spectra and save the figure.

    Args:
        signal_in: DataFrame whose rows alternate retest (2k) / test (2k+1) spectra.
        subject_number: 1-based subject index (the string default is a placeholder;
            callers must pass an int).
        title: Plot title.
        filename: Path the image is written to.
    """
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(signal_in.iloc[2*(subject_number-1), :48030], '-')
    plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030], '-')
    plt.ylabel('magnitude')
    plt.xlim(0, 10000)
    plt.legend(('Retest', 'Test'), loc='upper right')
    plt.title(title)
    # Save BEFORE plt.show(): once show() returns, non-interactive backends may
    # have discarded the current figure, so saving afterwards writes a blank image.
    plt.savefig(filename)
    plt.show()
def fig_mag_in_1(signal_in, title = 'title', path = 'path', filename = 'filename'):
    """Plot all 22 subjects' retest/test magnitude spectra in one figure and save it.

    Args:
        signal_in: DataFrame whose rows alternate retest (2i) / test (2i+1) spectra.
        title: Centered figure title.
        path: Directory the image is written to.
        filename: Image file name.
    """
    plt.figure()
    sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',
                 '13', '14', '15', '16', '17', '18', '19', '20', '21',
                 '22', '23', '25']
    # Frequency axis is loop-invariant; build it once.
    x_label = np.arange(0, 4803, 0.1)
    for i in range(22):
        plt.subplot(11, 2, i+1)
        plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
        plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
        plt.ylabel(sub_title[i])
        plt.xlim(0, 1000)
        plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
    plt.suptitle(title)  # add a centered title to the figure
    # Save BEFORE plt.show(): after show() returns, non-interactive backends may
    # have discarded the figure, so saving afterwards writes a blank image.
    plt.savefig(os.path.join(path, filename), dpi=300)
    plt.show()
def fig_test_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
    """Overlay the un-windowed and windowed TEST spectra (rows 2i) of all 22 subjects.

    Args:
        signal_in_1: DataFrame of spectra computed without a window function.
        signal_in_2: DataFrame of spectra computed with a window function.
        title: Centered figure title.
        path: Directory the image is written to.
        filename: Image file name.
    """
    plt.figure()
    sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',
                 '13', '14', '15', '16', '17', '18', '19', '20', '21',
                 '22', '23', '25']
    # Frequency axis is loop-invariant; build it once.
    x_label = np.arange(0, 4803, 0.1)
    for i in range(22):
        plt.subplot(11, 2, i+1)
        plt.plot(x_label, signal_in_1.iloc[2*i, :48030], '-')
        plt.plot(x_label, signal_in_2.iloc[2*i, :48030], '-')
        plt.ylabel(sub_title[i])
        plt.xlim(0, 1000)
        plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
    plt.suptitle(title)  # add a centered title to the figure
    # Save BEFORE plt.show(): after show() returns, non-interactive backends may
    # have discarded the figure, so saving afterwards writes a blank image.
    plt.savefig(os.path.join(path, filename), dpi=300)
    plt.show()
def fig_retest_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
    """Overlay the un-windowed and windowed RETEST spectra (rows 2i+1) of all 22 subjects.

    Args:
        signal_in_1: DataFrame of spectra computed without a window function.
        signal_in_2: DataFrame of spectra computed with a window function.
        title: Centered figure title.
        path: Directory the image is written to.
        filename: Image file name.
    """
    plt.figure()
    sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',
                 '13', '14', '15', '16', '17', '18', '19', '20', '21',
                 '22', '23', '25']
    # Frequency axis is loop-invariant; build it once.
    x_label = np.arange(0, 4803, 0.1)
    for i in range(22):
        plt.subplot(11, 2, i+1)
        plt.plot(x_label, signal_in_1.iloc[2*i+1, :48030], '-')
        plt.plot(x_label, signal_in_2.iloc[2*i+1, :48030], '-')
        plt.ylabel(sub_title[i])
        plt.xlim(0, 1000)
        plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
    plt.suptitle(title)  # add a centered title to the figure
    # Save BEFORE plt.show(): after show() returns, non-interactive backends may
    # have discarded the figure, so saving afterwards writes a blank image.
    plt.savefig(os.path.join(path, filename), dpi=300)
    plt.show()
def distance_mx(sig_in):
    """Pairwise distances between retest rows (2i) and test rows (2j+1).

    Args:
        sig_in: DataFrame with 44 rows (subject-interleaved retest/test spectra)
            and at least ``freq_range`` columns.

    Returns:
        (output_1, output_2): two 22x22 DataFrames. Entry (i, j) of output_1 is
        the sum of absolute differences (city-block distance) and of output_2
        the sum of squared differences (squared Euclidean distance) between
        retest row 2i and test row 2j+1 over the first ``freq_range`` bins.
    """
    # freq_range: number of leading frequency bins compared.
    freq_range = 13000
    n_subjects = 22
    matrix_abs = np.zeros((n_subjects, n_subjects))
    matrix_square = np.zeros((n_subjects, n_subjects))
    for i in range(n_subjects):
        retest = np.asarray(sig_in.iloc[2*i, 0:freq_range])
        for j in range(n_subjects):
            test = np.asarray(sig_in.iloc[2*j+1, 0:freq_range])
            # Vectorised replacement of the original per-bin Python loop.
            diff = np.abs(retest - test)
            matrix_abs[i][j] = diff.sum()
            matrix_square[i][j] = np.square(diff).sum()
    output_1 = pd.DataFrame(matrix_abs)
    output_2 = pd.DataFrame(matrix_square)
    # output_1 ~ L1 (city-block) distance; output_2 ~ squared L2 distance.
    return output_1, output_2
def complex_coherence_mx(input_signal):
    """Pairwise magnitude-squared coherence between retest rows (2i) and test rows (2j+1).

    Coherence is Cxy = |Pxy|**2 / (Pxx * Pyy), computed by scipy.signal.coherence
    (Welch, fs=9606 Hz, 32-sample segments). Values below 0.1 are zeroed to
    suppress the noise floor, then the remaining bins are summed, so a larger
    entry (i, j) means stronger overall coherence between the pair.

    Args:
        input_signal: DataFrame with 44 subject-interleaved rows.

    Returns:
        22x22 DataFrame of summed thresholded coherence values.
    """
    sig_in = input_signal.copy()
    n_subjects = 22
    matrix_temp = np.zeros((n_subjects, n_subjects))
    for i in range(n_subjects):
        # Hoisted: the retest row is invariant over the inner loop.
        retest = np.asarray(sig_in.iloc[2*i, :])
        for j in range(n_subjects):
            test = np.asarray(sig_in.iloc[2*j+1, :])
            f, cxy = signal.coherence(retest, test, fs=9606, nperseg=32)
            # Zero out weak coherence bins (vectorised form of the original loop).
            cxy[cxy < 0.1] = 0
            matrix_temp[i, j] = np.abs(cxy).sum()
    output_3 = pd.DataFrame(matrix_temp)
    return output_3
def csd_mx(input_signal):
    """Pairwise cross-power-spectral-density magnitude between retest/test rows.

    Entry (i, j) is the sum of |Pxy(f)| over all frequency bins, where Pxy is
    the cross-PSD (scipy.signal.csd; Welch, fs=9606 Hz, 96-sample ~10 ms
    segments) of retest row 2i against test row 2j+1. Larger values indicate
    stronger shared spectral power.

    Args:
        input_signal: DataFrame with 44 subject-interleaved rows.

    Returns:
        22x22 DataFrame of summed cross-PSD magnitudes.
    """
    sig_in = input_signal.copy()
    n_subjects = 22
    matrix_temp = np.zeros((n_subjects, n_subjects))
    for i in range(n_subjects):
        # Hoisted: the retest row is invariant over the inner loop.
        retest = np.asarray(sig_in.iloc[2*i, :])
        for j in range(n_subjects):
            test = np.asarray(sig_in.iloc[2*j+1, :])
            f, pxy = signal.csd(retest, test, fs=9606, nperseg=96)
            # Pxy is complex; summing its magnitudes vectorises the original loop.
            matrix_temp[i, j] = np.abs(pxy).sum()
    output = pd.DataFrame(matrix_temp)
    return output
#################################
def f_dB(x):
    """Convert a linear magnitude to decibels."""
    return 20 * np.log10(np.abs(x))
# import the pkl file
df_EFR = pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# Remove the DC offset per trial: detrend each of the 1408 data rows and
# re-attach the seven trailing label columns. Rows are accumulated in a list
# and concatenated once — DataFrame.append was deprecated and removed in
# pandas 2.0, and appending row-by-row was quadratic.
detrended_rows = []
for i in range(1408):
    data_row = pd.DataFrame(
        signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1, 1024))
    label_row = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1, 7))
    detrended_rows.append(pd.concat([data_row, label_row], axis=1, ignore_index=True))
df_EFR_detrend = pd.concat(detrended_rows, ignore_index=True)
# set the title of columns
df_EFR_detrend.columns = np.append(
    np.arange(1024),
    ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR = df_EFR_detrend
# Time domain
# Define window functions. scipy.signal.windows is the supported home of the
# window factories; the signal.kaiser/signal.hamming aliases were deprecated
# and later removed from SciPy's top-level signal namespace.
win_kaiser = signal.windows.kaiser(1024, beta=14)
win_hamming = signal.windows.hamming(1024)
# Average test1 and test2 (each consecutive pair of rows) and build both the
# plain and the Hamming-windowed averaged frames. Rows are collected in lists
# and concatenated once — DataFrame.append was removed in pandas 2.0 and
# appending row-by-row was quadratic.
avg_rows = []
avg_win_rows = []
for i in range(704):
    # average the two rows of one subject/condition pair
    df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1, 1024))
    # apply the window function to the averaged waveform
    df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0, :] * win_hamming).values.reshape(1, 1024))
    df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1, 7))
    avg_rows.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
    avg_win_rows.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
df_EFR_avg = pd.concat(avg_rows, ignore_index=True)
df_EFR_avg_win = pd.concat(avg_win_rows, ignore_index=True)
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
# Re-sort with "Sound Level" as the primary key so each level occupies a
# contiguous block of rows (704 rows total).
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
# Rows 528: are the last quarter of the level-sorted frame (176 rows).
# NOTE(review): assumes exactly four sound levels sorting with '85' last — confirm.
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
# this part was replaced by upper part based on what I need to do
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
# Keep the non-negative half of the FFT bin frequencies.
freq = frq[range(int(n/2))]
# Zero-padded length: 96060 samples -> finer frequency resolution.
n2 = 96060
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
# zero padding
# for df_EFR: insert 95036 zero columns between the 1024 data columns and
# the 7 label columns (1024 + 95036 = 96060 samples per row).
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85: same zero padding for the 176 averaged 85 dB rows.
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 95036)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# "vsc" ordering: Vowel -> Subject -> Condition, so rows alternate per subject.
df_EFR_avg_85_vsc = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_label], axis=1).sort_values(by=["Vowel", "Subject", "Condition"]).reset_index(drop=True)
df_EFR_avg_85_withzero_vsc = df_EFR_avg_85_withzero.sort_values(by=["Vowel", "Subject", "Condition"]).reset_index(drop=True)
# normalization
# normalize the dataframe by standard deviation
# NOTE(review): std(axis=1) is taken over the full frame incl. label columns;
# older pandas silently drops non-numeric columns — confirm on the target version.
df_EFR_avg_85_vsc_std = df_EFR_avg_85_vsc.std(axis=1)
df_EFR_avg_85_vsc_normbystd = (df_EFR_avg_85_vsc.iloc[:, :1024]).div(df_EFR_avg_85_vsc_std, axis=0)
# normalize the dataframe by
df_EFR_avg_85_vsc_sumofsquare = ((df_EFR_avg_85_vsc.iloc[:, :1024])**2).sum(axis=1)
df_EFR_avg_85_vsc_normbysumofsquare = (df_EFR_avg_85_vsc.iloc[:, :1024]).div(df_EFR_avg_85_vsc_sumofsquare, axis=0)
# separate vowels: 44 rows (22 subjects x test/retest) per vowel, a/e/n/u.
df_EFR_avg_85_vsc_a = df_EFR_avg_85_vsc.iloc[0:44, :1024]
df_EFR_avg_85_vsc_e = df_EFR_avg_85_vsc.iloc[44:88, :1024]
df_EFR_avg_85_vsc_n = df_EFR_avg_85_vsc.iloc[88:132, :1024]
df_EFR_avg_85_vsc_u = df_EFR_avg_85_vsc.iloc[132:176, :1024]
# per-row range (max-min) normalisation
df_EFR_avg_85_vsc_a_norm = df_EFR_avg_85_vsc_a.div((df_EFR_avg_85_vsc_a.max(axis=1)-df_EFR_avg_85_vsc_a.min(axis=1)), axis=0)
df_EFR_avg_85_vsc_e_norm = df_EFR_avg_85_vsc_e.div((df_EFR_avg_85_vsc_e.max(axis=1)-df_EFR_avg_85_vsc_e.min(axis=1)), axis=0)
df_EFR_avg_85_vsc_n_norm = df_EFR_avg_85_vsc_n.div((df_EFR_avg_85_vsc_n.max(axis=1)-df_EFR_avg_85_vsc_n.min(axis=1)), axis=0)
df_EFR_avg_85_vsc_u_norm = df_EFR_avg_85_vsc_u.div((df_EFR_avg_85_vsc_u.max(axis=1)-df_EFR_avg_85_vsc_u.min(axis=1)), axis=0)
df_EFR_avg_85_vsc_normbystd_a = df_EFR_avg_85_vsc_normbystd.iloc[0:44, :1024]
df_EFR_avg_85_vsc_normbystd_e = df_EFR_avg_85_vsc_normbystd.iloc[44:88, :1024]
df_EFR_avg_85_vsc_normbystd_n = df_EFR_avg_85_vsc_normbystd.iloc[88:132, :1024]
df_EFR_avg_85_vsc_normbystd_u = df_EFR_avg_85_vsc_normbystd.iloc[132:176, :1024]
# scaled by 10 — presumably to bring sum-of-square-normalised values into a
# convenient plotting range; TODO confirm the intent of the factor.
df_EFR_avg_85_vsc_normbysumofsquare_a = 10 * df_EFR_avg_85_vsc_normbysumofsquare.iloc[0:44, :1024]
df_EFR_avg_85_vsc_normbysumofsquare_e = 10 * df_EFR_avg_85_vsc_normbysumofsquare.iloc[44:88, :1024]
df_EFR_avg_85_vsc_normbysumofsquare_n = 10 * df_EFR_avg_85_vsc_normbysumofsquare.iloc[88:132, :1024]
df_EFR_avg_85_vsc_normbysumofsquare_u = 10 * df_EFR_avg_85_vsc_normbysumofsquare.iloc[132:176, :1024]
# withzero: same per-vowel split over the zero-padded half-spectrum width.
df_EFR_avg_85_withzero_vsc_a = df_EFR_avg_85_withzero_vsc.iloc[0:44, :48030]
df_EFR_avg_85_withzero_vsc_e = df_EFR_avg_85_withzero_vsc.iloc[44:88, :48030]
df_EFR_avg_85_withzero_vsc_n = df_EFR_avg_85_withzero_vsc.iloc[88:132, :48030]
df_EFR_avg_85_withzero_vsc_u = df_EFR_avg_85_withzero_vsc.iloc[132:176, :48030]
# concatenate AENU: 44 rows x 4096 columns (a|e|n|u side by side).
temp1 = pd.concat([df_EFR_avg_85_vsc.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85_vsc.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85_vsc.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85_vsc.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
#df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
# df_EFR_avg_win_85: zero padding for the windowed 85 dB rows.
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 95036)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# test for csd_mx
# Inline sanity check that mirrors csd_mx() on the 85 dB 'a' vowel data and
# plots the first retest/test cross-PSD before summarising the whole matrix.
sig_in = df_EFR_avg_85_vsc_a.copy()
#sig_in_t = df_EFR_avg_85_vsc_normbysumofsquare_a.copy()
#sig_in = 10 * sig_in_t
# sig_in = df_EFR_avg_85_vsc_a.copy()
# freq_range -> from 0 to ???
matrix_temp = np.zeros((22, 22))
for i in range(22):
    for j in range(22):
        # temp here is the
        temp_sum = 0
        # retest row (even index) vs test row (odd index)
        sig_in_1 = np.array(sig_in.iloc[2*i, :])
        sig_in_2 = np.array(sig_in.iloc[2*j+1, :])
        # Welch cross-PSD; 9606 Hz sampling, 96-sample (~10 ms) segments.
        f, temp_Cxy = signal.csd(sig_in_1, sig_in_2, fs=9606, nperseg=96)
        # select 0 to 1300Hz
        '''
        signal_length = 160
        f = f[0:signal_length]
        temp_Cxy = temp_Cxy[0:signal_length]
        '''
        # delete values lower than threshold(min_Cxy)
        '''
        min_Cxy = 0.1
        for l in range(len(temp_Cxy)):
            if temp_Cxy[l] < min_Cxy:
                temp_Cxy[l] = 0
        '''
        # delete finish
        # test: plot only the very first pair (R1 vs T1) for visual inspection
        if i == 0 and j == 0:
            plt.figure()
            plt.semilogy(f, temp_Cxy)
            test_f = f
            test_temp_Cxy = temp_Cxy
            plt.title('R1 vs. T1(10ms window)')
            plt.show()
        # test finish
        # Accumulate the magnitude of every frequency bin into one score.
        for k in range(len(temp_Cxy)):
            #test_t3 = (abs(temp_series[k]))**2
            #print(test_t3)
            temp_sum = temp_sum + abs(temp_Cxy[k])
        matrix_temp[i][j] = temp_sum
output_3 = pd.DataFrame(matrix_temp)
correlation_matrix_01(output_3, "complex coherence for 85dB a vowel(3.5ms window)")
# test finish
# magnitude squared coherence
# Raw and range-normalised cross-PSD matrices per vowel (a/e/n/u).
csd_avg_85_vsc_a_ori = csd_mx(df_EFR_avg_85_vsc_a)
csd_avg_85_vsc_e_ori = csd_mx(df_EFR_avg_85_vsc_e)
csd_avg_85_vsc_n_ori = csd_mx(df_EFR_avg_85_vsc_n)
csd_avg_85_vsc_u_ori = csd_mx(df_EFR_avg_85_vsc_u)
csd_avg_85_vsc_a = csd_mx(df_EFR_avg_85_vsc_a_norm)
csd_avg_85_vsc_e = csd_mx(df_EFR_avg_85_vsc_e_norm)
csd_avg_85_vsc_n = csd_mx(df_EFR_avg_85_vsc_n_norm)
csd_avg_85_vsc_u = csd_mx(df_EFR_avg_85_vsc_u_norm)
# cross power spectral density
# use the numerator of coherence to take place of coherene
# input signal should be normalized
csd_avg_85_vsc_normbystd_a = csd_mx(df_EFR_avg_85_vsc_normbystd_a)
csd_avg_85_vsc_normbystd_e = csd_mx(df_EFR_avg_85_vsc_normbystd_e)
csd_avg_85_vsc_normbystd_n = csd_mx(df_EFR_avg_85_vsc_normbystd_n)
csd_avg_85_vsc_normbystd_u = csd_mx(df_EFR_avg_85_vsc_normbystd_u)
csd_avg_85_vsc_normbysumofsquare_a = csd_mx(df_EFR_avg_85_vsc_normbysumofsquare_a)
csd_avg_85_vsc_normbysumofsquare_e = csd_mx(df_EFR_avg_85_vsc_normbysumofsquare_e)
csd_avg_85_vsc_normbysumofsquare_n = csd_mx(df_EFR_avg_85_vsc_normbysumofsquare_n)
csd_avg_85_vsc_normbysumofsquare_u = csd_mx(df_EFR_avg_85_vsc_normbysumofsquare_u)
# sum of aenu
'''
df_coherence_avg_85_vsc_ae = df_coherence_avg_85_vsc_a.add(df_coherence_avg_85_vsc_e)
df_coherence_avg_85_vsc_nu = df_coherence_avg_85_vsc_n.add(df_coherence_avg_85_vsc_u)
df_coherence_avg_85_vsc_aenu = df_coherence_avg_85_vsc_ae.add(df_coherence_avg_85_vsc_nu)
'''
# max of aenu
'''
df_coherence_avg_85_vsc_maxae = pd.concat([df_coherence_avg_85_vsc_a, df_coherence_avg_85_vsc_e]).max(level=0)
df_coherence_avg_85_vsc_maxnu = pd.concat([df_coherence_avg_85_vsc_n, df_coherence_avg_85_vsc_u]).max(level=0)
df_coherence_avg_85_vsc_maxaenu = pd.concat([df_coherence_avg_85_vsc_maxae, df_coherence_avg_85_vsc_maxnu]).max(level=0)
'''
# multiplication of aenu
# Element-wise products combine the per-vowel scores into one matrix.
csd_avg_85_vsc_multiae = pd.DataFrame(csd_avg_85_vsc_a.values * csd_avg_85_vsc_e.values)
csd_avg_85_vsc_multinu = pd.DataFrame(csd_avg_85_vsc_n.values * csd_avg_85_vsc_u.values)
csd_avg_85_vsc_multiaenu = pd.DataFrame(csd_avg_85_vsc_multiae.values * csd_avg_85_vsc_multinu.values)
# plot the coherence matrix
correlation_matrix_01(csd_avg_85_vsc_a, "cross psd for 85dB a vowel normed by max-min")
correlation_matrix_01(csd_avg_85_vsc_e, "cross psd for 85dB e vowel normed by max-min")
correlation_matrix_01(csd_avg_85_vsc_n, "cross psd for 85dB n vowel normed by max-min")
correlation_matrix_01(csd_avg_85_vsc_u, "cross psd for 85dB u vowel normed by max-min")
correlation_matrix_01(csd_avg_85_vsc_a_ori, "cross psd for 85dB a vowel")
correlation_matrix_01(csd_avg_85_vsc_e_ori, "cross psd for 85dB e vowel")
correlation_matrix_01(csd_avg_85_vsc_n_ori, "cross psd for 85dB n vowel")
correlation_matrix_01(csd_avg_85_vsc_u_ori, "cross psd for 85dB u vowel")
correlation_matrix_01(csd_avg_85_vsc_multiaenu, "cross psd for 85dB concatenate aenu (multiply)")
correlation_matrix_01(csd_avg_85_vsc_normbystd_a, "cross psd for 85dB a vowel normed by std")
correlation_matrix_01(csd_avg_85_vsc_normbystd_e, "cross psd for 85dB e vowel normed by std")
correlation_matrix_01(csd_avg_85_vsc_normbystd_n, "cross psd for 85dB n vowel normed by std")
correlation_matrix_01(csd_avg_85_vsc_normbystd_u, "cross psd for 85dB u vowel normed by std")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_a, "cross psd for 85dB a vowel normed by sumofsquare")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_e, "cross psd for 85dB e vowel normed by sumofsquare")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_n, "cross psd for 85dB n vowel normed by sumofsquare")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_u, "cross psd for 85dB u vowel normed by sumofsquare")
'''
correlation_matrix_01(csd_avg_85_vsc_normbystd_a, "csd for 85dB a vowel(normed by std)")
correlation_matrix_01(csd_avg_85_vsc_normbystd_e, "csd for 85dB e vowel(normed by std)")
correlation_matrix_01(csd_avg_85_vsc_normbystd_n, "csd for 85dB n vowel(normed by std)")
correlation_matrix_01(csd_avg_85_vsc_normbystd_u, "csd for 85dB u vowel(normed by std)")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_a, "csd for 85dB a vowel(normed by sum of square of series)")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_e, "csd for 85dB e vowel(normed by sum of square of series)")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_n, "csd for 85dB n vowel(normed by sum of square of series)")
correlation_matrix_01(csd_avg_85_vsc_normbysumofsquare_u, "csd for 85dB u vowel(normed by sum of square of series)")
'''
# correlation_matrix_01(df_coherence_avg_85_0_vsc_a, "complex coherence for 85dB a vowel (zero padding)")
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606]
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 599]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the Amplitude Spectrum
"""
# create a new dataframe with zero-padding amplitude spectrum
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_withzero.iloc[i, 0:9606]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[100], temp_as[200], temp_as[300], temp_as[400], \
temp_as[500], temp_as[600], temp_as[700]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
df_as_85= pd.DataFrame()
df_as7_85= pd.DataFrame()
df_as_win_85= pd.DataFrame()
df_as7_win_85= pd.DataFrame()
for i in range(176):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_as = np.abs((fftpack.fft(df_EFR_avg_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))])
df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,48030)), ignore_index = True)
df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
temp_as_win = np.abs((fftpack.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))])
df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,48030)), ignore_index = True)
df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[1000], temp_as_win[2000], temp_as_win[3000], temp_as_win[4000], \
temp_as_win[5000], temp_as_win[6000], temp_as_win[7000]]).reshape(1,7)), ignore_index = True)
df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
#resort df_as_85 based on 1.vowel, 2.subject 3.condition
df_as_85_vsc = df_as_85.sort_values(by=["Vowel", "Subject", "Condition"])
df_as_85_vsc = df_as_85_vsc.reset_index(drop=True)
df_as_85_vsc_label = df_as_85_vsc.iloc[:,48030:]
df_as_win_85_vsc = df_as_win_85.sort_values(by=["Vowel", "Subject", "Condition"])
df_as_win_85_vsc = df_as_win_85_vsc.reset_index(drop=True)
df_as_win_85_vsc_label = df_as_win_85_vsc.iloc[:,48030:]
df_as_85_vsc_a = df_as_85_vsc.iloc[0:44, :]
df_as_85_vsc_e = df_as_85_vsc.iloc[44:88, :]
df_as_85_vsc_n = df_as_85_vsc.iloc[88:132, :]
df_as_85_vsc_u = df_as_85_vsc.iloc[132:176, :]
df_as_win_85_vsc_a = df_as_win_85_vsc.iloc[0:44, :]
df_as_win_85_vsc_e = df_as_win_85_vsc.iloc[44:88, :]
df_as_win_85_vsc_n = df_as_win_85_vsc.iloc[88:132, :]
df_as_win_85_vsc_u = df_as_win_85_vsc.iloc[132:176, :]
# test
#fig_mag_db(df_as_85_vsc_a, 1, title = '85dB a vowel Subject 1 in frequency domain', filename = '85_a_s1_f_domain.png')
'''
# average test1 and test2
df_as_7_avg = pd.DataFrame()
for i in range(44):
df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+1, 0:7].mean(axis=0).values.reshape(1,7))
df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7))
df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True)
df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t)
# set the title of columns
df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"])
df_as_7_avg = df_as_7_avg.reset_index(drop=True)
'''
'''
# set a normalized AS
df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float))
df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1)
df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14])
# normalize
df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0)
# add label
df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True)
'''
# Complex Value
from scipy.spatial import distance_matrix
df_cx_85= pd.DataFrame()
df_cx_win_85= pd.DataFrame()
for i in range(176):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_cx = (fftpack.fft(df_EFR_avg_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))]
df_cx_85 = df_cx_85.append(pd.DataFrame(temp_cx.reshape(1,48030)), ignore_index = True)
temp_cx_win = (fftpack.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))]
df_cx_win_85 = df_cx_win_85.append(pd.DataFrame(temp_cx_win.reshape(1,48030)), ignore_index = True)
df_cx_85 = pd.concat([df_cx_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_cx_win_85 = pd.concat([df_cx_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
#resort df_as_85 based on 1.vowel, 2.subject 3.condition
df_cx_85_vsc = df_cx_85.sort_values(by=["Vowel", "Subject", "Condition"])
df_cx_85_vsc = df_cx_85_vsc.reset_index(drop=True)
df_cx_85_vsc_label = df_cx_85_vsc.iloc[:,48030:]
df_cx_win_85_vsc = df_cx_win_85.sort_values(by=["Vowel", "Subject", "Condition"])
df_cx_win_85_vsc = df_cx_win_85_vsc.reset_index(drop=True)
df_cx_win_85_vsc_label = df_cx_win_85_vsc.iloc[:,48030:]
# normalize df_cx_85_vsc but didn't change the name
df_cx_85_vsc = df_cx_85_vsc.iloc[:, :48030].div((df_cx_85_vsc.iloc[:, :48030].abs()**2).sum(axis=1).div(48030).pow(1/2), axis=1)
df_cx_win_85_vsc = df_cx_win_85_vsc.iloc[:, :48030].div(((np.square(df_cx_win_85_vsc.iloc[:, :48030].abs())).sum(axis=1).div(48030)).pow(1/2), axis=1)
# df_cx_85_vsc = (df_cx_85_vsc.iloc[:, :48030]/(df_cx_85_vsc.iloc[:, :48030].abs().max()-df_cx_85_vsc.iloc[:, :48030].abs().min()))
# df_cx_win_85_vsc = (df_cx_win_85_vsc.iloc[:, :48030]/(df_cx_win_85_vsc.iloc[:, :48030].abs().max()-df_cx_win_85_vsc.iloc[:, :48030].abs().min()))
df_cx_85_vsc_a = df_cx_85_vsc.iloc[0:44, :].reset_index(drop=True)
df_cx_85_vsc_e = df_cx_85_vsc.iloc[44:88, :].reset_index(drop=True)
df_cx_85_vsc_n = df_cx_85_vsc.iloc[88:132, :].reset_index(drop=True)
df_cx_85_vsc_u = df_cx_85_vsc.iloc[132:176, :].reset_index(drop=True)
df_cx_win_85_vsc_a = df_cx_win_85_vsc.iloc[0:44, :].reset_index(drop=True)
df_cx_win_85_vsc_e = df_cx_win_85_vsc.iloc[44:88, :].reset_index(drop=True)
df_cx_win_85_vsc_n = df_cx_win_85_vsc.iloc[88:132, :].reset_index(drop=True)
df_cx_win_85_vsc_u = df_cx_win_85_vsc.iloc[132:176, :].reset_index(drop=True)
# test
# plot for test
# distance matrix
distance_cx_85_vsc_a, distance_cx_85_vsc_a_square = distance_mx(df_cx_85_vsc_a)
distance_cx_85_vsc_e, distance_cx_85_vsc_e_square = distance_mx(df_cx_85_vsc_e)
distance_cx_85_vsc_n, distance_cx_85_vsc_n_square = distance_mx(df_cx_85_vsc_n)
distance_cx_85_vsc_u, distance_cx_85_vsc_u_square = distance_mx(df_cx_85_vsc_u)
# test
'''
# coherence_cx_85_vsc_a = complex_coherence_mx(df_cx_85_vsc_a)
sig_in = df_cx_85_vsc_a.copy()
# sig_in = df_cx_85_vsc_a.copy().astype('complex256')
freq_range = 13000
matrix_temp = np.zeros((22, 22))
for i in range(22):
for j in range(22):
# temp here is the
# temp_numerator = pd.DataFrame()
temp_numerator_1 = sig_in.iloc[[2*i], 0:freq_range].reset_index(drop=True)
temp_numerator_2 = np.conj(sig_in.iloc[[2*j+1], 0:freq_range]).reset_index(drop=True)
temp_numerator = (temp_numerator_1 * temp_numerator_2).mean().to_frame().T.abs()
temp_denominator_1_signal = sig_in.iloc[[2*i], 0:freq_range]
temp_denominator_1 = np.sqrt((sig_in.iloc[[2*i], 0:freq_range].mul(np.conj(sig_in.iloc[[2*i], 0:freq_range])).reset_index(drop=True)).mean().to_frame().T)
temp_denominator_1_compare = (sig_in.iloc[[2*i], 0:freq_range].abs()**2).sum(axis=1).mean()# .apply(np.sqrt)
temp_denominator_2 = np.sqrt(sig_in.iloc[[2*j+1], 0:freq_range].mul(np.conj(sig_in.iloc[[2*j+1], 0:freq_range])).reset_index(drop=True).mean().to_frame().T)
temp_denominator = temp_denominator_1 * temp_denominator_2
temp = temp_numerator.divide(temp_denominator)
temp_maximum_value = temp
matrix_temp[i][j] = temp_maximum_value
coherence_cx_85_vsc_a = pd.DataFrame(matrix_temp)
'''
#######
correlation_matrix_min_01_comb(distance_cx_85_vsc_a,\
distance_cx_85_vsc_a_square,\
cm_title1='distance_cx_85_vsc_a(smallest) euclidian format',\
cm_title2='distance_cx_85_vsc_a(smallest) square')
correlation_matrix_min_01_comb(distance_cx_85_vsc_e,\
distance_cx_85_vsc_e_square,\
cm_title1='distance_cx_85_vsc_e(smallest) euclidian format',\
cm_title2='distance_cx_85_vsc_e(smallest) square')
correlation_matrix_min_01_comb(distance_cx_85_vsc_n,\
distance_cx_85_vsc_n_square,\
cm_title1='distance_cx_85_vsc_n(smallest) euclidian format',\
cm_title2='distance_cx_85_vsc_n(smallest) square')
correlation_matrix_min_01_comb(distance_cx_85_vsc_u,\
distance_cx_85_vsc_u_square,\
cm_title1='distance_cx_85_vsc_u(smallest) euclidian format',\
cm_title2='distance_cx_85_vsc_u(smallest) square')
# Calculate correlation
'''
# EFR
corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
# AS
corr_as_85_a = df_as_85.iloc[0:44, 0:4803].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_e = df_as_85.iloc[44:88, 0:4803].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_n = df_as_85.iloc[88:132, 0:4803].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_u = df_as_85.iloc[132:176, 0:4803].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_a_t = df_as_85.iloc[0:44, 0:4803].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_e_t = df_as_85.iloc[44:88, 0:4803].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_n_t = df_as_85.iloc[88:132, 0:4803].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_u_t = df_as_85.iloc[132:176, 0:4803].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_a_re = df_as_85.iloc[0:44, 0:4803].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_e_re = df_as_85.iloc[44:88, 0:4803].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_n_re = df_as_85.iloc[88:132, 0:4803].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_u_re = df_as_85.iloc[132:176, 0:4803].T.corr(method='pearson').iloc[22:44, 22:44]
#AS7
corr_as7_85_a = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_e = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_n = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_u = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_a_t = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_e_t = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_n_t = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_u_t = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_a_re = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_e_re = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_n_re = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_u_re = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# plot the frequency domain
# a vowel
'''
fig_mag_db(df_as_85_vsc_a, 1, title = '85dB a vowel Subject 1 in frequency domain', filename = '85_a_s1_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 2, title = '85dB a vowel Subject 2 in frequency domain', filename = '85_a_s2_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 3, title = '85dB a vowel Subject 3 in frequency domain', filename = '85_a_s3_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 4, title = '85dB a vowel Subject 4 in frequency domain', filename = '85_a_s4_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 5, title = '85dB a vowel Subject 5 in frequency domain', filename = '85_a_s5_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 6, title = '85dB a vowel Subject 6 in frequency domain', filename = '85_a_s6_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 7, title = '85dB a vowel Subject 7 in frequency domain', filename = '85_a_s7_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 8, title = '85dB a vowel Subject 8 in frequency domain', filename = '85_a_s8_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 9, title = '85dB a vowel Subject 9 in frequency domain', filename = '85_a_s9_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 10, title = '85dB a vowel Subject 10 in frequency domain', filename = '85_a_s10_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 11, title = '85dB a vowel Subject 11 in frequency domain', filename = '85_a_s11_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 12, title = '85dB a vowel Subject 12 in frequency domain', filename = '85_a_s12_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 13, title = '85dB a vowel Subject 13 in frequency domain', filename = '85_a_s13_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 14, title = '85dB a vowel Subject 14 in frequency domain', filename = '85_a_s14_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 15, title = '85dB a vowel Subject 15 in frequency domain', filename = '85_a_s15_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 16, title = '85dB a vowel Subject 16 in frequency domain', filename = '85_a_s16_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 17, title = '85dB a vowel Subject 17 in frequency domain', filename = '85_a_s17_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 18, title = '85dB a vowel Subject 18 in frequency domain', filename = '85_a_s18_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 19, title = '85dB a vowel Subject 19 in frequency domain', filename = '85_a_s19_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 20, title = '85dB a vowel Subject 20 in frequency domain', filename = '85_a_s20_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 21, title = '85dB a vowel Subject 21 in frequency domain', filename = '85_a_s21_f_domain.png')
fig_mag_db(df_as_85_vsc_a, 22, title = '85dB a vowel Subject 22 in frequency domain', filename = '85_a_s22_f_domain.png')
'''
# plot 22 figures in one figure
'''
fig_mag_in_1(df_as_win_85_vsc_a, title = '85dB a vowel subjects in frequency domain', \
path = path_result_freq, \
filename = '85_a_f_domain.png')
fig_test_in_1(df_as_85_vsc_a, df_as_win_85_vsc_a, title = '85dB a vowel test in frequency domain', \
path = path_result_freq, \
filename = '85_a_f_domain_test.png')
fig_retest_in_1(df_as_85_vsc_a, df_as_win_85_vsc_a, title = '85dB a vowel retests in frequency domain', \
path = path_result_freq, \
filename = '85_a_f_domain_retest.png')
'''
# e vowel
'''
fig_mag_db(df_as_85_vsc_e, 1, title = '85dB e vowel Subject 1 in frequency domain', filename = '85_e_s1_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 2, title = '85dB e vowel Subject 2 in frequency domain', filename = '85_e_s2_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 3, title = '85dB e vowel Subject 3 in frequency domain', filename = '85_e_s3_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 4, title = '85dB e vowel Subject 4 in frequency domain', filename = '85_e_s4_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 5, title = '85dB e vowel Subject 5 in frequency domain', filename = '85_e_s5_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 6, title = '85dB e vowel Subject 6 in frequency domain', filename = '85_e_s6_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 7, title = '85dB e vowel Subject 7 in frequency domain', filename = '85_e_s7_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 8, title = '85dB e vowel Subject 8 in frequency domain', filename = '85_e_s8_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 9, title = '85dB e vowel Subject 9 in frequency domain', filename = '85_e_s9_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 10, title = '85dB e vowel Subject 10 in frequency domain', filename = '85_e_s10_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 11, title = '85dB e vowel Subject 11 in frequency domain', filename = '85_e_s11_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 12, title = '85dB e vowel Subject 12 in frequency domain', filename = '85_e_s12_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 13, title = '85dB e vowel Subject 13 in frequency domain', filename = '85_e_s13_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 14, title = '85dB e vowel Subject 14 in frequency domain', filename = '85_e_s14_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 15, title = '85dB e vowel Subject 15 in frequency domain', filename = '85_e_s15_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 16, title = '85dB e vowel Subject 16 in frequency domain', filename = '85_e_s16_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 17, title = '85dB e vowel Subject 17 in frequency domain', filename = '85_e_s17_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 18, title = '85dB e vowel Subject 18 in frequency domain', filename = '85_e_s18_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 19, title = '85dB e vowel Subject 19 in frequency domain', filename = '85_e_s19_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 20, title = '85dB e vowel Subject 20 in frequency domain', filename = '85_e_s20_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 21, title = '85dB e vowel Subject 21 in frequency domain', filename = '85_e_s21_f_domain.png')
fig_mag_db(df_as_85_vsc_e, 22, title = '85dB e vowel Subject 22 in frequency domain', filename = '85_e_s22_f_domain.png')
'''
# plot 22 figures in one figure
'''
fig_mag_in_1(df_as_85_vsc_e, title = '85dB e vowel subjects in frequency domain', \
path = path_result_freq, \
filename = '85_e_f_domain.png')
fig_test_in_1(df_as_85_vsc_e, df_as_win_85_vsc_e, title = '85dB e vowel test in frequency domain', \
path = path_result_freq, \
filename = '85_e_f_domain_test.png')
fig_retest_in_1(df_as_85_vsc_e, df_as_win_85_vsc_e, title = '85dB e vowel retests in frequency domain', \
path = path_result_freq, \
filename = '85_e_f_domain_retest.png')
'''
# n vowel
'''
fig_mag_db(df_as_85_vsc_n, 1, title = '85dB n vowel Subject 1 in frequency domain', filename = '85_n_s1_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 2, title = '85dB n vowel Subject 2 in frequency domain', filename = '85_n_s2_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 3, title = '85dB n vowel Subject 3 in frequency domain', filename = '85_n_s3_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 4, title = '85dB n vowel Subject 4 in frequency domain', filename = '85_n_s4_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 5, title = '85dB n vowel Subject 5 in frequency domain', filename = '85_n_s5_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 6, title = '85dB n vowel Subject 6 in frequency domain', filename = '85_n_s6_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 7, title = '85dB n vowel Subject 7 in frequency domain', filename = '85_n_s7_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 8, title = '85dB n vowel Subject 8 in frequency domain', filename = '85_n_s8_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 9, title = '85dB n vowel Subject 9 in frequency domain', filename = '85_n_s9_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 10, title = '85dB n vowel Subject 10 in frequency domain', filename = '85_n_s10_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 11, title = '85dB n vowel Subject 11 in frequency domain', filename = '85_n_s11_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 12, title = '85dB n vowel Subject 12 in frequency domain', filename = '85_n_s12_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 13, title = '85dB n vowel Subject 13 in frequency domain', filename = '85_n_s13_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 14, title = '85dB n vowel Subject 14 in frequency domain', filename = '85_n_s14_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 15, title = '85dB n vowel Subject 15 in frequency domain', filename = '85_n_s15_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 16, title = '85dB n vowel Subject 16 in frequency domain', filename = '85_n_s16_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 17, title = '85dB n vowel Subject 17 in frequency domain', filename = '85_n_s17_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 18, title = '85dB n vowel Subject 18 in frequency domain', filename = '85_n_s18_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 19, title = '85dB n vowel Subject 19 in frequency domain', filename = '85_n_s19_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 20, title = '85dB n vowel Subject 20 in frequency domain', filename = '85_n_s20_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 21, title = '85dB n vowel Subject 21 in frequency domain', filename = '85_n_s21_f_domain.png')
fig_mag_db(df_as_85_vsc_n, 22, title = '85dB n vowel Subject 22 in frequency domain', filename = '85_n_s22_f_domain.png')
'''
# plot 22 figures in one figure
'''
fig_mag_in_1(df_as_85_vsc_n, title = '85dB n vowel subjects in frequency domain', \
path = path_result_freq, \
filename = '85_n_f_domain.png')
fig_test_in_1(df_as_85_vsc_n, df_as_win_85_vsc_n, title = '85dB n vowel test in frequency domain', \
path = path_result_freq, \
filename = '85_n_f_domain_test.png')
fig_retest_in_1(df_as_85_vsc_n, df_as_win_85_vsc_n, title = '85dB n vowel retests in frequency domain', \
path = path_result_freq, \
filename = '85_n_f_domain_retest.png')
'''
# u vowel
'''
fig_mag_db(df_as_85_vsc_u, 1, title = '85dB u vowel Subject 1 in frequency domain', filename = '85_u_s1_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 2, title = '85dB u vowel Subject 2 in frequency domain', filename = '85_u_s2_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 3, title = '85dB u vowel Subject 3 in frequency domain', filename = '85_u_s3_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 4, title = '85dB u vowel Subject 4 in frequency domain', filename = '85_u_s4_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 5, title = '85dB u vowel Subject 5 in frequency domain', filename = '85_u_s5_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 6, title = '85dB u vowel Subject 6 in frequency domain', filename = '85_u_s6_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 7, title = '85dB u vowel Subject 7 in frequency domain', filename = '85_u_s7_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 8, title = '85dB u vowel Subject 8 in frequency domain', filename = '85_u_s8_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 9, title = '85dB u vowel Subject 9 in frequency domain', filename = '85_u_s9_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 10, title = '85dB u vowel Subject 10 in frequency domain', filename = '85_u_s10_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 11, title = '85dB u vowel Subject 11 in frequency domain', filename = '85_u_s11_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 12, title = '85dB u vowel Subject 12 in frequency domain', filename = '85_u_s12_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 13, title = '85dB u vowel Subject 13 in frequency domain', filename = '85_u_s13_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 14, title = '85dB u vowel Subject 14 in frequency domain', filename = '85_u_s14_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 15, title = '85dB u vowel Subject 15 in frequency domain', filename = '85_u_s15_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 16, title = '85dB u vowel Subject 16 in frequency domain', filename = '85_u_s16_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 17, title = '85dB u vowel Subject 17 in frequency domain', filename = '85_u_s17_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 18, title = '85dB u vowel Subject 18 in frequency domain', filename = '85_u_s18_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 19, title = '85dB u vowel Subject 19 in frequency domain', filename = '85_u_s19_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 20, title = '85dB u vowel Subject 20 in frequency domain', filename = '85_u_s20_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 21, title = '85dB u vowel Subject 21 in frequency domain', filename = '85_u_s21_f_domain.png')
fig_mag_db(df_as_85_vsc_u, 22, title = '85dB u vowel Subject 22 in frequency domain', filename = '85_u_s22_f_domain.png')
'''
# plot 22 figures in one figure
'''
fig_mag_in_1(df_as_85_vsc_u, title = '85dB u vowel subjects in frequency domain', \
path = path_result_freq, \
filename = '85_u_f_domain.png')
fig_test_in_1(df_as_85_vsc_u, df_as_win_85_vsc_u, title = '85dB u vowel test in frequency domain', \
path = path_result_freq, \
filename = '85_u_f_domain_test.png')
fig_retest_in_1(df_as_85_vsc_u, df_as_win_85_vsc_u, title = '85dB u vowel retests in frequency domain', \
path = path_result_freq, \
filename = '85_u_f_domain_retest.png')
'''
# example
#correlation_matrix_01(corr_EFR_a_85_r_t_part, 'a_vowel_85Db cross correlation of test and retest')
'''
# EFR
correlation_matrix_01(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_tt_01(corr_EFR_avg_85_a_t, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_rr_01(corr_EFR_avg_85_a_re, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_tt_01(corr_EFR_avg_85_e_t, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_rr_01(corr_EFR_avg_85_e_re, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_tt_01(corr_EFR_avg_85_n_t, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_rr_01(corr_EFR_avg_85_n_re, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
correlation_matrix_tt_01(corr_EFR_avg_85_u_t, 'cross correlation of 85dB u_vowel in time domain')
correlation_matrix_rr_01(corr_EFR_avg_85_u_re, 'cross correlation of 85dB u_vowel in time domain')
'''
'''
# Amplitude Spectrum
correlation_matrix_01(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix_01(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix_01(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix_01(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
# Amplitude Spectrum 7 points
correlation_matrix_01(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
'''
'''
# original test
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.matshow(corr_EFR_a_85_r_t_part, cmap='gray') # cmap=plt.cm.gray
plt.title('cross correlation of test and retest')
plt.colorbar() # show the color bar on the right side of the figure
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
'''
'''
#plt.subplot(1,3,1)
plt.matshow(corr_as_test)# cmap=plt.cm.gray
plt.title('cross correlation of test subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,2)
plt.matshow(corr_as_retest) # cmap=plt.cm.gray
plt.title('cross correlation of retest subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,3)
plt.matshow(corr_as_t_r) # cmap=plt.cm.gray
plt.title('cross correlation of test and retest')
plt.colorbar() # show the color bar on the right side of the figure
plt.matshow(corr_as_norm_test)# cmap=plt.cm.gray
plt.title('auto correlation of normalized test subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,2)
plt.matshow(corr_as_norm_retest) # cmap=plt.cm.gray
plt.title('auto correlation of normalized retest subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,3)
plt.matshow(corr_as_norm_t_r) # cmap=plt.cm.gray
plt.title('corss correlation of normalized test and retest')
plt.colorbar() # show the color bar on the right side of the figure
'''
"""
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright © 2016 root <root@VM-17-202-debian>

# Basic list operations: indexing, mutation, append/pop, and comprehensions.
# Modernized to Python 3 print() calls (the original py2 print statements are
# a SyntaxError on any current interpreter); printed values are unchanged.

# Create a list.
xs = [3, 1, 2]
print(xs, xs[2])
print(xs[-1])           # negative indices count from the end of the list
xs[2] = 'foo'           # lists may hold elements of different types
print(xs)
xs.append('bar')        # add a new element to the end of the list
print(xs)
print(xs.pop(), xs)     # pop removes and returns the last element

# Build the list of squares with a plain loop...
nums = [0, 1, 2, 3]
squares = []
for x in nums:
    squares.append(x ** 2)
print(squares)

# ...or equivalently with a list comprehension, filtered to even values.
# Fixed: the filter must test evenness (x % 2 == 0), not positivity (x > 0),
# to match the name 'even_squares' ([0, 4], not [1, 4, 9]).
even_squares = [x ** 2 for x in nums if x % 2 == 0]
print(even_squares)
|
import binascii
## Below function helps to convert a little endian hex string to decimal value
def le_2_de(a):
    """Convert a little-endian hex string to its decimal value.

    *a* is a run of hex digit pairs stored least-significant byte first,
    so '0010' is the bytes 0x00, 0x10 -> 0x1000 == 4096.

    Fixed for Python 3: the original used ``range(0, len(a)/2)``, and ``/``
    yields a float on Python 3, so ``range`` raised TypeError.  Byte
    reordering is now delegated to int.from_bytes, which also avoids
    concatenating string slices by hand.
    """
    # bytes.fromhex parses the digit pairs; 'little' reverses the byte order.
    return int.from_bytes(bytes.fromhex(a), 'little')
### Below function is to pull the hex value from hex data of image and print output
def content_pull(a,b):
l=int(a,16)*2
data=le_2_de((content[l:l+2*b])) ### here user defined function le_2_de is used to find decimal for little_endian
return data
### Main program starts here
## /Volumes/Ram/Study/Fall/Digital Forensics/Assignment 3
# Parse a FAT boot sector from a raw disk image and print its layout.
filename = input("Enter Image file path : ")
# Hex-dump the whole image once.  .decode() is required on Python 3 because
# binascii.hexlify() returns bytes, while the offset helpers slice and
# concatenate str; 'with' guarantees the handle is closed.
with open(filename, 'rb') as f:
    content = binascii.hexlify(f.read()).decode('ascii')
# Integer division: len(content)/2 is a float on Python 3 and would poison
# the byte-offset arithmetic below.
content_end_byte = len(content) // 2
## Dictionary of boot-sector field byte offsets ('*_O', hex) and sizes ('*_S', bytes).
boot_dict = {'BPS_O': '000B', 'BPS_S': 2,    # bytes per sector
             'SPC_O': '000D', 'SPC_S': 1,    # sectors per cluster
             'RS_O': '000E', 'RS_S': 2,      # reserved sectors
             'NOFC_O': '0010', 'NOFC_S': 1,  # number of FAT copies
             'NORE_O': '0011', 'NORE_S': 2,  # number of root entries
             'SPF_O': '0016', 'SPF_S': 2}    # sectors per FAT
BPS = content_pull(boot_dict['BPS_O'], boot_dict['BPS_S'])
RS = content_pull(boot_dict['RS_O'], boot_dict['RS_S'])
SPC = content_pull(boot_dict['SPC_O'], boot_dict['SPC_S'])
NOFC = content_pull(boot_dict['NOFC_O'], boot_dict['NOFC_S'])
NORE = content_pull(boot_dict['NORE_O'], boot_dict['NORE_S'])
SPF = content_pull(boot_dict['SPF_O'], boot_dict['SPF_S'])
# NOTE(review): the first figure reuses BPS as "Bytes occupied by Boot Block",
# exactly as the original printed — confirm whether RS*BPS was intended.
print("Bytes occupied by Boot Block :" + str(BPS) + "\n Bytes per Sector: " + str(BPS)
      + " \n Reserved Sectors :" + str(RS) + " \n Sectors per Cluster :" + str(SPC)
      + " \n No of FAT copies :" + str(NOFC) + " \n No of Root entries :" + str(NORE)
      + "\n Sectors per FAT :" + str(SPF))
# Derived layout offsets (all in bytes from the start of the image).
BOFFAT = RS * BPS                     # first FAT starts after the reserved sectors
BOSFAT = BOFFAT + SPF * BPS           # second FAT follows the first
BOFRDE = BOFFAT + (NOFC * SPF * BPS)  # root directory follows all FAT copies
BOFDTB = BOFRDE + NORE * 32           # data area: 32 bytes per root entry
NODBB = content_end_byte - BOFDTB     # bytes remaining in the data area
print("Byte Ofset 1st FAT :" + str(BOFFAT) + "\n Byte Ofset of 2nd FAT :" + str(BOSFAT)
      + "\n Byte Ofset of 1st Root directory Entry :" + str(BOFRDE)
      + "\n Byte Ofset of 1st Data Block :" + str(BOFDTB)
      + "\n Number of Bytes in Data Block: " + str(NODBB))
|
# Changes every:
#   OFFSET(B2,0,4)
# To:
#   INDIRECT("'New Matrix'!$F$" & ROW($A2))
import string
from itertools import product

# Spreadsheet column names: 'a'..'z', then 'aa', 'ab', ..., 'zz'.
# Fixed: the original used combinations_with_replacement, which only yields
# non-decreasing pairs (no 'ba', 'ca', ...), so columns past 'AZ' were wrong;
# product() enumerates every two-letter column in spreadsheet order.
alphabet = string.ascii_lowercase
length = 2
alpha1 = list(alphabet)
alpha2 = ["".join(pair) for pair in product(alphabet, repeat=length)]
extended_alphabet = alpha1 + alpha2

# Renamed from 'string' to 'formula' — the original rebound the name of the
# stdlib module imported above.  (Python 2 print statements also converted.)
formula = r'IF(ISBLANK(OFFSET(B2,0,4)),"",OFFSET(B2,0,4)&"?"&"utm_source=facebook&utm_medium="&IF(OR(OFFSET(B2,0,13)="des",OFFSET(B2,0,13)="mob",OFFSET(B2,0,13)="mob-tab",OFFSET(B2,0,13)="a",OFFSET(B2,0,13)="mob-all"),"pla","banr")&"&utm_term="&SUBSTITUTE(LOWER(AlphaNumericOnly(SUBSTITUTE(SUBSTITUTE(SUBSTITUTE(OFFSET(B2,0,19),"e","e"),"&","and"),"+","and")))," ","_")&"&utm_content="&OFFSET(B2,0,17)&"&utm_campaign="&OFFSET(B2,0,2)&"&db_dest="&OFFSET(B2,0,21)&"&db_unit=d&db_cid=60&db_terms="&OFFSET(B2,0,23)&"-"&OFFSET(B2,0,24)&"&db_class="&IF(OR(OFFSET(B2,0,13)="des",OFFSET(B2,0,13)="banr"),"des",OFFSET(B2,0,13))&"&db_seg="&OFFSET(B2,0,22)&"&db_sku=&c3ch=Facebook&c3nid="&OFFSET(B2,0,2)&"&db_vref1="&IF(ISBLANK(OFFSET(B2,0,25)),"n",OFFSET(B2,0,25))&"&db_vref2="&IF(ISBLANK(OFFSET(B2,0,26)),"n",OFFSET(B2,0,26))&"&lb=force")'
substring = 'OFFSET'
# (The original duplicated this loop body once before the loop purely as a
# debug preview; the duplicate has been removed — the loop prints it anyway.)
while substring in formula:
    start = formula.find('OFFSET')
    end = formula.find(')', start)          # first ')' after OFFSET( closes it
    # Third argument of OFFSET(B2,0,n) is the column offset from column B.
    num_reference = formula[start + 6:end].split(',')[2]
    # Column B is index 1, so offset n lands at index n + 1 in the column list.
    letter_reference = extended_alphabet[int(num_reference) + 1]
    text_to_replace = formula[start:end + 1]
    # Fixed: the original made this a raw string, so the \' escapes stayed as
    # literal backslashes in the output; a normal string yields 'New Matrix'
    # as shown in the header comment above.
    ind_reference = 'INDIRECT("\'New Matrix\'!${0}$" & ROW($A2))'.format(letter_reference.upper())
    print(text_to_replace)
    print(ind_reference)
    # replace() with a count of 100 swaps every occurrence of this OFFSET form.
    formula = formula.replace(text_to_replace, ind_reference, 100)
print(formula)
|
#!/usr/bin/env python3
import ruamel.yaml
from ruamel.yaml.comments import (CommentedMap, CommentedSeq)
import fileinput
import os
import sys
from netaddr import IPNetwork
# This version saves comments/edits in YAML files
# Round-trip YAML handler (ruamel): preserves comments and key order when the
# open5gs/haulage config files are rewritten below.
yaml = ruamel.yaml.YAML()
yaml.indent(sequence=4, mapping=2, offset=2)
# Input
# Master colte configuration; all values written below come from this file.
colte_vars = "/etc/colte/config.yml"
# EPC conf-files
hss = "/etc/open5gs/hss.yaml"
pcrf = "/etc/open5gs/pcrf.yaml"
mme = "/etc/open5gs/mme.yaml"
sgwc = "/etc/open5gs/sgwc.yaml"
sgwu = "/etc/open5gs/sgwu.yaml"
smf = "/etc/open5gs/smf.yaml"
upf = "/etc/open5gs/upf.yaml"
# Haulage
haulage = "/etc/haulage/config.yml"
# Other files
colte_nat_script = "/usr/bin/coltenat"
network_vars = "/etc/systemd/network/99-open5gs.network"
webgui_env = "/etc/colte/webgui.env"
webadmin_env = "/etc/colte/webadmin.env"
def update_env_file(file_name, colte_data):
    """Rewrite the DB_* credentials in a KEY=VALUE .env file.

    file_name  -- path to the .env file (webgui/webadmin)
    colte_data -- parsed colte config supplying mysql_user/password/db

    NOTE(review): the file is parsed by textually swapping '=' for ': ' and
    loading it as YAML, then swapped back on write. Any value containing
    '=' or ': ' would be corrupted -- confirm the env files stay simple
    KEY=VALUE lines.
    """
    env_data = {}
    with open(file_name, 'r') as file:
        env_data = yaml.load(file.read().replace("=", ": "))
    env_data["DB_USER"] = colte_data["mysql_user"]
    env_data["DB_PASSWORD"] = colte_data["mysql_password"]
    env_data["DB_NAME"] = colte_data["mysql_db"]
    # Get data in YAML format
    with open(file_name, 'w') as file:
        # Save the results
        yaml.dump(env_data, file)
    # Update data in correct format (turn 'KEY: value' back into 'KEY=value')
    new_text = ""
    with open(file_name, 'r') as file:
        new_text = file.read().replace(": ", "=")
    # Save in correct format
    with open(file_name, 'w') as file:
        file.write(new_text)
def enable_ip_forward():
    """Persist net.ipv4.ip_forward=1 in /etc/sysctl.conf and apply it now."""
    replaceAll("/etc/sysctl.conf", "net.ipv4.ip_forward", "net.ipv4.ip_forward=1\n", True)
    os.system('sysctl -w net.ipv4.ip_forward=1')
def update_colte_nat_script(colte_data):
    """Point every ADDRESS= line of the coltenat helper script at the LTE subnet."""
    replaceAll(colte_nat_script, "ADDRESS=", "ADDRESS=" + colte_data["lte_subnet"]+ "\n", False)
def update_network_vars(colte_data):
    """Write the gateway address (first host of the LTE subnet, CIDR form)
    into the systemd-networkd config's Address= line."""
    lte_net = IPNetwork(colte_data["lte_subnet"])
    gateway = "{}/{}".format(lte_net[1], lte_net.prefixlen)
    replaceAll(network_vars, "Address=", "Address=" + gateway + "\n", True)
def replaceAll(file, searchExp, replaceExp, replace_once):
    """Rewrite *file* in place, replacing every line containing *searchExp*
    with *replaceExp* (which must carry its own newline).

    With replace_once=True only the first match is replaced; later matching
    lines are deleted entirely.
    """
    seen_match = False
    for line in fileinput.input(file, inplace=1):
        if searchExp in line:
            if replace_once and seen_match:
                line = ""  # drop duplicate matches
            else:
                line = replaceExp
                seen_match = True
        # fileinput inplace mode redirects stdout into the file.
        sys.stdout.write(line)
def update_hss(colte_data):
    """Disable the HSS's internal file log (journald already captures stdout)."""
    with open(hss, 'r+') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(hss, 'w') as config_file:
        yaml.dump(config, config_file)
def update_mme(colte_data):
    """Rewrite /etc/open5gs/mme.yaml from the colte configuration values."""
    with open(mme, 'r+') as config_file:
        config = yaml.load(config_file.read())

    def addr_list(*addresses):
        # Fresh CommentedSeq of {'addr': ...} entries, replacing any old list.
        seq = CommentedSeq()
        for address in addresses:
            seq.append({'addr': address})
        return seq

    # PLMN identity (MCC/MNC) for both the GUMMEI and the TAI.
    _create_field_if_not_exist(config, ["mme"], CommentedMap())
    _create_field_if_not_exist(config, ["mme", "gummei"], CommentedMap())
    _create_field_if_not_exist(config, ["mme", "gummei", "plmn_id"], CommentedMap())
    config["mme"]["gummei"]["plmn_id"]["mcc"] = colte_data["mcc"]
    config["mme"]["gummei"]["plmn_id"]["mnc"] = colte_data["mnc"]
    _create_field_if_not_exist(config, ["mme", "tai"], CommentedMap())
    _create_field_if_not_exist(config, ["mme", "tai", "plmn_id"], CommentedMap())
    config["mme"]["tai"]["plmn_id"]["mcc"] = colte_data["mcc"]
    config["mme"]["tai"]["plmn_id"]["mnc"] = colte_data["mnc"]
    # S1AP listens on the eNodeB-facing interface.
    config["mme"]["s1ap"] = addr_list(colte_data["enb_iface_addr"])
    _create_field_if_not_exist(config, ["mme", "network_name"], CommentedMap())
    config["mme"]["network_name"]["full"] = colte_data["network_name"]
    # GTP-C endpoints: the MME itself plus links to the SGW-C and SMF.
    config["mme"]["gtpc"] = addr_list("127.0.0.2")
    _create_field_if_not_exist(config, ["sgwc"], CommentedMap())
    config["sgwc"]["gtpc"] = addr_list("127.0.0.3")
    _create_field_if_not_exist(config, ["smf"], CommentedMap())
    config["smf"]["gtpc"] = addr_list("127.0.0.4", "::1")
    # Journald captures stdout, so internal file logging is disabled.
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(mme, 'w') as config_file:
        yaml.dump(config, config_file)
def update_pcrf(colte_data):
    """Disable the PCRF's internal file log (journald already captures stdout)."""
    with open(pcrf, 'r+') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(pcrf, 'w') as config_file:
        yaml.dump(config, config_file)
def update_sgwc(colte_data):
    """Rewrite the SGW-C endpoints and logging in /etc/open5gs/sgwc.yaml."""
    with open(sgwc, 'r') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["sgwc"], CommentedMap())
    # SGW-C serves both GTP-C and PFCP on the same loopback address.
    for protocol in ("gtpc", "pfcp"):
        endpoints = CommentedSeq()
        endpoints.append({"addr": "127.0.0.3"})
        config["sgwc"][protocol] = endpoints
    # Link towards the SGW-U.
    _create_field_if_not_exist(config, ["sgwu"], CommentedMap())
    sgwu_pfcp = CommentedSeq()
    sgwu_pfcp.append({"addr": "127.0.0.6"})
    config["sgwu"]["pfcp"] = sgwu_pfcp
    # Journald captures stdout, so internal file logging is disabled.
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(sgwc, 'w') as config_file:
        yaml.dump(config, config_file)
def update_sgwu(colte_data):
    """Rewrite the SGW-U endpoints and logging in /etc/open5gs/sgwu.yaml."""
    with open(sgwu, 'r') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["sgwu"], CommentedMap())
    # GTP-U listens on the eNodeB-facing interface; PFCP on loopback.
    gtpu = CommentedSeq()
    gtpu.append({"addr": colte_data["enb_iface_addr"]})
    config["sgwu"]["gtpu"] = gtpu
    pfcp = CommentedSeq()
    pfcp.append({"addr": "127.0.0.6"})
    config["sgwu"]["pfcp"] = pfcp
    # Link towards the SGW-C.
    # TODO(matt9j) This might be the wrong address! Not included in the default
    #create_fields_if_not_exist(sgwu_data, ["sgwc", "pfcp"])
    #sgwu_data["sgwc"]["pfcp"][0]["addr"] = "127.0.0.3"
    # Journald captures stdout, so internal file logging is disabled.
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(sgwu, 'w') as config_file:
        yaml.dump(config, config_file)
def update_smf(colte_data):
    """Rewrite the SMF endpoints, UE subnet, DNS and logging in smf.yaml."""
    with open(smf, 'r+') as config_file:
        config = yaml.load(config_file.read())

    def addr_list(*addresses):
        # Fresh CommentedSeq of {'addr': ...} entries.
        seq = CommentedSeq()
        for address in addresses:
            seq.append({'addr': address})
        return seq

    _create_field_if_not_exist(config, ["smf"], CommentedMap())
    config["smf"]["gtpc"] = addr_list("127.0.0.4", "::1")
    config["smf"]["pfcp"] = addr_list("127.0.0.4", "::1")
    # UE address pool: first host of the LTE subnet, CIDR notation.
    lte_net = IPNetwork(colte_data["lte_subnet"])
    config["smf"]["subnet"] = addr_list("{}/{}".format(lte_net[1], lte_net.prefixlen))
    dns_servers = CommentedSeq()
    dns_servers.append(colte_data["dns"])
    config["smf"]["dns"] = dns_servers
    config["smf"]["mtu"] = 1400
    # Create link to UPF.
    _create_field_if_not_exist(config, ["upf"], CommentedMap())
    config["upf"]["pfcp"] = addr_list("127.0.0.7")
    # Disable 5GC NRF link while operating EPC only.
    if "nrf" in config:
        del config["nrf"]
    # Journald captures stdout, so internal file logging is disabled.
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(smf, 'w') as config_file:
        yaml.dump(config, config_file)
def update_upf(colte_data):
    """Rewrite the UPF endpoints, UE subnet and logging in upf.yaml."""
    with open(upf, 'r+') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["upf"], CommentedMap())
    # PFCP and GTP-U both listen on the UPF loopback address.
    for protocol in ("pfcp", "gtpu"):
        endpoints = CommentedSeq()
        endpoints.append({'addr': "127.0.0.7"})
        config["upf"][protocol] = endpoints
    # UE address pool: first host of the LTE subnet, CIDR notation.
    lte_net = IPNetwork(colte_data["lte_subnet"])
    subnet = CommentedSeq()
    subnet.append({'addr': "{}/{}".format(lte_net[1], lte_net.prefixlen)})
    config["upf"]["subnet"] = subnet
    # Link to the SMF
    # TODO(matt9j) Might not be needed
    # create_fields_if_not_exist(upf_data, ["smf"]["pfcp"])
    # upf_data["smf"]["pfcp"] = {'addr': "127.0.0.3"}
    # Journald captures stdout, so internal file logging is disabled.
    _create_field_if_not_exist(config, ["logger"], CommentedMap())
    config["logger"]["file"] = "/dev/null"
    with open(upf, 'w') as config_file:
        yaml.dump(config, config_file)
def update_haulage(colte_data):
    """Rewrite the haulage metering config from the colte values."""
    with open(haulage, 'r') as config_file:
        config = yaml.load(config_file.read())
    _create_field_if_not_exist(config, ["custom"], CommentedMap())
    config["userSubnet"] = colte_data["lte_subnet"]
    # The gateway (first host in the subnet) must not be metered.
    config["ignoredUserAddresses"] = [str(IPNetwork(colte_data["lte_subnet"])[1])]
    config["custom"]["dbUser"] = colte_data["mysql_user"]
    config["custom"]["dbLocation"] = colte_data["mysql_db"]
    config["custom"]["dbPass"] = colte_data["mysql_password"]
    # Hard-coded values: open5gs always creates the ogstun device.
    config["interface"] = "ogstun"
    with open(haulage, 'w') as config_file:
        yaml.dump(config, config_file)
def _create_field_if_not_exist(dictionary, field_path, value):
current_entry = dictionary
for i, field in enumerate(field_path):
if (i == len(field_path) - 1) and (field not in current_entry or current_entry[field] is None):
current_entry[field] = value
else:
try:
current_entry = current_entry[field]
except KeyError as e:
print("Failed to create key at path", field_path)
print("Current configuration state is:", dictionary)
raise KeyError("Failed to create key at path {}, with base error {}".format(field_path, e))
def stop_all_services():
    """Stop metering, EPC, and NAT services (in that order) before reconfiguring."""
    for control in (_control_metering_services,
                    _control_epc_services,
                    _control_nat_services):
        control("stop")
def _control_metering_services(action):
    """Apply a systemctl *action* (start/stop/enable/disable) to the metering stack."""
    os.system('systemctl {} haulage colte-webgui colte-webadmin'.format(action))
def _control_nat_services(action):
    """Apply a systemctl *action* to the colte NAT service."""
    os.system('systemctl {} colte-nat'.format(action))
def _control_epc_services(action):
    """Apply a systemctl *action* to all open5gs EPC daemons."""
    os.system('systemctl {} open5gs-hssd open5gs-mmed open5gs-sgwcd open5gs-sgwud open5gs-pcrfd open5gs-smfd open5gs-upfd'.format(action))
# ANSI colour escapes for error reporting.
RED='\033[0;31m'
NC='\033[0m'

# colteconf rewrites files under /etc and restarts services, so it needs root.
if os.geteuid() != 0:
    # Fix: the message previously used shell-style "${RED}" interpolation
    # inside a plain Python string, printing the variable names literally
    # instead of the colour codes.
    print("colteconf: " + RED + "error:" + NC + " Must run as root! \n")
    exit(1)

# Read old vars and update yaml
with open(colte_vars, 'r') as file:
    colte_data = yaml.load(file.read())

# Update yaml files
update_hss(colte_data)
update_mme(colte_data)
update_pcrf(colte_data)
update_sgwc(colte_data)
update_sgwu(colte_data)
update_smf(colte_data)
update_upf(colte_data)
update_haulage(colte_data)

# Update other files
update_colte_nat_script(colte_data)
update_network_vars(colte_data)
update_env_file(webadmin_env, colte_data)
update_env_file(webgui_env, colte_data)

# always enable kernel ip_forward
enable_ip_forward()

# Restart everything to pick up new configurations, and don't restart
# networkd while the EPC or metering are running.
stop_all_services()
os.system('systemctl restart systemd-networkd')

# Start enabled services and update enabled/disabled state.
if (colte_data["metered"] == True):
    _control_metering_services("start")
    _control_metering_services("enable")
else:
    _control_metering_services("disable")

if (colte_data["epc"] == True):
    _control_epc_services("start")
    _control_epc_services("enable")
else:
    _control_epc_services("disable")

if (colte_data["nat"] == True):
    _control_nat_services("start")
    _control_nat_services("enable")
else:
    _control_nat_services("disable")
|
import os
import errno
import uuid
import settings
from unicodedata import normalize
import re
from django.db.models import Q
def create_obj2(klass_obj, params):
    """Create a *klass_obj* instance from *params* with no lookup filter
    (always creates; see create_obj)."""
    return create_obj({}, klass_obj, params)
def create_obj(id, klass_obj, params):
    """Get-or-create helper (Python 2).

    id        -- filter kwargs used to look up an existing instance; falsy
                 means "always create" (NOTE(review): shadows the builtin).
    klass_obj -- Django model class.
    params    -- constructor kwargs for a new instance.

    Returns the existing or newly validated-and-saved instance.
    """
    obj = None
    if id:
        try:
            obj = klass_obj.objects.get(**id)
        except klass_obj.DoesNotExist:
            obj = None
    if obj is not None:
        print "%s (%s) ja existe" % (klass_obj.__name__, obj.id)
    else:
        obj = klass_obj(**params)
        # Run model validation before persisting.
        obj.clean()
        obj.save()
        print "%s (%s) criado com sucesso..." % (klass_obj.__name__, obj.id)
    return obj
def mkdir_p(path):
    """Create *path* and all missing parents, like ``mkdir -p``.

    An already-existing directory is not an error; any other OSError
    (permissions, a file in the way, ...) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def gen_num_pedido():
    """Generate an order number: 'BR' followed by 6 random hex digits."""
    return 'BR' + uuid.uuid4().hex[:6]
def gen_num_protocolo():
    """Generate a protocol number: 8 random hex digits."""
    return uuid.uuid4().hex[:8]
def gen_random():
    """Return a 16-character random hex token.

    Fix: the original bound the intermediate value to a local named ``str``,
    shadowing the builtin; renamed.
    """
    token = uuid.uuid4().hex
    return token[0:16]
def to_ascii(txt, codif='utf-8'):
    """Transliterate *txt* to plain ASCII bytes, dropping accents via NFKD.

    codif -- encoding used to decode byte strings before normalizing.

    NOTE(review): Python 2 only (relies on basestring/unicode); non-string
    inputs are coerced with unicode() first.
    """
    if not isinstance(txt, basestring):
        txt = unicode(txt)
    if isinstance(txt, unicode):
        txt = txt.encode('utf-8')
    return normalize('NFKD', txt.decode(codif)).encode('ASCII', 'ignore')
def doc_location(instance, filename):
    """Build the upload path for a model document: MODEL_DOC_ROOT/<ModelName>/<ident>/<filename>.

    instance -- model instance; its 'nome' (or, failing that, 'codigo')
                attribute names the per-instance folder.
    filename -- original file name appended to the path.

    Ensures the directory exists under MEDIA_ROOT and returns the relative
    path with forward slashes (Django storage convention).

    Fix: the folder-name local was called ``id``, shadowing the builtin.
    """
    ident = instance.nome if hasattr(instance, 'nome') else instance.codigo
    root_path = os.path.join(settings.MODEL_DOC_ROOT, type(instance).__name__, ident)
    full_path = settings.MEDIA_ROOT + os.path.join('/', root_path)
    if not os.path.exists(full_path):
        os.makedirs(full_path)
    return os.path.join(root_path, filename).replace('\\', '/')
def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, normspace=re.compile(r'\s{2,}').sub):
    ''' Splits the query string in individual keywords, getting rid of unnecessary spaces
        and grouping quoted words together.
        Example:
        >>> normalize_query('  some random words "with   quotes  " and   spaces')
        ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
    '''
    terms = []
    # findterms yields (quoted, bare) pairs; exactly one member is non-empty.
    for quoted, bare in findterms(query_string):
        term = (quoted or bare).strip()
        terms.append(normspace(' ', term))
    return terms
def get_query(query_string, search_fields):
    ''' Returns a query, that is a combination of Q objects. That combination
        aims to search keywords within a model by testing the given search fields.

        Each term must match at least one field (OR within a term); all terms
        must match (AND across terms). Returns None for an empty query string.
    '''
    query = None
    for term in normalize_query(query_string):
        or_query = None
        for field_name in search_fields:
            q = Q(**{"%s__icontains" % field_name: term})
            or_query = q if or_query is None else (or_query | q)
        query = or_query if query is None else (query & or_query)
    return query
def merge_dicts(*dict_args):
    '''
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.
    '''
    return {key: value for d in dict_args for key, value in d.items()}
from django.shortcuts import render
from django.contrib.auth import get_user_model
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework import status
from .serializers import RegisterSerializer
User = get_user_model()
class RegisterView(CreateAPIView):
    """
    User registration endpoint.

    POST creates a new user through RegisterSerializer; open to anonymous
    clients (AllowAny).
    """
    serializer_class = RegisterSerializer
    permission_classes = [AllowAny, ]
    queryset = User.objects.all()
|
from main_files.db.connect_db import CONNECTION
cur = CONNECTION.cursor()
query = """
UPDATE symptom_tag_relations
SET
symptom_id = %s,
tag_id = %s
WHERE symptom_tag_relation_id = %s
"""
def update_symptom_tag_relations(symptom_id=None, tag_id=None, symptom_tag_relation_id=None):
    """Update one symptom_tag_relations row and commit.

    symptom_id / tag_id        -- new values for the row
    symptom_tag_relation_id    -- primary key of the row to update

    Fix: the UPDATE statement has three placeholders but only two values
    were bound, so every call failed; the WHERE id is now passed as a
    (new, defaulted) third parameter.
    """
    cur.execute(query, [
        symptom_id,
        tag_id,
        symptom_tag_relation_id
    ])
    CONNECTION.commit()
# cur.close()
|
# coding: utf-8
# In[ ]:
from hdx.utilities.easy_logging import setup_logging
from hdx.hdx_configuration import Configuration
from hdx.data.dataset import Dataset
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
import re
# In[ ]:
# Configure HDX SDK logging and connect read-only to the production HDX site.
setup_logging()
Configuration.create(hdx_site='prod', user_agent='A_Quick_Example', hdx_read_only=True)
# In[ ]:
#Check if the dataset has at least 1 resource of the required file type(s).
def check_type(dataset, file_types=None):
    """Return True if *dataset* has at least one resource and, when
    *file_types* is given, at least one resource of a required file type.

    dataset    -- HDX dataset id/name, resolved via Dataset.read_from_hdx
    file_types -- optional list of required file types; empty/None means
                  "any resource counts"

    Fix: ``file_types`` previously defaulted to a mutable ``[]``; ``None``
    avoids the shared-default pitfall with identical behavior.
    """
    temp_dataset = Dataset.read_from_hdx(dataset)
    temp_dataset.separate_resources()
    if temp_dataset.resources:
        if file_types:
            # True when the dataset's file types intersect the required ones.
            if not set(temp_dataset.get_filetypes()).isdisjoint(file_types):
                return True
        else:
            return True
    return False
# In[ ]:
# Check if the dataset is tagged with HXL tag, not provided by HXL
def check_organization(dataset):
    """Return True unless the dataset is provided by the HXL organization itself."""
    provider = dataset.get_organization()['title']
    return provider != 'Humanitarian Exchange Language(HXL)'
# In[ ]:
nltk.download('stopwords')
from nltk.corpus import stopwords
#CLEANING AND GENERATING N-GRAMS
def lower_cols(lst):
    """Lower-case every string in *lst*, returning a new list."""
    lowered = []
    for word in lst:
        lowered.append(word.lower())
    return lowered
#Question: is HXL Core Schema.csv something we can use for comparing words??
#This method is going to take up a lot of space and time. Is it worth it? Are there any other ways to go about it?
def remove_chars(lst):
    """Collapse whitespace and replace punctuation/underscores with spaces.

    Fixes:
    - '\\s+' was written in a non-raw string (invalid escape sequence);
    - the punctuation pattern '[[^A-Za-z0-9\\s]+]' was a malformed character
      class that actually matched alphanumerics/whitespace followed by ']'
      -- the comment says the intent is to remove punctuation such as
      ',', '(', ')', '"', ':', '/', '.', so the pattern is now one-or-more
      characters that are NOT alphanumeric or whitespace.
    White space is preserved (single spaces).
    """
    cleaned = [re.sub(r'\s+', ' ', text).strip() for text in lst]
    cleaned = [re.sub(r'[^A-Za-z0-9\s]+', ' ', text) for text in cleaned]
    return [text.replace('_', ' ') for text in cleaned]
# English stopword set from NLTK (requires the nltk.download('stopwords') above).
stopWords = set(stopwords.words('english'))
def remove_stop_words(data_lst):
    """Drop English stopwords ('the', 'and', ...) from *data_lst*."""
    return [word for word in data_lst if word not in stopWords]
def clean_cols(data):
    """Lower-case *data*, then strip punctuation and collapse whitespace."""
    return remove_chars(lower_cols(data))
# In[ ]:
# Download one dataset with certain type(s), read it into Dataframe,
# add all headers, tags and dataset names to our DataFrame,
# and delete the dataset
def process_dataset_2(dataset, file_type, dataframe, download_path, index, row_limit = 10):
    """Download one resource of *dataset*, parse its HXL header/tag rows,
    and append one row per column to *dataframe* (mutated in place).

    dataset       -- hdx Dataset; the first resource of *file_type* is fetched
    file_type     -- required resource type (e.g. 'CSV'); None = first resource
    dataframe     -- accumulator DataFrame with the headers_and_tags columns
    download_path -- directory for the temporary download (file removed after)
    index         -- value stored in the 'Index' column for every row added
    row_limit     -- keep at most this many data rows from the file

    Returns None on success, or an error-message string on failure.
    Increments the module-level ``count`` on header/tag mismatches.

    Fixes: bare ``except:`` clauses (which also swallowed SystemExit and
    KeyboardInterrupt) narrowed to ``except Exception``; ``== None`` -> ``is None``.
    """
    global count
    organization = ""
    # Download one dataset and read it into a DataFrame.
    if file_type is None:
        url, path = dataset.resources[0].download(download_path)
        pandas_dataset = pd.read_csv(path)
    else:
        if file_type not in dataset.get_filetypes():
            return 'Error: Required file type not in dataset OR dataset does not contain any resources.'
        try:
            url, path = dataset.resources[dataset.get_filetypes().index(file_type)].download(download_path)
            organization = dataset.get_organization()['title']
            print('Resource URL %s downloaded to %s' % (url, path))
            pandas_dataset = pd.read_csv(path, encoding='latin-1')
            pandas_dataset = pandas_dataset.head(row_limit)
        except Exception:
            return 'Unknown error.'
    # Add headers, tags and data to our DataFrame if the dataset is not empty.
    if not pandas_dataset.empty:
        dataset_df = pandas_dataset
        headers = clean_cols(list(dataset_df.columns.values))
        # Row 0 holds the HXL tag line: '#tag +attr +attr ...'
        tags = list(dataset_df.iloc[0, :])
        for i in range(len(headers)):
            try:
                splitted = list(filter(None, re.split(r'[(^\s+)+#]', tags[i])))
                hashtag = splitted[0]
                attributes = splitted[1:]
                dataframe.loc[len(dataframe)] = {
                    'Header': headers[i],
                    'Tag': hashtag,
                    'Attributes': attributes,
                    'Data': list(dataset_df.iloc[1:, i]),
                    'Relative Column Position': (i + 1) / len(dataset_df.columns),
                    'Dataset_name': os.path.basename(path),
                    'Organization': organization,
                    'Index': index}
            except Exception:
                # Tag row shorter than the header row, or tag not a string.
                print("Error: different number of headers and tags")
                count += 1
    # Clean up the downloaded file once its rows have been harvested.
    os.remove(path)
    print("File Removed!")
    return
# In[ ]:
# Search for all datasets with HXL tags
datasets_HXL = Dataset.search_in_hdx('HXL')
# Notebook artifact: bare expression, no effect when run as a script.
len(datasets_HXL)
# In[ ]:
# Create a DataFrame for all headers and tags
col_names = ['Header', 'Tag', 'Attributes','Data','Relative Column Position','Dataset_name', 'Organization','Index']
headers_and_tags= pd.DataFrame(columns = col_names)
# In[ ]:
# Sample 150 random HXL datasets (with replacement) and accumulate their
# header/tag rows; 'count' is also incremented inside the helper on
# header/tag mismatches.
count = 0
for i in range(150):
    rand_dataset = np.random.randint(0, len(datasets_HXL))
    process_dataset_2(datasets_HXL[rand_dataset], 'CSV', headers_and_tags, './datasets', count)
    print(i)
# In[ ]:
headers_and_tags.to_excel("headerandtag.xlsx")
# In[ ]:
# Notebook artifact: the .head() result is discarded outside a notebook.
headers_and_tags.head(200)
# In[ ]:
#counting column names that appear the most frequently in the set:
from collections import Counter
counts = Counter(headers_and_tags.iloc[:, 0])
print(counts)
# In[ ]:
#implementing Bag Of Words Model
#STEPS
#1) collect all col_names that have the same underlying meaning ex: deaths/fatalities etc. (see tags)
#2) construct another dataframe that consists of the following headers:
#3) [Header, Word Frequency]... could be a dictionary or multi-indexed dataframe where second column is all the words that
#appear in the data under the given col name and the value would be count of word that appears.
#4) --> feature.
# In[ ]:
#implementing n-grams Model
import nltk
def generate_n_grams(data_lst, n):
    """Clean *data_lst* (strip chars, lower-case, drop stopwords) and return
    its n-grams as a list of tuples.

    NOTE(review): relies on module-level ``ngrams`` -- imported from nltk
    further down the file and later shadowed by a variable of the same
    name, so calls after that rebinding would fail; confirm import order.
    """
    tokens = remove_chars(list(data_lst))
    tokens = remove_stop_words(clean_cols(tokens))
    return list(ngrams(tokens, n))
# In[ ]:
#creating a n-gram frequency table (this is hopefully useful for determining if adjacent columns have effect on tags)
#this can also be applied to exploring correlation between tags and attributes.
#any applications to the data itself? Should I treat all of the data as a single list of words? Example:
#currently thinking of using BOW to the data itself?
def count_stats_grams(two_d_arr):
    """Return (unique, singles, multiples) counts over a list of n-grams.

    two_d_arr -- list of hashable grams (tuples from generate_n_grams)

    Returns:
        count           -- number of distinct grams
        singles_count   -- distinct grams occurring exactly once
        multiples_count -- distinct grams occurring more than once

    Fix: the original tracked "seen" grams with ``np.append(lst, arr)``,
    which returns a new array instead of mutating ``lst``, so the seen-set
    stayed empty and duplicated grams were recounted on every occurrence
    (each via an O(n) ``list.count`` scan). A frequency dict fixes both
    the correctness and the quadratic cost.
    """
    frequencies = {}
    for gram in two_d_arr:
        frequencies[gram] = frequencies.get(gram, 0) + 1
    count = len(frequencies)
    singles_count = sum(1 for occurrences in frequencies.values() if occurrences == 1)
    multiples_count = count - singles_count
    return count, singles_count, multiples_count
def n_gram_freqs(dataframe, max_n = 4):
    """Tabulate n-gram statistics over dataframe['Header'] for n = 1..max_n.

    Returns a DataFrame with one row per n: the grams themselves plus the
    unique/singles/multiples counts from count_stats_grams.
    """
    columns = ['n-gram', 'data', 'unique ngrams', 'multiples', 'singles']
    stats = pd.DataFrame(columns=columns)
    for n in range(1, max_n + 1):
        grams = generate_n_grams(dataframe['Header'], n)
        unique_count, singles, multiples = count_stats_grams(grams)
        stats.loc[len(stats)] = {'n-gram': n,
                                 'data': grams,
                                 'unique ngrams': unique_count,
                                 'multiples': multiples,
                                 'singles': singles}
    return pd.DataFrame(stats)
# In[ ]:
#Takes a data row and cleans it for model input
import ast
import itertools
def word_extract(row):
    """Lower-case the usable strings in *row* for model input.

    Floats (e.g. NaN cells) and literal 'nan' strings are skipped; remaining
    entries are left-stripped, re-checked against the ignore list, and
    lower-cased.
    """
    ignore = ['nan']
    stripped = [item.lstrip() for item in row
                if item not in ignore and not isinstance(item, float)]
    return [word.lower() for word in stripped if word not in ignore]
# Build one space-joined document per 'Data' cell for the bag-of-words model.
long_string = []
for i in headers_and_tags['Data']:
    result_by_tag = word_extract(i)
    holder_list = ' '.join(result_by_tag)
    long_string.append(holder_list)
# In[ ]:
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words term counts; one row per document in long_string.
vectorizer = CountVectorizer()
corpus = long_string
X_vecs = vectorizer.fit_transform(corpus)
#np.set_printoptions(threshold = np.inf)
#print(X.toarray())
# In[ ]:
print(np.shape(X_vecs.toarray()))
# In[ ]:
#testing MLP Classifier
from sklearn.neural_network import MLPClassifier
from fastText import load_model
from sklearn.model_selection import train_test_split
# Pre-trained English fastText vectors; the .bin must exist locally.
fasttext_model = 'wiki.en.bin'
fmodel = load_model(fasttext_model)
print("Pre-trained model loaded successfully!\n")
# In[ ]:
#Classification using only headers
# NOTE(review): 'df' aliases headers_and_tags, so the embedding columns added
# below mutate the shared DataFrame used by the later cells.
df = headers_and_tags
df['Header_embedding'] = df['Header'].map(lambda x: fmodel.get_sentence_vector(str(x)))
df['Organization_embedded'] = df['Organization'].map(lambda x: fmodel.get_sentence_vector(str(x)))
#df['data_embedding'] = df['Data'].map(lambda lst: [fmodel.get_sentence_vector(str(x)) for x in lst])
#df['ngram']
print("Word embeddings extracted!\n")
#test = df['Header_embedding'] + df['Organization_embedded']
#combining so that the matrix will be in the right shape...
#not sure if this will make sense...
#print(np.shape(test))
# Predict the HXL tag from the header's sentence embedding alone.
X_train, X_test, y_train, y_test = train_test_split(df['Header_embedding'],
    df['Tag'], test_size=0.33, random_state=0)
clf = MLPClassifier(activation='relu', alpha=0.001, epsilon=1e-08, hidden_layer_sizes=150, solver='adam')
#have to ensure X_train is the right shape
#temp = X_train.to_csv('temp.csv', header = False)
#X_train = pd.read_csv('temp.csv', header = None)
#os.remove('temp.csv')
#X_train.index = X_train['0']
#X_train.columns = [x for x in range(len(X_train.columns))]
#X_train = np.reshape(X_train, (len(X_train), len(X_train.columns)))
clf.fit(X_train.values.tolist(), y_train.values.tolist())
test_score = clf.score(X_test.tolist(), y_test.tolist())
print("Classification accuracy on test set: %s" %test_score)
# In[ ]:
#classification using organization
# Same pipeline as the header cell above, but trains on the organization
# embedding instead (the embedding columns are recomputed redundantly here).
df = headers_and_tags
df['Header_embedding'] = df['Header'].map(lambda x: fmodel.get_sentence_vector(str(x)))
df['Organization_embedded'] = df['Organization'].map(lambda x: fmodel.get_sentence_vector(str(x)))
#df['data_embedding'] = df['Data'].map(lambda lst: [fmodel.get_sentence_vector(str(x)) for x in lst])
#df['ngram']
print("Word embeddings extracted!\n")
#test = df['Header_embedding'] + df['Organization_embedded']
#combining so that the matrix will be in the right shape...
#not sure if this will make sense...
#print(np.shape(test))
X_train, X_test, y_train, y_test = train_test_split(df['Organization_embedded'],
    df['Tag'], test_size=0.33, random_state=0)
clf = MLPClassifier(activation='relu', alpha=0.001, epsilon=1e-08, hidden_layer_sizes=150, solver='adam')
#have to ensure X_train is the right shape
#temp = X_train.to_csv('temp.csv', header = False)
#X_train = pd.read_csv('temp.csv', header = None)
#os.remove('temp.csv')
#X_train.index = X_train['0']
#X_train.columns = [x for x in range(len(X_train.columns))]
#X_train = np.reshape(X_train, (len(X_train), len(X_train.columns)))
clf.fit(X_train.values.tolist(), y_train.values.tolist())
test_score = clf.score(X_test.tolist(), y_test.tolist())
print("Classification accuracy on test set: %s" %test_score)
# In[ ]:
from nltk import ngrams
from sklearn.feature_extraction.text import CountVectorizer
# NOTE(review): this rebinding shadows the nltk 'ngrams' function just
# imported above, so any later call to generate_n_grams would fail -- confirm
# intended execution order before reuse.
ngrams = generate_n_grams(headers_and_tags['Header'], 3)
# Identity tokenizer: each n-gram tuple is treated as a pre-tokenized document.
vectorizer = CountVectorizer(tokenizer=lambda doc: doc, lowercase=False)
X_vec_grams = vectorizer.fit_transform(ngrams)
print(np.shape(X_vec_grams.toarray()))
print(np.shape(X_vecs.toarray()))
# In[ ]:
#testing MLP Classifier on BOW
# NOTE(review): df_2 aliases headers_and_tags; assigning a 2-D array into a
# single column relies on pandas broadcasting -- confirm shapes match.
df_2 = headers_and_tags
df_2['BOW_counts'] = X_vecs.toarray()
# Train directly on the dense bag-of-words matrix.
X_train, X_test, y_train, y_test = train_test_split(X_vecs.toarray(),
    df['Tag'], test_size=0.33, random_state=0)
clf = MLPClassifier(activation='relu', alpha=0.001, epsilon=1e-08, hidden_layer_sizes=150, solver='adam')
#have to ensure X_train is the right shape
#temp = X_train.to_csv('temp.csv', header = False)
#X_train = pd.read_csv('temp.csv', header = None)
#os.remove('temp.csv')
#X_train.index = X_train['0']
#X_train.columns = [x for x in range(len(X_train.columns))]
#X_train = np.reshape(X_train, (len(X_train), len(X_train.columns)))
clf.fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
print("Classification accuracy on test set: %s" %test_score)
# In[ ]:
#testing MLP Classifier on ngrams
df_3 = headers_and_tags
# The n-gram matrix has fewer rows than df (n-grams span tokens), so the
# label column is truncated to match.
X_train, X_test, y_train, y_test = train_test_split(X_vec_grams.toarray(),
    df['Tag'][0:len(X_vec_grams.toarray())], test_size=0.33, random_state=0)
clf = MLPClassifier(activation='relu', alpha=0.001, epsilon=1e-08, hidden_layer_sizes=150, solver='adam')
#have to ensure X_train is the right shape
#temp = X_train.to_csv('temp.csv', header = False)
#X_train = pd.read_csv('temp.csv', header = None)
#os.remove('temp.csv')
#X_train.index = X_train['0']
#X_train.columns = [x for x in range(len(X_train.columns))]
#X_train = np.reshape(X_train, (len(X_train), len(X_train.columns)))
clf.fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
print("Classification accuracy on test set: %s" %test_score)
# In[ ]:
#transforming data into right form
# Assemble one feature table with all four feature families, truncated to the
# number of n-gram rows so the columns align.
df_target = headers_and_tags
df_target['BOW_counts'] = [item for item in X_vecs.toarray()]
n = len(X_vec_grams.toarray())
df_target = df.iloc[0:n, :]
df_target['ngrams_counts'] = [item for item in X_vec_grams.toarray()]
df_target = df_target[['Header_embedding',
    'Organization_embedded',
    'BOW_counts',
    'ngrams_counts']]
# Notebook artifact: .head() result discarded outside a notebook.
df_target.head()
#print(X_vecs.toarray()[0])
# In[ ]:
#Using VotingClassifier and Pipeline to combine all features to predict the 'most voted' output
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import TransformerMixin, BaseEstimator
# custom transformer for sklearn pipeline
class ColumnExtractor(TransformerMixin, BaseEstimator):
    """Sklearn pipeline transformer that keeps only the given column indices."""

    def __init__(self, cols):
        # Iterable of column indices to extract.
        self.cols = cols

    def transform(self, X):
        # Slice each requested column as a 2-D (n, 1) block, then stack them.
        selected = [X[:, c:c + 1] for c in self.cols]
        return np.concatenate(selected, axis=1)

    def fit(self, X, y=None):
        # Stateless: nothing to learn.
        return self
# NOTE(review): ColumnExtractor(cols = <int>) iterates self.cols in
# transform(), so an int raises TypeError; also X[:, c:c+1] indexing expects
# an array, not the DataFrame passed here -- confirm this cell ever ran.
X_train, X_test, y_train, y_test = train_test_split(df_target, df['Tag']
    [0:len(X_vec_grams.toarray())])
header_pipe = Pipeline([
    ('col_extract', ColumnExtractor(cols = 1)),
    ('clf', clf)
])
org_pipe = Pipeline([
    ('col_extract', ColumnExtractor(cols = 2)),
    ('clf', clf)
])
BOW_pipe = Pipeline([
    ('col_extract', ColumnExtractor(cols = 3)),
    ('clf', clf)
])
Ngram_pipe = Pipeline([
    ('col_extract', ColumnExtractor(cols = 4)),
    ('clf', clf)
])
# Soft-vote the four feature pipelines, weighting headers highest.
eclf = VotingClassifier(estimators = [('p1', header_pipe),
    ('p2', org_pipe),
    ('p3', BOW_pipe),
    ('p4', Ngram_pipe)],
    voting = 'soft',
    weights = [1, 0.5, 0.5, 0.5])
eclf.fit(X_train, y_train)
eclf_score = eclf.score(X_test, y_test)
print("Classification accuracy on test set: %s" %eclf_score)
|
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + os.sep + "..")
from class_pixeltable import PixelTable
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib._png import read_png
from matplotlib.cbook import get_sample_data
import numpy as np
# Input pixel tables: raw experimental data and its interpolated counterpart.
EXP_PATH = ".." + os.sep + "Data.pixtab"
INT_PATH = ".." + os.sep + "Interpolated_Data.pixtab"
# Default DataFrame column names for the three plot axes.
X_AXIS = "x"
Y_AXIS = "y"
Z_AXIS = "alt"
# Default: do not save the animation to disk.
SAVE_PATH = None
def update_3d_lines(i, tables, lines, title, x_axis, y_axis, z_axis):
    """FuncAnimation callback: show the slice of each table where
    x_axis == i, and refresh the plot title accordingly.

    Returns the mutated line artists so the animation can redraw them.
    """
    title.set_text('Interpolating vs Experimental data on {}={}'.format(x_axis,
                                                                        i))
    for curve, table in zip(lines, tables):
        # NOTE: there is no .set_data() for 3 dim data...
        frame = table.df.query("{}=={}".format(x_axis, i))
        curve.set_data(frame[x_axis].values, frame[y_axis].values)
        curve.set_3d_properties(frame[z_axis].values)
    return lines
def animation_3d_plot_experimental_vs_interpolation(experimental_table,
                                                    interpolated_table,
                                                    x_axis = X_AXIS,
                                                    y_axis = Y_AXIS,
                                                    z_axis = Z_AXIS,
                                                    save_path = SAVE_PATH):
    """Animate experimental points against the interpolated curve in 3-D.

    Seeds both artists with the slice at x_axis == 260, then lets
    update_3d_lines() advance the slice for frames 261..1199.

    :param experimental_table: PixelTable holding the measured data.
    :param interpolated_table: PixelTable holding the interpolated data.
    :param x_axis: column name animated over (the frame index).
    :param y_axis: column plotted on the Y axis.
    :param z_axis: column plotted on the Z axis.
    :param save_path: file name under "animations/" for an ffmpeg export;
        None (default) only shows the interactive window.
    :return: the FuncAnimation object (keep a reference alive, or the
        animation stops updating when it is garbage-collected).
    """
    # Initial frame: both tables sliced at x_axis == 260.
    exp_n_table = experimental_table.df.query("{}==260".format(x_axis))
    exp_x_dat = exp_n_table[x_axis].values
    exp_y_dat = exp_n_table[y_axis].values
    exp_z_dat = exp_n_table[z_axis].values
    int_n_table = interpolated_table.df.query("{}==260".format(x_axis))
    int_x_dat = int_n_table[x_axis].values
    int_y_dat = int_n_table[y_axis].values
    int_z_dat = int_n_table[z_axis].values
    fig = plt.figure()
    ax = p3.Axes3D(fig)
    tables = [experimental_table, interpolated_table]
    # plot() returns a list; [0] grabs the Line3D artist for each series.
    lines = [ax.plot(exp_x_dat, exp_y_dat, exp_z_dat, "r*",
             markersize=4, label = "Experimental")[0],
             ax.plot(int_x_dat, int_y_dat, int_z_dat, "b",
             linewidth=1, label = "Interpolation")[0]]
    # Setting the axes properties (fixed limits so the camera never jumps).
    ax.set_xlim3d([0.0, 1500.0])
    ax.set_xlabel('X Pixel')
    ax.set_ylim3d([0.0, 900.0])
    ax.set_ylabel('Y Pixel')
    ax.set_zlim3d([0.0, 100.0])
    ax.set_zlabel('Altitude')
    title = ax.set_title('Interpolating vs Experimental data')
    plt.legend()
    # Creating the Animation object; blit=False because 3-D axes redraw fully.
    line_ani = animation.FuncAnimation(fig, update_3d_lines, range(261,1200),
                                       fargs=(tables, lines, title,
                                              x_axis, y_axis, z_axis),
                                       interval=2, blit=False)
    if save_path:
        # Export requires ffmpeg to be installed on the system.
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=15, metadata=dict(artist='Carlos Quezada'), bitrate=1800)
        line_ani.save("animations" + os.sep + save_path, writer = writer)
    plt.show()
    return line_ani
if __name__=="__main__":
    # Load both pixel tables from disk and run the interactive animation.
    experimental_table = PixelTable(data_path = EXP_PATH)
    interpolated_table = PixelTable(data_path = INT_PATH)
    animation_3d_plot_experimental_vs_interpolation(experimental_table, interpolated_table)
# fn = get_sample_data(cwd + os.sep + "background.png", asfileobj=False)
# arr = read_png(fn)
# # 10 is equal length of x and y axises of your surface
# stepX, stepY = 1549. / arr.shape[0], 1041. / arr.shape[1]
#
# X1 = np.arange(0, 1549, 1.)
# Y1 = np.arange(0, 1041, 1.)
# X1, Y1 = np.meshgrid(X1, Y1)
# # stride args allows to determine image quality
# # stride = 1 work slow
# print("ok")
# ax.plot_surface(X1, Y1, np.atleast_2d(0.0), rstride=10, cstride=10, facecolors=arr)
|
# Imports
import models
from flask import Blueprint, request, jsonify
from playhouse.shortcuts import model_to_dict
# ------------------------------------------------------------------------------------------------------
# Blueprints
park = Blueprint('park', 'park', url_prefix="/api/v1")
# ------------------------------------------------------------------------------------------------------
# Decorations and Functions
@park.route('/', methods=["GET"])
def get_all_parks():
    """Return every park as a JSON list, or an error payload on failure."""
    try:
        parks = []
        for row in models.Park.select():
            parks.append(model_to_dict(row))
        return jsonify(data=parks, status={"code": 200, "message": "Success"})
    except models.DoesNotExist:
        return jsonify(data={}, status={"code": 401, "message": "There was an error getting the resource"})
@park.route('/', methods=["POST"])
def create_parks():
    """Create a park from the JSON request body; return it with code 201.

    Bug fix: the original never persisted anything -- it called
    model_to_dict() on the *blueprint* object named ``park`` instead of a
    newly created Park row, so POST could not work.
    """
    payload = request.get_json()
    new_park = models.Park.create(**payload)
    park_dict = model_to_dict(new_park)
    return jsonify(data=park_dict, status={"code": 201, "message": "Success"})
@park.route('/<id>', methods=["GET"])
def get_one_park(id):
    """Fetch a single park by primary key and return it as JSON."""
    found = models.Park.get_by_id(id)
    return jsonify(data=model_to_dict(found), status={"code": 200, "message": "Success"})
@park.route('/<id>', methods=["PUT"])
def update_park(id):
    """Apply the JSON body as an update to one park; return the new state.

    Bug fix: the original stored the refreshed row in ``update_park``
    (shadowing this view function) but then serialized the undefined name
    ``updated_park``, so every PUT raised NameError.
    """
    payload = request.get_json()
    query = models.Park.update(**payload).where(models.Park.id == id)
    query.execute()
    # Re-fetch so the response reflects what is actually stored.
    updated_park = models.Park.get_by_id(id)
    return jsonify(data=model_to_dict(updated_park), status={"code": 200, "message": "Success"})
@park.route('/<id>', methods=["DELETE"])
def delete_park(id):
    """Delete one park by primary key.

    Consistency fix: the HTTP method is spelled "DELETE" (uppercase) to
    match the other routes; Flask uppercases method names internally, so
    runtime behavior is unchanged.
    """
    query = models.Park.delete().where(models.Park.id == id)
    query.execute()
    return jsonify(data='resources successfully deleted', status={"code": 200, "message": "Resource Deleted"})
|
from django.contrib import admin
from censo.models import *
# Register every censo model with the default admin site so each one gets
# the stock list/add/change pages.
for _model in (
    Encuestador,
    Sector,
    Colonia,
    Parentesco,
    Religion,
    Sexo,
    Calle,
    Encuesta,
    Familia,
    Persona,
):
    admin.site.register(_model)
__all__ = ["response", "repeat", "replay"]
class EventHook:
    """A minimal C#-style event: subscribe with ``+=``, unsubscribe with
    ``-=``, and invoke every subscriber via :meth:`fire`."""

    def __init__(self):
        # Subscribers, invoked in subscription order.
        self._handlers = []

    def __iadd__(self, handler):
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        self._handlers.remove(handler)
        return self

    def fire(self, **kwargs):
        # Forward the keyword arguments to every subscriber.
        for callback in self._handlers:
            callback(**kwargs)
repeat = EventHook()  # arguments: parameters
replay = EventHook()
# Bug fix: __all__ at the top of this module exports "response", but no
# such hook was ever created, so `from <module> import *` raised
# AttributeError.  Define it like its siblings.
response = EventHook()
|
from contracts import contract
__all__ = ['PlanningResult']
class PlanningResult(object):
    """ Results of planning. (more fields might be added in the future) """

    @contract(success='bool', plan='None|seq(int)', status='None|str',
              extra='None|dict')
    def __init__(self, success, plan, status, extra=None):
        '''
            :param success: True if planning was succesful.
            :param plan: The plan as a sequence of integer, or None.
            :param status: Short status line for debugging purposes.
            :param extra: Extra information used for visualization
                (defaults to a fresh empty dict).
        '''
        self.success = success
        if plan is not None:
            plan = tuple(plan)
        self.plan = plan
        self.status = status
        # Bug fix: the default used to be the shared mutable dict ``{}``,
        # so every instance created without `extra` aliased the same
        # object and mutations leaked between results.
        self.extra = {} if extra is None else extra
|
from mrjob.job import MRJob
from mrjob.step import MRStep
from itertools import tee
import re
import sys
WORD_RE = re.compile(r"[\w']+")
class MRWordProbability(MRJob):
    """For every bigram (w1, w2) in the input text, emit the conditional
    probability of w2 following w1 together with the raw count, and
    additionally report the ten most common words following "my"."""

    def steps(self):
        """Chain three MapReduce steps: extract text, count bigrams,
        convert counts into probabilities."""
        return [
            # Pull strings out of the csv
            MRStep(mapper=self.mapper_pull_csv),
            # Produce bigrams from the string
            MRStep(mapper=self.mapper_get_bigrams,
                combiner=self.combiner_count_bigrams,
                reducer=self.reducer_count_bigrams),
            # Calculate percents and most common occurences
            MRStep(reducer=self.reducer_calculate_percents)
        ]

    def mapper_pull_csv(self, _, line):
        # Skip lines starting with a double quote; keep everything from
        # the first comma on, lower-cased.
        # NOTE(review): line[0] raises IndexError on a blank line --
        # assumes the csv contains none; confirm against the input data.
        if(line[0] != '"'):
            yield (None, line[line.find(","):].lower())

    def mapper_get_bigrams(self, _, line):
        # Emit ((previous_word, word), 1) for every adjacent word pair.
        prevWord = ""
        # Use regex to find words
        for word in WORD_RE.findall(line):
            if(prevWord != ""):
                yield ((prevWord, word), 1)
            prevWord = word

    def combiner_count_bigrams(self, word, counts):
        # Local pre-aggregation of bigram counts to reduce shuffle volume.
        yield (word, sum(counts))

    def reducer_count_bigrams(self, word, counts):
        # Re-key by the first word so the next step sees all successors of
        # w1 together:  w1 -> (count, w2).
        first_word, second_word = word
        yield first_word, (sum(counts), second_word)

    def mostUsed(self, x):
        # Sort key: the occurrence count of a (count, word) pair.
        num, word = x
        return num

    def reducer_calculate_percents(self, word, pairs):
        # `pairs` is a one-shot generator, so tee() it: one copy to total
        # the counts, another to sort and emit probabilities.
        # NOTE(review): sortedPairs from the second tee() is never used.
        total = 0
        pairs, secondPairs = tee(pairs)
        pairs, sortedPairs = tee(pairs)
        for pair in pairs:
            tmpCnt, _ = pair
            total = total + tmpCnt
        probabilityList = sorted(secondPairs, key=self.mostUsed, reverse = True)
        for anotherPair in probabilityList:
            word_count, word_key = anotherPair
            yield (word, word_key), ((float(word_count) / total), word_count)
        # Special report: the 10 most frequent successors of "my".
        if (word == "my"):
            for i in range(10):
                if i == len(probabilityList):
                    break
                word_count, word_key = probabilityList[i]
                yield 'Most used number ' + str(i+1), ((word, word_key), word_count / total, word_count)
# Run the program
if __name__ == '__main__':
    # mrjob parses the command line and drives the configured steps.
    MRWordProbability.run()
from BrickPi import * #import BrickPi.py file to use BrickPi operations
import math
import time
BrickPiSetup() # setup the serial port for sudo su communication
BrickPiSetupSensors() #Send the properties of sensors to BrickPi
BrickPiUpdateValues()
# Wheel diameter in mm and the resulting circumference.  NOTE(review):
# O is never used below -- presumably meant for converting encoder ticks
# to travelled distance; confirm.
d=56
O=math.pi*d
# Zero both motor encoders by copying the current raw reading into the
# offset, so subsequent Encoder values are relative to this position.
BrickPi.EncoderOffset[PORT_C] = BrickPi.Encoder[PORT_C]
BrickPi.EncoderOffset[PORT_D] = BrickPi.Encoder[PORT_D]
# Dump encoder state before the move (Python 2 print statements).
print BrickPi.Encoder[PORT_C]
print BrickPi.EncoderOffset[PORT_C]
print BrickPi.Encoder[PORT_D]
print BrickPi.EncoderOffset[PORT_D]
# Rotate both motors 360 degrees at power 150.
motorRotateDegree([150,150],[360,360],[PORT_D,PORT_C])
# Dump encoder state again after the move for comparison.
print BrickPi.Encoder[PORT_C]
print BrickPi.EncoderOffset[PORT_C]
print BrickPi.Encoder[PORT_D]
print BrickPi.EncoderOffset[PORT_D]
|
# Use this code as a starting point
# Reading of input values
var = input("Arctic Monkeys")
# Printing of outputs: print(var)
print(var)
from pyparsing import *
# Forward-declare every production of the Go grammar so the rule bodies
# below can reference productions that are defined later (the grammar is
# mutually recursive).  Order does not matter: each name is just bound to
# a fresh, empty Forward placeholder that is filled in with << further on.
# NOTE(review): this rebinds the names `Literal`, `Tag` and `Index`, which
# shadows pyparsing's own exports from the star import above -- every
# later call such as Literal("func") therefore no longer constructs a
# pyparsing Literal.  Renaming the Go `Literal` production would be the
# real fix, but it touches every rule body, so it is only flagged here.
_GO_PRODUCTIONS = (
    "unicode_char unicode_letter unicode_digit letter decimal_digit "
    "octal_digit hex_digit identifier int_lit decimal_lit octal_lit "
    "hex_lit float_lit decimals exponent char_lit unicode_value "
    "byte_value octal_byte_value hex_byte_value little_u_value "
    "big_u_value escaped_char string_lit raw_string_lit "
    "interpreted_string_lit Type TypeName TypeLit ArrayType ArrayLength "
    "ElementType SliceType StructType FieldDecl AnonymousField Tag "
    "PointerType BaseType FunctionType Signature Result Parameters "
    "ParameterList ParameterDecl InterfaceType MethodSpec MethodName "
    "InterfaceTypeName MapType KeyType ChannelType Channel SendChannel "
    "RecvChannel Block Declaration TopLevelDecl ConstDecl ConstSpec "
    "IdentifierList ExpressionList TypeDecl TypeSpec VarDecl VarSpec "
    "ShortVarDecl FunctionDecl Body MethodDecl Receiver BaseTypeName "
    "Operand Literal BasicLit QualifiedIdent CompositeLit LiteralType "
    "ElementList Element Key FieldName ElementIndex Value FunctionLit "
    "PrimaryExpr Selector Index Slice TypeAssertion Call Expression "
    "UnaryExpr binary_op log_op com_op rel_op add_op mul_op unary_op "
    "MethodExpr ReceiverType Conversion Statement SimpleStmt EmptyStmt "
    "LabeledStmt Label ExpressionStmt IncDecStmt Assignment assign_op "
    "IfStmt SwitchStmt ExprSwitchStmt ExprCaseClause ExprSwitchCase "
    "TypeSwitchStmt TypeSwitchGuard TypeCaseClause TypeSwitchCase "
    "TypeList ForStmt Condition ForClause InitStmt PostStmt RangeClause "
    "GoStmt SelectStmt CommClause CommCase SendExpr RecvExpr ReturnStmt "
    "BreakStmt ContinueStmt GotoStmt FallthroughStmt DeferStmt "
    "BuiltinCall BuiltinArgs SourceFile PackageClause PackageName "
    "ImportDecl ImportSpec ImportPath"
)
for _production in _GO_PRODUCTIONS.split():
    globals()[_production] = Forward()
#--------------------------------------------------------------------------------
unicode_char << ( Literal("a") | Literal("b") | Literal("c") | Literal("d") | Literal("e") | Literal("f") | Literal("g") | Literal("h") | Literal("i") | Literal("j") | Literal("k") | Literal("l") | Literal("m") | Literal("n") | Literal("o") | Literal("p") | Literal("q") | Literal("r") | Literal("s") | Literal("t") | Literal("u") | Literal("v") | Literal("w") | Literal("x") | Literal("y") | Literal("z") | Literal("A") | Literal("B") | Literal("C") | Literal("D") | Literal("E") | Literal("F") | Literal("G") | Literal("H") | Literal("I") | Literal("J") | Literal("K") | Literal("L") | Literal("M") | Literal("N") | Literal("O") | Literal("P") | Literal("Q") | Literal("R") | Literal("S") | Literal("T") | Literal("U") | Literal("V") | Literal("W") | Literal("X") | Literal("Y") | Literal("Z") | Literal("0") | Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") | Literal("8") | Literal("9") )
#--------------------------------------------------------------------------------
unicode_letter << ( Literal("a") | Literal("b") | Literal("c") | Literal("d") | Literal("e") | Literal("f") | Literal("g") | Literal("h") | Literal("i") | Literal("j") | Literal("k") | Literal("l") | Literal("m") | Literal("n") | Literal("o") | Literal("p") | Literal("q") | Literal("r") | Literal("s") | Literal("t") | Literal("u") | Literal("v") | Literal("w") | Literal("x") | Literal("y") | Literal("z") | Literal("A") | Literal("B") | Literal("C") | Literal("D") | Literal("E") | Literal("F") | Literal("G") | Literal("H") | Literal("I") | Literal("J") | Literal("K") | Literal("L") | Literal("M") | Literal("N") | Literal("O") | Literal("P") | Literal("Q") | Literal("R") | Literal("S") | Literal("T") | Literal("U") | Literal("V") | Literal("W") | Literal("X") | Literal("Y") | Literal("Z") )
#--------------------------------------------------------------------------------
unicode_digit << ( Literal("0") | Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") | Literal("8") | Literal("9") )
#--------------------------------------------------------------------------------
letter << ( unicode_letter | Literal("_") )
#--------------------------------------------------------------------------------
decimal_digit << ( Literal("0") | Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") | Literal("8") | Literal("9") )
#--------------------------------------------------------------------------------
octal_digit << ( Literal("0") | Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") )
#--------------------------------------------------------------------------------
hex_digit << ( Literal("0") | Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") | Literal("8") | Literal("9") | Literal("a") | Literal("b") | Literal("c") | Literal("d") | Literal("e") | Literal("f") | Literal("A") | Literal("B") | Literal("C") | Literal("D") | Literal("E") | Literal("F") )
#--------------------------------------------------------------------------------
identifier << ( letter + ZeroOrMore( letter | unicode_digit ) )
#--------------------------------------------------------------------------------
int_lit << ( decimal_lit | octal_lit | hex_lit )
#--------------------------------------------------------------------------------
decimal_lit << ( ( Literal("1") | Literal("2") | Literal("3") | Literal("4") | Literal("5") | Literal("6") | Literal("7") | Literal("8") | Literal("9") ) + ZeroOrMore( decimal_digit ) )
#--------------------------------------------------------------------------------
octal_lit << ( Literal("0") + ZeroOrMore( octal_digit ) )
#--------------------------------------------------------------------------------
hex_lit << ( Literal("0") + ( Literal("x") | Literal("X") ) + hex_digit + ZeroOrMore( hex_digit ) )
#--------------------------------------------------------------------------------
float_lit << ( decimals + Literal(".") + Optional( decimals ) + Optional( exponent ) | decimals + exponent | Literal(".") + decimals + Optional( exponent ) )
#--------------------------------------------------------------------------------
decimals << ( decimal_digit + ZeroOrMore( decimal_digit ) )
#--------------------------------------------------------------------------------
exponent << ( ( Literal("e") | Literal("E") ) + Optional( Literal("+") | Literal("-") ) + decimals )
#--------------------------------------------------------------------------------
char_lit << ( Literal("\'") + ( unicode_value | byte_value ) + Literal("\'") )
#--------------------------------------------------------------------------------
unicode_value << ( unicode_char | little_u_value | big_u_value | escaped_char )
#--------------------------------------------------------------------------------
byte_value << ( octal_byte_value | hex_byte_value )
#--------------------------------------------------------------------------------
octal_byte_value << ( Literal("\\") + octal_digit + octal_digit + octal_digit )
#--------------------------------------------------------------------------------
hex_byte_value << ( Literal("\\") + Literal("x") + hex_digit + hex_digit )
#--------------------------------------------------------------------------------
little_u_value << ( Literal("\\") + Literal("u") + hex_digit + hex_digit + hex_digit + hex_digit )
#--------------------------------------------------------------------------------
big_u_value << ( Literal("\\") + Literal("U") + hex_digit + hex_digit + hex_digit + hex_digit + hex_digit + hex_digit + hex_digit + hex_digit )
#--------------------------------------------------------------------------------
# Go spec: escaped_char = `\` ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | `\` | "'" | `"` ).
# Bug fix: the last alternative was Literal('`"`'), which matches the
# three-character sequence backtick-quote-backtick instead of a plain
# double quote.
escaped_char << ( Literal("\\") + ( Literal("a") | Literal("b") | Literal("f") | Literal("n") | Literal("r") | Literal("t") | Literal("v") | Literal("\\") | Literal("\'") | Literal("\"") ) )
#--------------------------------------------------------------------------------
string_lit << ( raw_string_lit | interpreted_string_lit )
#--------------------------------------------------------------------------------
raw_string_lit << ( Literal("`") + ZeroOrMore( unicode_char ) + Literal("`") )
#--------------------------------------------------------------------------------
# Bug fix: the original wrote Literal(`"`) with bare backticks, which is
# not valid Python syntax -- the module could not even be imported.  Per
# the Go spec an interpreted string literal is delimited by double quotes.
interpreted_string_lit << ( Literal("\"") + ZeroOrMore( unicode_value | byte_value ) + Literal("\"") )
#--------------------------------------------------------------------------------
Type << ( TypeName | TypeLit | Literal("(") + Type + Literal(")") )
#--------------------------------------------------------------------------------
TypeName << ( QualifiedIdent )
#--------------------------------------------------------------------------------
TypeLit << ( ArrayType | StructType | PointerType | FunctionType | InterfaceType | SliceType | MapType | ChannelType )
#--------------------------------------------------------------------------------
ArrayType << ( Literal("[") + ArrayLength + Literal("]") + ElementType )
#--------------------------------------------------------------------------------
ArrayLength << ( Expression )
#--------------------------------------------------------------------------------
ElementType << ( Type )
#--------------------------------------------------------------------------------
SliceType << ( Literal("[") + Literal("]") + ElementType )
#--------------------------------------------------------------------------------
StructType << ( Literal("struct") + Literal("{") + ZeroOrMore( FieldDecl + Literal(";") ) + Literal("}") )
#--------------------------------------------------------------------------------
FieldDecl << ( ( IdentifierList + Type | AnonymousField ) + Optional( Tag ) )
#--------------------------------------------------------------------------------
AnonymousField << ( Optional( Literal("*") ) + TypeName )
#--------------------------------------------------------------------------------
Tag << ( string_lit )
#--------------------------------------------------------------------------------
PointerType << ( Literal("*") + BaseType )
#--------------------------------------------------------------------------------
BaseType << ( Type )
#--------------------------------------------------------------------------------
FunctionType << ( Literal("func") + Signature )
#--------------------------------------------------------------------------------
Signature << ( Parameters + Optional( Result ) )
#--------------------------------------------------------------------------------
Result << ( Parameters | Type )
#--------------------------------------------------------------------------------
Parameters << ( Literal("(") + Optional( ParameterList + Optional( Literal(",") ) ) + Literal(")") )
#--------------------------------------------------------------------------------
ParameterList << ( ParameterDecl + ZeroOrMore( Literal(",") + ParameterDecl ) )
#--------------------------------------------------------------------------------
ParameterDecl << ( Optional( IdentifierList ) + ( Type | Literal("...") ) )
#--------------------------------------------------------------------------------
InterfaceType << ( Literal("interface") + Literal("{") + ZeroOrMore( MethodSpec + Literal(";") ) + Literal("}") )
#--------------------------------------------------------------------------------
MethodSpec << ( MethodName + Signature | InterfaceTypeName )
#--------------------------------------------------------------------------------
MethodName << ( identifier )
#--------------------------------------------------------------------------------
InterfaceTypeName << ( TypeName )
#--------------------------------------------------------------------------------
MapType << ( Literal("map") + Literal("[") + KeyType + Literal("]") + ElementType )
#--------------------------------------------------------------------------------
KeyType << ( Type )
#--------------------------------------------------------------------------------
ChannelType << ( Channel | SendChannel | RecvChannel )
#--------------------------------------------------------------------------------
Channel << ( Literal("chan") + ElementType )
#--------------------------------------------------------------------------------
SendChannel << ( Literal("chan") + Literal("<-") + ElementType )
#--------------------------------------------------------------------------------
RecvChannel << ( Literal("<-") + Literal("chan") + ElementType )
#--------------------------------------------------------------------------------
Block << ( Literal("{") + ZeroOrMore( Statement + Literal(";") ) + Literal("}") )
#--------------------------------------------------------------------------------
Declaration << ( ConstDecl | TypeDecl | VarDecl )
#--------------------------------------------------------------------------------
TopLevelDecl << ( Declaration | FunctionDecl | MethodDecl )
#--------------------------------------------------------------------------------
ConstDecl << ( Literal("const") + ( ConstSpec | Literal("(") + ZeroOrMore( ConstSpec + Literal(";") ) + Literal(")") ) )
#--------------------------------------------------------------------------------
ConstSpec << ( IdentifierList + Optional( Optional( Type ) + Literal("=") + ExpressionList ) )
#--------------------------------------------------------------------------------
IdentifierList << ( identifier + ZeroOrMore( Literal(",") + identifier ) )
#--------------------------------------------------------------------------------
ExpressionList << ( Expression + ZeroOrMore( Literal(",") + Expression ) )
#--------------------------------------------------------------------------------
TypeDecl << ( Literal("type") + ( TypeSpec | Literal("(") + ZeroOrMore( TypeSpec + Literal(";") ) + Literal(")") ) )
#--------------------------------------------------------------------------------
TypeSpec << ( identifier + Type )
#--------------------------------------------------------------------------------
VarDecl << ( Literal("var") + ( VarSpec | Literal("(") + ZeroOrMore( VarSpec + Literal(";") ) + Literal(")") ) )
#--------------------------------------------------------------------------------
VarSpec << ( IdentifierList + ( Type + Optional( Literal("=") + ExpressionList ) | Literal("=") + ExpressionList ) )
#--------------------------------------------------------------------------------
ShortVarDecl << ( IdentifierList + Literal(":=") + ExpressionList )
#--------------------------------------------------------------------------------
FunctionDecl << ( Literal("func") + identifier + Signature + Optional( Body ) )
#--------------------------------------------------------------------------------
Body << ( Block )
#--------------------------------------------------------------------------------
MethodDecl << ( Literal("func") + Receiver + MethodName + Signature + Optional( Body ) )
#--------------------------------------------------------------------------------
Receiver << ( Literal("(") + Optional( identifier ) + Optional( Literal("*") ) + BaseTypeName + Literal(")") )
#--------------------------------------------------------------------------------
BaseTypeName << ( identifier )
#--------------------------------------------------------------------------------
Operand << ( Literal | QualifiedIdent | MethodExpr | Literal("(") + Expression + Literal(")") )
#--------------------------------------------------------------------------------
Literal << ( BasicLit | CompositeLit | FunctionLit )
#--------------------------------------------------------------------------------
BasicLit << ( int_lit | float_lit | char_lit | string_lit )
#--------------------------------------------------------------------------------
QualifiedIdent << ( Optional( PackageName + Literal(".") ) + identifier )
#--------------------------------------------------------------------------------
CompositeLit << ( LiteralType + Literal("{") + Optional( ElementList + Optional( Literal(",") ) ) + Literal("}") )
#--------------------------------------------------------------------------------
LiteralType << ( StructType | ArrayType | Literal("[") + Literal("...") + Literal("]") + ElementType | SliceType | MapType | TypeName | Literal("(") + LiteralType + Literal(")") )
#--------------------------------------------------------------------------------
ElementList << ( Element + ZeroOrMore( Literal(",") + Element ) )
#--------------------------------------------------------------------------------
Element << ( Optional( Key + Literal(":") ) + Value )
#--------------------------------------------------------------------------------
Key << ( FieldName | ElementIndex )
#--------------------------------------------------------------------------------
FieldName << ( identifier )
#--------------------------------------------------------------------------------
ElementIndex << ( Expression )
#--------------------------------------------------------------------------------
Value << ( Expression )
#--------------------------------------------------------------------------------
FunctionLit << ( FunctionType + Body )
#--------------------------------------------------------------------------------
PrimaryExpr << ( Operand | Conversion | BuiltinCall | PrimaryExpr + Selector | PrimaryExpr + Index | PrimaryExpr + Slice | PrimaryExpr + TypeAssertion | PrimaryExpr + Call )
#--------------------------------------------------------------------------------
Selector << ( Literal(".") + identifier )
#--------------------------------------------------------------------------------
Index << ( Literal("[") + Expression + Literal("]") )
#--------------------------------------------------------------------------------
Slice << ( Literal("[") + Expression + Literal(":") + Optional( Expression ) + Literal("]") )
#--------------------------------------------------------------------------------
TypeAssertion << ( Literal(".") + Literal("(") + Type + Literal(")") )
#--------------------------------------------------------------------------------
Call << ( Literal("(") + Optional( ExpressionList + Optional( Literal(",") ) ) + Literal(")") )
#--------------------------------------------------------------------------------
Expression << ( UnaryExpr | Expression + binary_op + UnaryExpr )
#--------------------------------------------------------------------------------
UnaryExpr << ( PrimaryExpr | unary_op + UnaryExpr )
#--------------------------------------------------------------------------------
binary_op << ( log_op | com_op | rel_op | add_op | mul_op )
#--------------------------------------------------------------------------------
log_op << ( Literal("||") | Literal("&&") )
#--------------------------------------------------------------------------------
com_op << ( Literal("<-") )
#--------------------------------------------------------------------------------
rel_op << ( Literal("==") | Literal("!=") | Literal("<") | Literal("<=") | Literal(">") | Literal(">=") )
#--------------------------------------------------------------------------------
add_op << ( Literal("+") | Literal("-") | Literal("|") | Literal("^") )
#--------------------------------------------------------------------------------
mul_op << ( Literal("*") | Literal("/") | Literal("%") | Literal("<<") | Literal(">>") | Literal("&") | Literal("&^") )
#--------------------------------------------------------------------------------
unary_op << ( Literal("+") | Literal("-") | Literal("!") | Literal("^") | Literal("*") | Literal("&") | Literal("<-") )
#--------------------------------------------------------------------------------
MethodExpr << ( ReceiverType + Literal(".") + MethodName )
#--------------------------------------------------------------------------------
ReceiverType << ( TypeName | Literal("(") + Literal("*") + TypeName + Literal(")") )
#--------------------------------------------------------------------------------
Conversion << ( LiteralType + Literal("(") + Expression + Literal(")") )
#--------------------------------------------------------------------------------
Statement << ( Declaration | LabeledStmt | SimpleStmt | GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt | FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt | DeferStmt )
#--------------------------------------------------------------------------------
SimpleStmt << ( EmptyStmt | ExpressionStmt | IncDecStmt | Assignment | ShortVarDecl )
#--------------------------------------------------------------------------------
# Bug fix: Word("") is invalid -- pyparsing's Word requires a non-empty
# character set and raises an exception at construction time.  The empty
# statement matches nothing, which is exactly pyparsing's Empty token.
EmptyStmt << ( Empty() )
#--------------------------------------------------------------------------------
LabeledStmt << ( Label + Literal(":") + Statement )
#--------------------------------------------------------------------------------
Label << ( identifier )
#--------------------------------------------------------------------------------
ExpressionStmt << ( Expression )
#--------------------------------------------------------------------------------
IncDecStmt << ( Expression + ( Literal("++") | Literal("--") ) )
#--------------------------------------------------------------------------------
Assignment << ( ExpressionList + assign_op + ExpressionList )
#--------------------------------------------------------------------------------
assign_op << ( Optional( add_op | mul_op ) + Literal("=") )
#--------------------------------------------------------------------------------
IfStmt << ( Literal("if") + Optional( SimpleStmt + Literal(";") ) + Optional( Expression ) + Block + Optional( Literal("else") + Statement ) )
#--------------------------------------------------------------------------------
SwitchStmt << ( ExprSwitchStmt | TypeSwitchStmt )
#--------------------------------------------------------------------------------
ExprSwitchStmt << ( Literal("switch") + Optional( SimpleStmt + Literal(";") ) + Optional( Expression ) + Literal("{") + ZeroOrMore( ExprCaseClause ) + Literal("}") )
#--------------------------------------------------------------------------------
ExprCaseClause << ( ExprSwitchCase + Literal(":") + ZeroOrMore( Statement + Literal(";") ) )
#--------------------------------------------------------------------------------
ExprSwitchCase << ( Literal("case") + ExpressionList | Literal("default") )
#--------------------------------------------------------------------------------
TypeSwitchStmt << ( Literal("switch") + Optional( SimpleStmt + Literal(";") ) + TypeSwitchGuard + Literal("{") + ZeroOrMore( TypeCaseClause ) + Literal("}") )
#--------------------------------------------------------------------------------
TypeSwitchGuard << ( Optional( identifier + Literal(":=") ) + Expression + Literal(".") + Literal("(") + Literal("type") + Literal(")") )
#--------------------------------------------------------------------------------
TypeCaseClause << ( TypeSwitchCase + Literal(":") + ZeroOrMore( Statement + Literal(";") ) )
#--------------------------------------------------------------------------------
TypeSwitchCase << ( Literal("case") + TypeList | Literal("default") )
#--------------------------------------------------------------------------------
TypeList << ( Type + ZeroOrMore( Literal(",") + Type ) )
#--------------------------------------------------------------------------------
ForStmt << ( Literal("for") + Optional( Condition | ForClause | RangeClause ) + Block )
#--------------------------------------------------------------------------------
Condition << ( Expression )
#--------------------------------------------------------------------------------
ForClause << ( Optional( InitStmt ) + Literal(";") + Optional( Condition ) + Literal(";") + Optional( PostStmt ) )
#--------------------------------------------------------------------------------
InitStmt << ( SimpleStmt )
#--------------------------------------------------------------------------------
PostStmt << ( SimpleStmt )
#--------------------------------------------------------------------------------
RangeClause << ( ExpressionList + ( Literal("=") | Literal(":=") ) + Literal("range") + Expression )
#--------------------------------------------------------------------------------
GoStmt << ( Literal("go") + Expression )
#--------------------------------------------------------------------------------
SelectStmt << ( Literal("select") + Literal("{") + ZeroOrMore( CommClause ) + Literal("}") )
#--------------------------------------------------------------------------------
CommClause << ( CommCase + Literal(":") + ZeroOrMore( Statement + Literal(";") ) )
#--------------------------------------------------------------------------------
CommCase << ( Literal("case") + ( SendExpr | RecvExpr ) | Literal("default") )
#--------------------------------------------------------------------------------
SendExpr << ( Expression + Literal("<-") + Expression )
#--------------------------------------------------------------------------------
RecvExpr << ( Optional( Expression + ( Literal("=") | Literal(":=") ) ) + Literal("<-") + Expression )
#--------------------------------------------------------------------------------
ReturnStmt << ( Literal("return") + Optional( ExpressionList ) )
#--------------------------------------------------------------------------------
BreakStmt << ( Literal("break") + Optional( Label ) )
#--------------------------------------------------------------------------------
ContinueStmt << ( Literal("continue") + Optional( Label ) )
#--------------------------------------------------------------------------------
GotoStmt << ( Literal("goto") + Label )
#--------------------------------------------------------------------------------
FallthroughStmt << ( Literal("fallthrough") )
#--------------------------------------------------------------------------------
DeferStmt << ( Literal("defer") + Expression )
#--------------------------------------------------------------------------------
BuiltinCall << ( identifier + Literal("(") + Optional( BuiltinArgs ) + Literal(")") )
#--------------------------------------------------------------------------------
BuiltinArgs << ( Type + Optional( Literal(",") + ExpressionList ) | ExpressionList )
#--------------------------------------------------------------------------------
SourceFile << ( PackageClause + Literal(";") + ZeroOrMore( ImportDecl + Literal(";") ) + ZeroOrMore( TopLevelDecl + Literal(";") ) )
#--------------------------------------------------------------------------------
PackageClause << ( Literal("package") + PackageName )
#--------------------------------------------------------------------------------
PackageName << ( identifier )
#--------------------------------------------------------------------------------
ImportDecl << ( Literal("import") + ( ImportSpec | Literal("(") + ZeroOrMore( ImportSpec + Literal(";") ) + Literal(")") ) )
#--------------------------------------------------------------------------------
ImportSpec << ( Optional( Literal(".") | PackageName ) + ImportPath )
#--------------------------------------------------------------------------------
ImportPath << ( string_lit )
import sys
sys.setrecursionlimit(15000)
SourceFile.parseString("package main;")
|
import numpy as np
from numpy.linalg import matrix_power

# Exercise matrices: A is 2x3, B is 3x2, C and D are square (2x2 and 3x3).
A = np.array([[-1, 4, 8],
              [-9, 1, 2]])
B = np.array([[5, 8],
              [0, -6],
              [5, 6]])
C = np.array([[-4, 1],
              [6, 5]])
D = np.array([[-6, 3, 1],
              [8, 9, -2],
              [6, -1, 5]])

# Each exercise part is a (label, matrix expression) pair; print the label
# followed by the evaluated result, in order.
for part_label, part_value in [
    ('a', (A @ B).T),
    ('b', (B @ C).T),
    ('c', C - C.T),
    ('d', D - D.T),
    ('e', D.T.T),
    ('f', (2 * C).T),
    ('g', A.T + B),
    ('h', A + B.T),
    ('i', (A.T + B).T),
    ('j', (2 * A.T - 5 * B).T),
    ('k', (-D).T),
    ('l', -D.T),
    ('m', matrix_power(C, 2).T),
    ('n', matrix_power(C.T, 2)),
]:
    print(part_label)
    print(part_value)
|
from django.urls import path, re_path

from . import views

# NOTE(review): ``django.conf.urls.url`` was deprecated in Django 2.0 and
# removed in Django 4.0. ``re_path`` (already imported here) is its direct
# replacement with identical unanchored-regex matching semantics, so routing
# behavior is unchanged; the dead import was dropped so the module still
# loads on Django >= 4.
urlpatterns = [
    re_path('menu/wrapper.html', views.wrapper),
    re_path('menu/main.html', views.main),
    re_path('menu/condition.html', views.condition),
    re_path('menu/content.html', views.content),
    re_path('menu/', views.menu),
    re_path('condition/', views.condition),
    path('user/', views.index),
    re_path(r'^$', views.show),
]
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 7 09:21:48 2017
@author: obarquero
"""
from __future__ import division
import numpy as np
import zlib
from backports import lzma
import bz2
class NCD(object):
    """Normalized Compression Distance between two sequences.

    NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), where C(s) is
    the length of ``compressor.compress(s)``. ``compressor`` can be any
    module exposing ``compress`` (e.g. zlib, lzma, bz2).
    """

    def __init__(self, x=None, y=None, compressor=None):
        # Bug fix: the original used mutable default arguments ([]), which
        # are shared between every instance created without arguments. Use
        # None sentinels and materialize a fresh list per instance instead;
        # callers that passed values explicitly see no difference.
        self.x = [] if x is None else x
        self.y = [] if y is None else y
        self.compressor = [] if compressor is None else compressor

    def ncd(self, x, y, compressor):
        """
        Compute ncd value.

        Example: ncd(x,y,zlib)
        where compressor can be zlib lzma bz2
        """
        xy = np.concatenate((x, y))
        c_x = compressor.compress(x)
        c_y = compressor.compress(y)
        c_xy = compressor.compress(xy)
        # (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
        ncd = (len(c_xy) - np.min((len(c_x), len(c_y)))) / (np.max((len(c_x), len(c_y))))
        return ncd

    def __ncd(self):
        """Same computation as :meth:`ncd`, using the stored attributes."""
        xy = np.concatenate((self.x, self.y))
        c_x = self.compressor.compress(self.x)
        c_y = self.compressor.compress(self.y)
        c_xy = self.compressor.compress(xy)
        ncd = (len(c_xy) - np.min((len(c_x), len(c_y)))) / (np.max((len(c_x), len(c_y))))
        return ncd

    # Accessors kept for backward compatibility with existing callers.
    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def getCompressor(self):
        return self.compressor
|
import time
def ConsolePrint(string, type):#type "c" client, "s" server
clock = time.strftime('%X')
if type == "c":
print(clock, " [CLIENT]: ", string)
elif type == "s":
print(clock, " [SERVER]: ", string)
|
from __future__ import division
import time
from smbus import SMBus
import Adafruit_PCA9685
import subprocess
import math
import numpy as np
from ast import literal_eval
PI = 3.14  # low-precision pi; kept for compatibility with the rest of the file

# Global Variables: hardware handles and per-leg contact-sensor state.
# NOTE(review): the main loop below also drives ``pwm1``, which is never
# defined in this file — presumably a first PCA9685 board created elsewhere,
# or missing. TODO confirm.
pwm2 = Adafruit_PCA9685.PCA9685(address=0x41, busnum=1)  # second PWM board at I2C 0x41
pwm2.set_pwm_freq(50)  # 50 Hz — the standard hobby-servo update rate
bus = SMBus(1)  # I2C bus 1 (Raspberry Pi header)
FL_sensor = 0
FR_sensor = 0
HL_sensor = 0
HR_sensor = 0
def adc(add, data):
    """Read one 16-bit conversion from the I2C ADC at address ``add``.

    Writes ``data`` (a list of bytes) to register 0x01, then reads the
    two-byte register 0x00 and returns the big-endian 16-bit value.
    Register layout matches an ADS1115-style converter (config at 0x01,
    conversion at 0x00) — TODO confirm against the actual hardware.
    """
    bus.write_i2c_block_data(add, 0x01, data)
    raw = bus.read_i2c_block_data(add, 0x00, 2)
    # SMBus returns bytes in 0..255 already; the original masked the high
    # byte with 0xFFFF, a no-op that read as if 16 bits were expected.
    # Mask both bytes with 0xFF and combine big-endian.
    high = raw[0] & 0xFF
    low = raw[1] & 0xFF
    return (high << 8) | low
def robot_file_generate(line_count=24):
    """Create the reference file filled with ``line_count`` placeholder lines.

    Each line reads ``line <i>``. Returns the file name that was written.
    The default of 24 matches the layout used by the main loop (two lines
    per motor for 12 motors — see ``line_num = 2*mn + 1`` below).
    """
    file_name = 'Black_dog_reference.txt'
    # ``with`` closes the handle even if a write fails (original relied on
    # an explicit close()).
    with open(file_name, 'w') as f:
        for i in range(line_count):
            f.write('line' + ' ' + str(i) + '\n')
    return file_name
def replace_line(file_name, line_num, text):
    """Overwrite line ``line_num`` (0-based) of ``file_name`` with ``text``.

    ``text`` must carry its own trailing newline. Raises IndexError when the
    file has fewer than ``line_num + 1`` lines.
    """
    # Read everything first, then rewrite. The original leaked the read
    # handle (open(...).readlines() with no close); ``with`` closes both.
    with open(file_name, 'r') as src:
        lines = src.readlines()
    lines[line_num] = text
    with open(file_name, 'w') as out:
        out.writelines(lines)
# --- Main motor test / calibration loop -------------------------------------
# NOTE(review): this loop cannot run as written. ``N_L``, ``encoder`` and
# ``pwm1`` are never defined in this file, and ``data`` is a plain string yet
# is indexed like a 2-D table (data[i, 1]). Presumably the contact-map file
# was meant to be parsed into a numeric table first (``literal_eval`` is
# imported above but never used). TODO confirm against the original project.
data_map = open("Black_dog_contact_map.txt", "r")
data = data_map.read()
print data
servomin=160  # lowest PWM count accepted by the servos
servomax=524  # highest PWM count accepted by the servos
i1=1
b=0
b0=0
dkd=0
counter = 0  # cycles 0..100; a fresh motor/PWM prompt is issued when it is 0
Robot_file = robot_file_generate()
cc = np.zeros([12, 1])  # per-motor contact/direction state, one row per motor
while 1:
    # motor = [342 , 342 , 244 , 342 , 342 , 244 , 244 , 244 , 244 , 344 , 344 , 244]
    # FRK FRH FRA FLA FLH FLK HLH HLK HLA HRA HRK HRH
    flag = 0
    # Joint_name < Actual_angle_1 Actual_angle_2 | motor_pwm_1 motor_pwm_2 | encoder_value_1 encoder_value_2 >
    if counter == 0:
        # Ask the operator which motor to exercise and the starting PWM.
        # (Python 2 ``input`` evaluates the entry as an expression.)
        motor_i = input("Please enster the motor to be tested: <0 - 11>: ")
        mn = motor_i
        pwm_input = input("Please input PWM values (150 - 525): ")
        p_i = int(pwm_input)
        motor = p_i
        counter = 1
    elif counter == 100:
        counter = 0
    else:
        counter += 1
    # Poll each contact sensor listed in the map; saturated ADC readings
    # (near 0 or near full-scale) are treated as a contact event.
    for i in range(0, N_L):
        data_in = data[i, 1]
        add = data[i, 0]
        val = adc(add, data_in)
        time.sleep(0.002)
        if val >60000 or val < 10:
            cc[data[i, 3]] = data[i, 4]
            angle = data[i, 5]
    # Step the selected motor's PWM in the direction indicated by the
    # contact state.
    if cc[mn] > 0:
        motor +=5
    elif cc[mn] < 0:
        motor -= 5
    line_num = int(2*mn + 1)
    # When any contact was registered, log the new PWM and encoder reading
    # to the reference file (two lines per motor; odd line holds the data).
    if cc[mn] > 0 or cc[mn] < 0:
        line_num = int(2*mn + 1)
        reference_txt = str(mn) +" , "+str(motor) + " , " + str(encoder[mn]) + " , " + str(angle)+ "\n"
        replace_line(Robot_file, line_num, reference_txt)
    # Leg
    # Motor index -> (board, channel) fan-out; motors 0-5 go to pwm1,
    # motors 6-11 to pwm2.
    if mn == 0:
        pwm1.set_pwm(0, 0, motor)
    elif mn == 1:
        pwm1.set_pwm(1, 0, motor)
    elif mn == 2:
        pwm1.set_pwm(3, 0, motor)
    elif mn == 3:
        pwm1.set_pwm(12, 0, motor)
    elif mn == 4:
        pwm1.set_pwm(14, 0, motor)
    elif mn == 5:
        pwm1.set_pwm(15, 0, motor)
    elif mn == 6:
        pwm2.set_pwm(1, 0, motor)
    elif mn == 7:
        pwm2.set_pwm(0, 0, motor)
    elif mn == 8:
        pwm2.set_pwm(3, 0, motor)
    elif mn == 9:
        pwm2.set_pwm(12, 0, motor)
    elif mn == 10:
        pwm2.set_pwm(15, 0, motor)
    elif mn == 11:
        pwm2.set_pwm(14, 0, motor)
    print " -------------------------------------------"
    print "motor: ",
    print motor
    #time.sleep(1.5)
|
# Use raw_input to read 3 integers into 3 separate variables, then print the
# largest of the three. (Python 2 script: raw_input + print statements.)
# NOTE(review): ties are not handled — e.g. First == Second > Third falls
# through to the else branch and prints Third.
first = raw_input("enter a number")  # read user input as a string
First = int(first)  # convert the string to an integer
second = raw_input("enter another number")  # read user input as a string
Second = int(second)  # convert the string to an integer
third = raw_input("enter one more number")  # read user input as a string
Third = int(third)  # convert the string to an integer
if First > Second and First > Third:  # first number strictly greater than both others
    print First  # then the first number is printed
elif Second > First and Second > Third:  # second number strictly greater than both others
    print Second  # then the second number is printed
else:  # neither of the conditions above held
    print Third  # then the third number is printed
from services.views.spi import SpiApi
from authentications.utils import get_correlation_id_from_username
from django.contrib import messages
from django.views.generic.base import TemplateView
from django.shortcuts import redirect
import logging
from web_admin import setup_logger,api_settings
logger = logging.getLogger(__name__)
class SPIUrlConfigurationDelete(TemplateView, SpiApi):
    """Admin view that shows and performs deletion of one SPI URL configuration."""

    template_name = 'services/spi_url_configuration/delete.html'
    get_config_type_url = 'api-gateway/payment/' + api_settings.API_VERSION + '/spi-url-configuration-types'
    spi_url_configuration = 'api-gateway/payment/' + api_settings.API_VERSION + '/admin/spi-url-configurations/{spiUrlConfigurationId}'
    logger = logger

    # Route-kwarg names copied into the template context and read on POST.
    _ID_KWARGS = ('service_command_id', 'service_id', 'command_id',
                  'spi_url_id', 'spi_url_config_id')

    def dispatch(self, request, *args, **kwargs):
        # Attach a per-request correlation id to this view's logger.
        correlation_id = get_correlation_id_from_username(self.request.user)
        self.logger = setup_logger(self.request, logger, correlation_id)
        return super(SPIUrlConfigurationDelete, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        self.logger.info('========== Start getting SPI url detail ==========')
        context = super(SPIUrlConfigurationDelete, self).get_context_data(**kwargs)
        ids = {name: kwargs.get(name) for name in self._ID_KWARGS}
        detail_path = self.spi_url_configuration.format(
            spiUrlConfigurationId=ids['spi_url_config_id'])
        configuration_detail, success = self._get_method(
            detail_path, "Get spi url configuration", logger)
        self.logger.info("SPI Url configuration details is [{}]".format(configuration_detail))
        data, success = self._get_method(self.get_config_type_url, "", logger)
        self.logger.info("SPI url configuration types {}".format(data))
        self.logger.info('========== Finish getting SPI url detail ==========')
        context['configuration_detail'] = configuration_detail
        context['configuration_type_list'] = data
        context.update(ids)
        return context

    def post(self, request, *args, **kwargs):
        self.logger.info("========== Start deleting SPI configuration url ==========")
        ids = {name: kwargs.get(name) for name in self._ID_KWARGS}
        path = self.spi_url_configuration.format(
            spiUrlConfigurationId=ids['spi_url_config_id'])
        data, status = self._delete_method(path, "Delete SPI Configuration Url", logger)
        self.logger.info("spi url configuration types {}".format(data))
        self.logger.info("========== Finish deleting SPI configuration url ==========")
        if status:
            type_msg, text_msg = messages.SUCCESS, 'Deleted data successfully'
        else:
            type_msg, text_msg = messages.ERROR, data
        messages.add_message(request, type_msg, text_msg)
        return redirect('services:spi_configuration_list',
                        service_command_id=ids['service_command_id'],
                        service_id=ids['service_id'],
                        command_id=ids['command_id'],
                        spiUrlId=ids['spi_url_id'])
|
from django.db import models
# Create your models here.
class TestManager(models.Manager):
    """Manager with a dict-based convenience constructor."""

    def create_tweet(self, d):
        # Expand the dict into field keyword arguments and persist the row.
        return self.create(**d)
class TestModel(models.Model):
    """Per-user labeling statistics for one test run."""

    objects = TestManager()
    updated = models.DateTimeField(auto_now=True)      # touched on every save
    created = models.DateTimeField(auto_now_add=True)  # set once at insert
    number_labels = models.IntegerField(default=0)
    number_labels_correct = models.IntegerField(default=0)
    number_labels_incorrect = models.IntegerField(default=0)
    user_email = models.EmailField(null=True)
    user_id = models.IntegerField(default=0)

    def __str__(self):
        # Bug fix: the original had 'created=str(self.created)' inside the
        # string literal, so the creation timestamp was never interpolated.
        return 'id=' + str(self.pk) + ' created=' + str(self.created)

    def __unicode__(self):
        # Python 2 repr: delegate so both stay in sync.
        return self.__str__()
|
#!/usr/bin/env python
# Exercise 4
# (1) Modify reverse_lookup_old so that it builds and returns a list of all
# keys that map to v, or an empty list if there are none.
# (2) Paste in your completed functions from HW08_ex_11_02.py
# (3) Do not edit what is in main(). It should print what is returned, a list
# of the keys that map to the values passed.
##############################################################################
def reverse_lookup_old(d, v):
    """Return the first key in ``d`` that maps to ``v``.

    Raises ValueError when no key maps to ``v``.
    """
    for key, value in d.items():
        if value == v:
            return key
    raise ValueError
def reverse_lookup_new(d, v):
    """Return a list of every key in ``d`` mapping to ``v`` (empty if none)."""
    return [key for key, value in d.items() if value == v]
##############################################################################
################### INSERT COMPLETED CODE FROM 11_02 BELOW: ##################
##############################################################################
def histogram_new(s):
    """Return a dict mapping each element of ``s`` to its occurrence count.

    Keys appear in order of first occurrence, as with the d.get version.
    """
    counts = {}
    for item in s:
        try:
            counts[item] += 1
        except KeyError:
            counts[item] = 1
    return counts
def get_pledge_list(filename="pledge.txt"):
    """Open ``filename`` (default pledge.txt, the original's hard-coded file)
    and convert it to a list, each item a word in the order it appears in the
    file. Returns the list.

    Words are whitespace-split, matching str.split() with no arguments.
    """
    with open(filename) as fin:
        pledge_list = []
        # Iterate the file directly (no need to materialize readlines()).
        for line in fin:
            pledge_list.extend(line.split())
    return pledge_list
##############################################################################
################### INSERT COMPLETED CODE FROM 11_02 ABOVE: ##################
##############################################################################
def main():  # DO NOT CHANGE BELOW
    # Build a word-frequency histogram of the pledge text, then print the
    # keys mapping to each queried value. (Python 2 print statements — this
    # exercise file targets Python 2.)
    pledge_histogram = histogram_new(get_pledge_list())
    print reverse_lookup_new(pledge_histogram, 1)
    print reverse_lookup_new(pledge_histogram, 9)
    print reverse_lookup_new(pledge_histogram, "Python")
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
(Description)
Created on Oct 16, 2014
"""
import json
from os.path import dirname, abspath
import sys
basePath = dirname(abspath(__file__)) + '/../../'
sys.path.insert(0, '../../')
from ce1sus.helpers.common.config import Configuration
from ce1sus.controllers.admin.attributedefinitions import AttributeDefinitionController
from ce1sus.controllers.admin.conditions import ConditionController
from ce1sus.controllers.admin.objectdefinitions import ObjectDefinitionController
from ce1sus.controllers.admin.references import ReferencesController
from ce1sus.controllers.admin.user import UserController
from ce1sus.db.brokers.definitions.handlerdefinitionbroker import AttributeHandlerBroker
from ce1sus.db.classes.definitions import AttributeDefinition, ObjectDefinition
from ce1sus.db.classes.report import ReferenceDefinition
from ce1sus.db.common.session import SessionManager
__author__ = 'Weber Jean-Paul'
__email__ = 'jean-paul.weber@govcert.etat.lu'
__copyright__ = 'Copyright 2013-2014, GOVCERT Luxembourg'
__license__ = 'GPL v3+'
if __name__ == '__main__':
    # want parent of parent directory aka ../../
    # setup cherrypy
    #
    # Maintenance script (Python 2): synchronizes attribute / object /
    # reference definitions from the install JSON files into the database,
    # inserting new records and updating existing ones matched by uuid.
    ce1susConfigFile = basePath + 'config/ce1sus.conf'
    config = Configuration(ce1susConfigFile)
    print "Getting config"
    config.get_section('Logger')['log'] = False
    print "Creating DB"
    session = SessionManager(config)
    mysql_session = session.connector.get_direct_session()
    session = mysql_session
    # add attributes definitions
    obj_ctrl = ObjectDefinitionController(config, session)
    ref_ctrl = ReferencesController(config, session)
    attr_ctrl = AttributeDefinitionController(config, session)
    handler_broker = AttributeHandlerBroker(session)
    cond_ctrl = ConditionController(config, session)
    # user
    # The maintenance user recorded in the config owns all inserts below.
    user_controller = UserController(config, session)
    user_uuid = config.get('ce1sus', 'maintenaceuseruuid', None)
    user = user_controller.get_user_by_uuid(user_uuid)
    # Index existing definitions by uuid so JSON entries can be matched to
    # records that already exist (update) or do not (insert).
    all_bd_defs = attr_ctrl.attr_def_broker.get_all()
    attr_defs = dict()
    for attr_def in all_bd_defs:
        attr_defs[attr_def.uuid] = attr_def
    all_bd_defs = obj_ctrl.get_all_object_definitions()
    obj_defs = dict()
    for obj_def in all_bd_defs:
        obj_defs[obj_def.uuid] = obj_def
    all_bd_defs = ref_ctrl.get_reference_definitions_all()
    ref_defs = dict()
    for ref_def in all_bd_defs:
        ref_defs[ref_def.uuid] = ref_def
    with open('../install/database/attributes.json') as data_file:
        attrs = json.load(data_file)
    print "Updating attribute definitions"
    for attr_json in attrs:
        # check if exists
        uuid = attr_json.get('identifier')
        attr_def = attr_defs.get(uuid, None)
        if attr_def is None:
            attr_def = AttributeDefinition()
            attr_def.uuid = uuid
            new_one = True
        else:
            new_one = False
        attr_def.populate(attr_json)
        attr_def.uuid = attr_json.get('identifier')
        # Resolve handler / value-type / default-condition uuids from the
        # JSON to the corresponding database identifiers.
        attributehandler_uuid = attr_json.get('attributehandler_id', None)
        attribute_handler = handler_broker.get_by_uuid(attributehandler_uuid)
        attr_def.attributehandler_id = attribute_handler.identifier
        attr_def.attribute_handler = attribute_handler
        value_type_uuid = attr_json.get('type_id', None)
        value_type = attr_ctrl.type_broker.get_by_uuid(value_type_uuid)
        attr_def.value_type_id = value_type.identifier
        default_condition_uuid = attr_json.get('default_condition_id', None)
        default_condition = cond_ctrl.get_condition_by_uuid(default_condition_uuid)
        attr_def.default_condition_id = default_condition.identifier
        if new_one:
            attr_ctrl.insert_attribute_definition(attr_def, user, False)
        else:
            attr_ctrl.attr_def_broker.update(attr_def, False)
        attr_defs[attr_def.uuid] = attr_def
    attr_ctrl.attr_def_broker.do_commit(False)
    # add object definitions and make relations
    with open('../install/database/objects.json') as data_file:
        objs = json.load(data_file)
    obj_ctrl = ObjectDefinitionController(config, session)
    print "Updating object definitions"
    for obj_json in objs:
        uuid = obj_json.get('identifier')
        obj_def = obj_defs.get(uuid, None)
        if obj_def is None:
            obj_def = ObjectDefinition()
            obj_def.uuid = uuid
            new = True
        else:
            new = False
        obj_def.populate(obj_json)
        attrs = obj_json.get('attributes')
        # clear all relations in case
        obj_def.attributes = list()
        for attr in attrs:
            attr_uuid = attr.get('identifier')
            obj_def.attributes.append(attr_defs[attr_uuid])
        if new:
            obj_ctrl.insert_object_definition(obj_def, user, False)
        else:
            obj_ctrl.obj_def_broker.update(obj_def, False)
    obj_ctrl.obj_def_broker.do_commit(False)
    ref_ctrl = ReferencesController(config, session)
    with open('../install/database/references.json') as data_file:
        references = json.load(data_file)
    print "Updating references definitions"
    for ref_json in references:
        uuid = ref_json.get('identifier')
        ref_def = ref_defs.get(uuid, None)
        if ref_def is None:
            ref_def = ReferenceDefinition()
            ref_def.uuid = uuid
            new = True
        else:
            new = False
        ref_def.populate(ref_json)
        referencehandler_uuid = ref_json.get('referencehandler_id', None)
        reference_handler = ref_ctrl.reference_broker.get_handler_by_uuid(referencehandler_uuid)
        ref_def.referencehandler_id = reference_handler.identifier
        ref_def.reference_handler = reference_handler
        if new:
            ref_ctrl.insert_reference_definition(ref_def, user, False)
        else:
            ref_ctrl.update_reference(ref_def, user, False)
    # Final do_commit(True) flushes everything accumulated above.
    ref_ctrl.reference_definition_broker.do_commit(True)
|
import csv
import io
import os
import tkinter as tk
import xml.etree.cElementTree as xmlET
from tkinter import filedialog
import tokenizeJudgements
class JudgementEntity:
    """One court judgement: an id, its tokenized text, and a class label."""

    def __init__(self, id, text, label=""):
        # Underscore-prefixed attributes; the rest of this script accesses
        # them directly.
        self._id, self._text, self._label = id, text, label
# The four recognized judgement categories; a file's parent directory name
# decides which one applies.
_KNOWN_LABELS = ('conditional', 'verdict', 'acquittal', 'rejected')


def runScript():
    """Walk a user-chosen directory tree of XML judgements and emit a labeled CSV.

    Each immediate subdirectory name is the label. A judgement id seen more
    than once keeps the first text encountered but takes the latest label.
    Output goes to ``out.csv`` inside the chosen directory.
    """
    root = tk.Tk()
    root.withdraw()  # hide the empty Tk window; only the dialog is wanted
    directory = filedialog.askdirectory()
    list_of_files = []
    for (_, dirnames, _) in os.walk(directory):
        for dirname in dirnames:
            subdir = directory + os.path.sep + dirname
            files = [(subdir + os.path.sep + e) for e in os.listdir(subdir) if e.endswith('.xml')]
            list_of_files = list_of_files + files
    entities = {}
    print('Processing files started.')
    for filename in list_of_files:
        with io.open(filename, "r", encoding="utf-8") as xml_file:
            xmlRoot = xmlET.parse(filename).getroot()
            textTag = xmlRoot.find("TEXT")
            text = tokenizeJudgements.splitJudgement(textTag.text)
            judgement_id = filename.split('_')[-1][:-4]
            judgement_type = filename.split(os.path.sep)[-2]
            # A single membership check replaces the original's two duplicated
            # if/elif ladders. Unknown directory names print 'Error' (as the
            # old else-branches did) and leave the label unchanged; a new
            # entity still gets registered with its default empty label.
            if judgement_id not in entities:
                entities[judgement_id] = JudgementEntity(id=judgement_id, text=text)
            if judgement_type in _KNOWN_LABELS:
                entities[judgement_id]._label = judgement_type
            else:
                print('Error')
    with open(directory + os.path.sep + 'out.csv', 'w', encoding='utf-8') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL, delimiter=',')
        wr.writerow(['ID', 'TEXT', 'LABEL'])
        for key, value in entities.items():
            wr.writerow([key, value._text, value._label])
    print('CSV output generated.')


if __name__ == "__main__":
    runScript()
"""
SWARM Trough detection:
- 3 point median filter
- cut into 45-75 MLAT segments
- background = sliding window of 480 points
- check detrended logarithmic density to see if it has negative peak that both corresponds to the local Ne
minimum within "the window" and lower than a threshold of -0.3
- mark poleward and equatorward transitions back to 0 as the walls
- filter out troughs smaller than 1 degree wide and larger than 18 degrees wide
- if more than one trough is identified in a segment, choose the equatorward one
Terms:
- segment: "orbital segment" from Aa 2020, section of data from 45 - 75 mlat
- orbit: once around the globe by the satellite
- interval: section of data surrounding a tec map (default is 2 hours on either side, total of 5 hours)
Example:
```
data, times = io.get_swarm_data(start_time, end_time, sat)
times, log_ne, background, mlat, mlt = swarm.process_swarm_data_interval(data, times)
segment = swarm.get_closest_segment(times, mlat, tec_time, 45, 75)
dne = log_ne - background
smooth_dne = utils.centered_bn_func(bn.move_mean, dne, 10, pad=True, min_count=1)
trough = swarm.find_troughs_in_segment(mlat[segment], smooth_dne[segment])
if trough:
min_idx, edge_1, edge_2 = trough
...
```
"""
SATELLITES = {'swarm': ('A', 'B', 'C'), 'dmsp': ('dmsp15', 'dmsp16', 'dmsp17', 'dmsp18')}
DIRECTIONS = {'up': (45, 75), 'down': (75, 45)}
import numpy as np
import bottleneck as bn
import pandas
from scipy.interpolate import interp1d
from ttools import io, utils
def fix_latlon(lat, lon):
    """Fix errors arising from using a moving average filter on longitude near the 180/-180 crossover.

    Projects (lat, lon) to x/y on the polar plane (radius = colatitude,
    angle = longitude), median-smooths both coordinates, and replaces any
    point farther than 10x the mean deviation from its smoothed value with
    the smoothed point, converted back to lat/lon.

    Parameters
    ----------
    lat: numpy.ndarray[float]
    lon: numpy.ndarray[float]

    Returns
    -------
    fixed_lat, fixed_lon: numpy.ndarray[float]
    """
    fixed_lat = lat.copy()
    fixed_lon = lon.copy()
    # Polar-plane projection: avoids the -180/180 discontinuity entirely.
    theta = np.radians(lon)
    r = 90 - lat
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    # 21-point centered moving median of each coordinate (min 5 finite points).
    xp = utils.centered_bn_func(bn.move_median, x, 21, pad=True, min_count=5)
    yp = utils.centered_bn_func(bn.move_median, y, 21, pad=True, min_count=5)
    d = np.hypot(x - xp, y - yp)
    # Outliers: more than 10x the mean deviation from the smoothed track.
    bad, = np.nonzero(d > 10 * bn.nanmean(d))
    new_x = xp[bad]
    new_y = yp[bad]
    # Invert the projection for the replaced points.
    new_lat = 90 - np.hypot(new_x, new_y)
    new_lon = np.degrees(np.arctan2(new_y, new_x))
    fixed_lon[bad] = new_lon
    fixed_lat[bad] = new_lat
    print(f"Fixed {bad.shape[0]} bad coordinates")
    return fixed_lat, fixed_lon
def process_swarm_data_interval(data, times, median_window=3, mean_window=481):
    """take log of Ne, perform moving median filter, estimate background using moving average filter

    Parameters
    ----------
    data, times: numpy.ndarray
        ``data`` must provide the fields 'n' (electron density), 'mlat', 'mlt'
    median_window, mean_window: int
        window sizes (in samples) for the denoising median and the background mean

    Returns
    -------
    times, log_ne, background, mlat, mlt
    """
    ne = data['n']
    ne[ne <= 0] = np.nan  # get rid of zeros (NOTE: mutates the caller's array in place)
    log_ne = np.log10(ne)  # take log
    log_ne = utils.centered_bn_func(bn.move_median, log_ne, median_window, min_count=1)  # median
    times, mlat, mlt = utils.moving_func_trim(median_window, times, data['mlat'], data['mlt'])  # trim
    background = utils.centered_bn_func(bn.move_mean, log_ne, mean_window, min_count=10)  # moving average
    times, log_ne, mlat, mlt = utils.moving_func_trim(mean_window, times, log_ne, mlat, mlt)  # trim
    # Map MLT into (-12, 12] so magnetic midnight is continuous at 0.
    mlt[mlt > 12] -= 24
    return times, log_ne, background, mlat, mlt
def get_closest_segment(timestamp, mlat, tec_time, enter_lat, exit_lat=None):
    """given a time corresponding to a TEC map, find the closest segment defined by an enter and exit latitude

    Parameters
    ----------
    timestamp, mlat: numpy.ndarray
    tec_time: numpy.ndarray[datetime64]
        one entry per TEC map (the code broadcasts over it)
    enter_lat: float
    exit_lat: (optional) float

    Returns
    -------
    starts, ends: numpy.ndarray[int]
        start / end indices of the chosen segment for each entry of
        ``tec_time``; empty arrays when no segment exists. NOTE(review):
        the original docstring said a ``slice`` was returned — the code
        returns index arrays.
    """
    # Build the enter/exit masks; direction (ascending vs descending mlat)
    # decides the comparison operators.
    if exit_lat is None:
        enter_mask = mlat >= enter_lat
        exit_mask = mlat < enter_lat
    else:
        if enter_lat < exit_lat:
            enter_mask = mlat >= enter_lat
            exit_mask = mlat >= exit_lat
        else:
            enter_mask = mlat <= enter_lat
            exit_mask = mlat < exit_lat
    starts, ends = get_region_bounds(enter_mask, exit_mask)
    # For each tec_time, pick the segment whose midpoint is nearest to
    # (tec_time + 30 min).
    centers = (starts + ends) // 2
    if centers.size == 0:
        return np.array([], dtype=int), np.array([], dtype=int)
    best_centers = np.argmin(abs(timestamp[None, centers] - (tec_time[:, None] + np.timedelta64(30, 'm'))), axis=1)
    return starts[best_centers], ends[best_centers]
def get_region_bounds(enter_mask, exit_mask):
    """Given masks indicating where the region has started and where the region has ended, return starting and ending
    indices for the region. For finding starting and ending indices of 45-75 MLat segments.

    example:
        enter_mask = mlat >= 45
        exit_mask = mlat > 75

    Parameters
    ----------
    enter_mask: numpy.ndarray[bool]
    exit_mask: numpy.ndarray[bool]

    Returns
    -------
    starts, ends: numpy.ndarray[int]
    """
    # enter_mask = utils.centered_bn_func(bn.move_median, enter_mask, 7, pad=True, min_count=1)
    # exit_mask = utils.centered_bn_func(bn.move_median, exit_mask, 7, pad=True, min_count=1)
    # Rising edges (False -> True) of each mask mark entries / exits.
    starts, = np.nonzero(np.diff(enter_mask.astype(int)) == 1)
    ends, = np.nonzero(np.diff(exit_mask.astype(int)) == 1)
    starts += 1
    ends += 1
    # Pairwise (end - start) distances; non-positive pairs (end at or before
    # start) are invalid, so push them to infinity.
    dist = (ends[:, None] - starts[None, :]).astype(float)
    dist[dist <= 0] = np.inf
    end_pick_mask = np.isfinite(dist).any(axis=1)
    if end_pick_mask.sum() == 0:
        return np.array([], dtype=int), np.array([], dtype=int)
    # Match each remaining end with its nearest preceding start; np.unique
    # keeps one end per distinct start.
    start_pick_ind, end_pick_ind = np.unique(np.argmin(dist[end_pick_mask, :], axis=1), return_index=True)
    return starts[start_pick_ind], ends[end_pick_mask][end_pick_ind]
def find_troughs_in_segment(mlat, smooth_dne, threshold=-.15, width_min=1, width_max=17, fin_rmin=.25):
    """Locate trough candidates within one 45-75 MLat segment.

    ``mlat`` contains no NaNs; ``smooth_dne`` may contain NaNs.

    Parameters
    ----------
    mlat, smooth_dne: numpy.ndarray[float]
    threshold, width_min, width_max, fin_rmin: float

    Returns
    -------
    list of (min_idx, edge_1, edge_2) tuples; empty list when no candidate
    passes the width / finite-fraction / depth checks
    """
    finite = np.isfinite(smooth_dne)
    if not finite.any():
        return []
    # Fill NaN gaps with previous-neighbor interpolation (zeros outside the
    # covered range) so zero crossings can be located on a gap-free series.
    filled = smooth_dne.copy()
    if not finite.all():
        fill = interp1d(mlat[finite], smooth_dne[finite], kind='previous',
                        bounds_error=False, fill_value=0)
        filled[~finite] = fill(mlat[~finite])
    # Sign-change indices bound the candidate intervals; the segment's first
    # and last samples act as implicit boundaries.
    crossings, = np.nonzero(np.diff(filled >= 0))
    crossings = np.concatenate(([0], crossings + 1, [filled.shape[0] - 1]))
    # Walk intervals equatorward-first: reverse the order when mlat descends.
    if mlat[0] > mlat[-1]:
        order = range(crossings.shape[0] - 2, -1, -1)
    else:
        order = range(crossings.shape[0] - 1)
    candidates = []
    for idx in order:
        left = crossings[idx]
        right = crossings[idx + 1]
        # Reject intervals that are too narrow/wide or mostly NaN.
        if not width_min <= abs(mlat[left] - mlat[right]) <= width_max:
            continue
        if finite[left:right].mean() < fin_rmin:
            continue
        min_idx = left + np.nanargmin(smooth_dne[left:right])
        # Keep the interval only when its minimum is deep enough.
        if smooth_dne[min_idx] <= threshold:
            candidates.append((min_idx, left, right))
    return candidates
def get_segments_data(tec_times):
    """This function collects and organizes satellite data into "orbital segments" from 45 - 75 mlat. There are two
    orbital segments per orbit of the satellite: 'up' and 'down', where the mlat is increasing and decreasing with
    time respectively. Finally there are multiple satellites and so for each tec time, there are 2 * n_sats segments.

    Parameters
    ----------
    tec_times: numpy.ndarray[datetime64]

    Returns
    -------
    dict
        {
            sat: {
                'up': [
                    dict{times, mlat, mlt, log_ne, dne, smooth_dne, direction, tec_time, tec_ind},
                    ...,
                ],
                'down': [...]
            },
            ...
        }
    """
    # Group tec_times into contiguous intervals (gaps > 2*dt start a new
    # one) and pad each interval by dt on both sides so edge segments are
    # complete.
    dt = np.timedelta64(5, 'h')
    i = np.argwhere(abs(np.diff(tec_times)) > 2 * dt)[:, 0]
    interval_start = np.concatenate((tec_times[[0]], tec_times[i + 1]), axis=0) - dt
    interval_end = np.concatenate((tec_times[i], tec_times[[-1]]), axis=0) + dt
    sat_segments = {sat: {direction: [] for direction in DIRECTIONS} for sat in SATELLITES['swarm']}
    for idx in range(len(interval_start)):
        tt = tec_times[(tec_times > interval_start[idx]) * (tec_times < interval_end[idx])]
        sat_data, sat_times = io.get_swarm_data(interval_start[idx], interval_end[idx])
        for sat in SATELLITES['swarm']:
            times, log_ne, background, mlat, mlt = process_swarm_data_interval(sat_data[sat], sat_times)
            dne = log_ne - background  # detrended log density
            smooth_dne = utils.centered_bn_func(bn.move_mean, dne, 9, pad=True, min_count=1)
            fin_mask = np.isfinite(mlat)
            for direction, (enter, exit) in DIRECTIONS.items():
                starts, stops = get_closest_segment(times[fin_mask], mlat[fin_mask], tt, enter, exit)
                for i, (start, stop) in enumerate(zip(starts, stops)):
                    sl = slice(start, stop)
                    # Reject segments whose midpoint is more than 2 h from
                    # the reference time (map time + 30 min).
                    if abs(times[fin_mask][(start + stop) // 2] - (tt[i] + np.timedelta64(30, 'm'))) > np.timedelta64(2, 'h'):
                        continue
                    data = {
                        'times': times[fin_mask][sl],
                        'mlat': mlat[fin_mask][sl],
                        'mlt': mlt[fin_mask][sl],
                        'log_ne': log_ne[fin_mask][sl],
                        'dne': dne[fin_mask][sl],
                        'smooth_dne': smooth_dne[fin_mask][sl],
                        'direction': direction,
                        'tec_time': tt[i],
                        'tec_ind': i,
                    }
                    sat_segments[sat][direction].append(data)
    return sat_segments
def get_troughs(segments):
    """Extract trough candidates from every satellite segment and tabulate them.

    Parameters
    ----------
    segments: dict
        Nested mapping {sat: {direction: [segment dicts]}} as produced by the
        segment-collection routine; each segment dict must carry 'mlat',
        'mlt', 'smooth_dne', 'tec_ind' and 'tec_time'.

    Returns
    -------
    pandas.DataFrame
        One row per trough candidate, plus one sentinel row per segment with
        trough == False and zeroed trough fields, marking "segment was
        processed" even when no trough was found.
    """
    sat_troughs = []
    for sat, sat_segments in segments.items():
        for direction, segms in sat_segments.items():
            for seg in segms:
                # Candidate troughs as (minimum index, edge-1 index, edge-2 index).
                trough_candidates = find_troughs_in_segment(seg['mlat'], seg['smooth_dne'])
                data_rows = []
                # Per-segment metadata appended to every row of this segment.
                seg_info = (sat, seg['mlat'][0], seg['mlt'][0], seg['mlat'][-1], seg['mlt'][-1], seg['tec_ind'], seg['tec_time'], direction)
                # Sentinel "no trough" row (trough=False, zeroed fields).
                trough_unpacked = (False, 0, 0, 0, 0, 0, 0, 0)
                data_rows.append(trough_unpacked + seg_info)
                for tc in trough_candidates:
                    min_idx, e1_idx, e2_idx = tc
                    trough_unpacked = (True, seg['mlat'][min_idx], seg['mlt'][min_idx], seg['smooth_dne'][min_idx],
                                       seg['mlat'][e1_idx], seg['mlt'][e1_idx], seg['mlat'][e2_idx], seg['mlt'][e2_idx])
                    data_rows.append(trough_unpacked + seg_info)
                sat_troughs += data_rows
    sat_troughs = pandas.DataFrame(data=sat_troughs,
                                   columns=['trough', 'min_mlat', 'min_mlt', 'min_dne', 'e1_mlat', 'e1_mlt', 'e2_mlat',
                                            'e2_mlt', 'sat', 'seg_e1_mlat', 'seg_e1_mlt', 'seg_e2_mlat',
                                            'seg_e2_mlt', 'tec_ind', 'tec_time', 'direction'])
    # Sanity check: no NaNs should survive in the numeric / time columns.
    # NOTE(review): 'tec_time' is datetime-like — verify np.isnan accepts the
    # selected mixed-dtype frame on the pandas/numpy versions in use.
    assert not np.any(np.isnan(sat_troughs[['min_mlat', 'min_mlt', 'min_dne', 'e1_mlat', 'e1_mlt', 'e2_mlat', 'e2_mlt',
                                            'seg_e1_mlat', 'seg_e1_mlt', 'seg_e2_mlat', 'seg_e2_mlt', 'tec_ind', 'tec_time']]))
    return sat_troughs
def fix_trough_list(tec_times, troughs):
    """Re-index the trough table with globally continuous indices.

    Rewrites 'tec_ind' to count continuously across days and assigns each
    (day, TEC map, satellite, direction) combination a unique 'sat_ind',
    then returns the rows sorted by that global index.

    Parameters
    ----------
    tec_times: numpy.ndarray[datetime64]
        Times of the TEC maps the troughs were matched against.
    troughs: pandas.DataFrame
        Output of get_troughs.

    Returns
    -------
    pandas.DataFrame
    """
    dates = np.unique(tec_times.astype("datetime64[D]"))
    results = []
    for t, date in enumerate(dates):
        # NOTE(review): the constants below assume 24 TEC maps per day and
        # len(SATELLITES['swarm']) == 3 with 2 directions each, giving
        # 144 = 24 * 6 slots per day and 6 slots per map — confirm against
        # the module-level SATELLITES / DIRECTIONS definitions.
        for tec_ind in range(24):
            for s, sat in enumerate(SATELLITES['swarm']):
                for d, direction in enumerate(DIRECTIONS):
                    # Select rows belonging to this (day, map, sat, direction).
                    mask = (
                        (troughs['tec_ind'] == tec_ind) &
                        (troughs['sat'] == sat) & (troughs['direction'] == direction) &
                        (troughs['tec_time'].values.astype("datetime64[D]") == date)
                    )
                    sat_ind_results = troughs[mask].copy()
                    # Global, continuous indices across all days.
                    sat_ind_results['tec_ind'] = 24 * t + tec_ind
                    sat_ind_results['sat_ind'] = 144 * t + 6 * tec_ind + 2 * s + d
                    results.append(sat_ind_results)
    results = pandas.concat(results, ignore_index=True).sort_values('sat_ind').reset_index(drop=True)
    return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
import re
import datetime
import uuid
import logging
class TelegramBot:
    """Telegram bot that reports the current temperature on request.

    Talks directly to the Telegram Bot HTTP API via `requests`.  Handles
    plain chat messages, inline queries, and — when subscriptions are
    enabled — daily scheduled temperature notifications managed through the
    injected scheduler.  All user-facing strings are intentionally Russian.
    """

    # telegram bot settings
    # Highest update_id already processed; used as the getUpdates offset.
    last_update_id = 0
    start_message = '{} Я могу сообщать текущую температуру в Поселке Программистов в ответ на любое сообщение.'
    schedule_question_message = 'Во сколько вам присылать температуру? Ответить можно в таком формате: 9:11, 913, 15-34, 09:20, 0745 и тп'
    schedule_success_message = 'Отлично! Теперь вы будете получать сообщения каждый день в {}:{}'
    schedule_parsing_fail_message = 'Не понятно. :( Во сколько вам присылать температуру? Ответьте в одном из форматов: 9:11, 913, 15-34, 09:20, 0745 и тп'
    schedule_fail_message = 'Не удалось запланировать. Произошла какая-то ошибка! Но это не точно.'

    def __init__(self, bot_id, bot_api_key, should_use_subscriptions, temperature_provider, scheduler):
        """Store collaborators and build the bot API base URL.

        Args:
            bot_id: numeric bot identifier (first half of the token).
            bot_api_key: secret part of the bot token.
            should_use_subscriptions: enable /subscribe, /list, /clear commands.
            temperature_provider: object with getActualTemperature().
            scheduler: object managing per-chat daily notification times.
        """
        self.__temperature_provider = temperature_provider
        self.__scheduler = scheduler
        self.__subscribers_last_notification_time = datetime.datetime.now()
        # Chats that were asked "what time?" and whose next message is a time.
        self.__schedule_awaiting_chat_ids = set()
        self.__bot_url = "https://api.telegram.org/bot{}:{}/".format(bot_id, bot_api_key)
        self.__should_use_subscriptions = should_use_subscriptions

    def __send_message(self, chat, text):
        """POST a sendMessage call; return the response or None on failure."""
        params = {'chat_id': chat, 'text': text}
        try:
            response = requests.post(self.__bot_url + 'sendMessage', data=params)
            return response
        except Exception as e:
            logging.exception('Ошибка Telegram API sendMessage.')
            return None

    def __answer_inline_query(self, inline_query_id, text):
        """Answer an inline query with a single article result carrying `text`."""
        input_message_content = {'message_text': text}
        result = { 'type': 'article', 'id': str(uuid.uuid1()), 'title': text, 'input_message_content': input_message_content, 'thumb_url': 'https://t3.ftcdn.net/jpg/01/93/96/42/240_F_193964277_ctURMub96PZdUvuZijDbRUTK5uBVmBXF.jpg' }
        results_json = json.dumps([result])
        params = {'inline_query_id': inline_query_id, 'results': results_json }
        try:
            response = requests.post(self.__bot_url + 'answerInlineQuery', data=params)
            return response
        except Exception as e:
            logging.exception('Ошибка Telegram API answerInlineQuery.')
            return None

    def __broadcast(self, chat_ids, message):
        """Send `message` to every chat in `chat_ids`."""
        for chat_id in chat_ids:
            self.__send_message(chat_id, message)

    def __get_updates(self):
        """Long-poll getUpdates past the last seen id; returns a (possibly empty) list."""
        offset = self.last_update_id + 1
        params = {'timeout': 30, 'offset': offset}
        response_json = {}
        try:
            response = requests.get(self.__bot_url + 'getUpdates', params)
            response_json = response.json()
        except Exception as e:
            logging.exception('Ошибка Telegram API getUpdates.')
        updates = response_json.get('result', [])
        if len(updates) > 0:
            # Remember the newest update so it is not fetched again.
            update_ids = [update['update_id'] for update in updates]
            self.last_update_id = max(update_ids)
        return updates

    def __parseTime(self, timeStr):
        """Parse times like 9:11, 913, 15-34, 09:20, 0745.

        Returns (True, (hours, minutes)) on success, (False, None) otherwise.
        Note: values are not range-checked here (e.g. 99:99 would parse).
        """
        regex = r'^(?P<hours>\d{1,2}?)[:\-/\\ ]?(?P<minutes>\d{1,2})$'
        result = re.search(regex, timeStr)
        if result == None:
            return (False, None)
        hours = result.group('hours')
        minutes = result.group('minutes')
        return (True, (int(hours), int(minutes)))

    def __get_actual_temperature_message_text(self):
        """Build the 'current temperature' reply text."""
        temperature = self.__temperature_provider.getActualTemperature()
        temperature_message = 'Сейчас в поселке ' + \
            str(temperature) + ' градусов'
        return temperature_message

    def __get_message(self, update):
        """Return the update's message or edited message, or None."""
        message = update.get('message', None)
        if message == None:
            message = update.get('edited_message', None)
        return message

    def __get_username(self, message):
        """Return the sender's username, or None if absent."""
        try:
            # TODO: rework using {}.get(...), get rid of the try-except
            return message['from']['username']
        except:
            return None

    def processUpdates(self):
        """Fetch pending updates and reply to each one.

        Handles inline queries, the /start greeting, subscription commands
        (/subscribe, /list, /clear), pending time answers, and falls back to
        sending the current temperature for any other message.
        """
        temperature_message = self.__get_actual_temperature_message_text()
        # Chats already sent the temperature in this batch (avoid duplicates).
        notified_chat_ids = set()
        updates = self.__get_updates()
        for update in updates:
            # Handle an inline query.
            inline_query = update.get('inline_query', None)
            if inline_query != None:
                id = inline_query['id']
                # TODO: check the result of __answer_inline_query to make sure the answers were sent
                self.__answer_inline_query(id, temperature_message)
                continue
            message = self.__get_message(update)
            if message == None:
                continue
            chat_id = message['chat']['id']
            username = self.__get_username(message)
            # Command list with descriptions, for BotFather:
            # subscribe - schedule a notification
            # list - show the scheduled notifications
            # clear - remove all scheduled notifications
            text = message.get('text', None)
            if text == '/start':
                # TODO: the username field may be missing
                name = username or message['from'].get('first_name', None)
                welcomeText = 'Привет!' if name == None else 'Привет, {}!'.format(name)
                reply = self.start_message.format(welcomeText)
                self.__send_message(chat_id, reply)
            elif text == '/schedule' or text == '/plan' or text == '/subscribe':
                if self.__should_use_subscriptions:
                    self.__send_message(chat_id, self.schedule_question_message)
                    self.__schedule_awaiting_chat_ids.add(chat_id)
                else:
                    self.__send_message(chat_id, "Подписки отключены")
            elif chat_id in self.__schedule_awaiting_chat_ids:
                # This chat was asked for a time; try to parse the reply.
                is_success, time = self.__parseTime(text)
                if is_success:
                    hours, minutes = time
                    if self.__scheduler.schedule(chat_id, hours, minutes):
                        reply = self.schedule_success_message.format(
                            str(hours).zfill(2), str(minutes).zfill(2))
                        self.__send_message(chat_id, reply)
                        self.__schedule_awaiting_chat_ids.remove(chat_id)
                    else:
                        self.__send_message(
                            chat_id, self.schedule_fail_message)
                else:
                    self.__send_message(
                        chat_id, self.schedule_parsing_fail_message)
            elif text == '/list':
                if self.__should_use_subscriptions:
                    times = self.__scheduler.get_timetable_by_chat_id(chat_id)
                    timetable = sorted([str(t.hour).zfill(2) + ':' + str(t.minute).zfill(2) for t in times])
                    timesStr = ', '.join(timetable)
                    reply = 'У вас нет подписок на увдомления о температуре.'
                    if len(times) > 0:
                        reply = 'Вы узнаете температуру ежедневно в {}.'.format(timesStr)
                    self.__send_message(chat_id, reply)
                else:
                    self.__send_message(chat_id, "Подписки отключены")
            elif text == '/clear':
                if self.__should_use_subscriptions:
                    self.__scheduler.clear_subscriptions(chat_id)
                else:
                    self.__send_message(chat_id, "Подписки отключены")
            elif not chat_id in notified_chat_ids:
                self.__send_message(chat_id, temperature_message)
                notified_chat_ids.add(chat_id)
        # TODO: the useless source message could be deleted, see https://core.telegram.org/bots/api#deletemessage
        # only direct messages to the bot in the bot's chat should be deleted, not inline invocations.

    def processSubscribers(self):
        """Send the temperature to chats whose scheduled time has arrived.

        Throttled to run at most once per minute; covers the window since the
        previous successful notification pass.
        """
        if not self.__should_use_subscriptions:
            return
        now = datetime.datetime.now()
        delta = datetime.timedelta(minutes=1)
        if now - self.__subscribers_last_notification_time < delta:
            return
        chat_ids = self.__scheduler.get_chat_ids_by_time_range(self.__subscribers_last_notification_time, now)
        if len(chat_ids) == 0:
            return
        temperature_message = self.__get_actual_temperature_message_text()
        self.__broadcast(chat_ids, temperature_message)
        self.__subscribers_last_notification_time = now
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class RouteRange(Base):
    """A set of routes to be included in the router interface.
    The RouteRange class encapsulates a list of routeRange resources that are managed by the user.
    A list of resources can be retrieved from the server using the RouteRange.find() method.
    The list can be managed by using the RouteRange.add() and RouteRange.remove() methods.

    NOTE: this class follows the auto-generated ixnetwork_restpy SDM pattern;
    all attribute access is delegated to the generic Base accessors.
    """

    # No per-instance __dict__: all state is held by the Base class.
    __slots__ = ()
    # Server-side SDM resource name for this class.
    _SDM_NAME = "routeRange"
    # Python attribute name -> REST attribute name mapping, consumed by the
    # generic Base accessors (_get_attribute / _set_attribute / _map_locals).
    _SDM_ATT_MAP = {
        "Enabled": "enabled",
        "FirstRoute": "firstRoute",
        "MaskWidth": "maskWidth",
        "Metric": "metric",
        "NextHop": "nextHop",
        "NoOfRoutes": "noOfRoutes",
        "RouteTag": "routeTag",
    }
    # This resource declares no enum-valued attributes.
    _SDM_ENUM_MAP = {}

    def __init__(self, parent, list_op=False):
        """Initialize the resource proxy.

        Args
        ----
        - parent (obj): the parent resource this routeRange list belongs to
        - list_op (bool): whether this instance participates in a list operation
        """
        super(RouteRange, self).__init__(parent, list_op)

    @property
    def Enabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Enables the use of this route range for the simulated router.
        """
        return self._get_attribute(self._SDM_ATT_MAP["Enabled"])

    @Enabled.setter
    def Enabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP["Enabled"], value)

    @property
    def FirstRoute(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The first network address to be used in creating this route range. Note: Multicast and loopback addresses are not supported in this IPv4 route range implementation.
        """
        return self._get_attribute(self._SDM_ATT_MAP["FirstRoute"])

    @FirstRoute.setter
    def FirstRoute(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP["FirstRoute"], value)

    @property
    def MaskWidth(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The network mask to be applied to the networkIpAddress to yield the non-host part of the address. A value of 0 means there is no subnet address.
        """
        return self._get_attribute(self._SDM_ATT_MAP["MaskWidth"])

    @MaskWidth.setter
    def MaskWidth(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["MaskWidth"], value)

    @property
    def Metric(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The total metric cost for these routes. The valid range is from 1 to 16 (inclusive). A value of 16 means that the destination is not reachable, and that route will be removed from service.
        """
        return self._get_attribute(self._SDM_ATT_MAP["Metric"])

    @Metric.setter
    def Metric(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["Metric"], value)

    @property
    def NextHop(self):
        # type: () -> str
        """
        Returns
        -------
        - str: The immediate next hop IP address on the way to the destination address.
        """
        return self._get_attribute(self._SDM_ATT_MAP["NextHop"])

    @NextHop.setter
    def NextHop(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP["NextHop"], value)

    @property
    def NoOfRoutes(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The number of networks to be generated for this route range, based on the network address plus the network mask.
        """
        return self._get_attribute(self._SDM_ATT_MAP["NoOfRoutes"])

    @NoOfRoutes.setter
    def NoOfRoutes(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["NoOfRoutes"], value)

    @property
    def RouteTag(self):
        # type: () -> int
        """
        Returns
        -------
        - number: An arbitrary value associated with the routes in this range. It is used to provide a means for distinguishing internal versus external RIP routes.
        """
        return self._get_attribute(self._SDM_ATT_MAP["RouteTag"])

    @RouteTag.setter
    def RouteTag(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP["RouteTag"], value)

    def update(
        self,
        Enabled=None,
        FirstRoute=None,
        MaskWidth=None,
        Metric=None,
        NextHop=None,
        NoOfRoutes=None,
        RouteTag=None,
    ):
        # type: (bool, str, int, int, str, int, int) -> RouteRange
        """Updates routeRange resource on the server.

        Args
        ----
        - Enabled (bool): Enables the use of this route range for the simulated router.
        - FirstRoute (str): The first network address to be used in creating this route range. Note: Multicast and loopback addresses are not supported in this IPv4 route range implementation.
        - MaskWidth (number): The network mask to be applied to the networkIpAddress to yield the non-host part of the address. A value of 0 means there is no subnet address.
        - Metric (number): The total metric cost for these routes. The valid range is from 1 to 16 (inclusive). A value of 16 means that the destination is not reachable, and that route will be removed from service.
        - NextHop (str): The immediate next hop IP address on the way to the destination address.
        - NoOfRoutes (number): The number of networks to be generated for this route range, based on the network address plus the network mask.
        - RouteTag (number): An arbitrary value associated with the routes in this range. It is used to provide a means for distinguishing internal versus external RIP routes.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Only the named parameters actually passed by the caller are sent.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(
        self,
        Enabled=None,
        FirstRoute=None,
        MaskWidth=None,
        Metric=None,
        NextHop=None,
        NoOfRoutes=None,
        RouteTag=None,
    ):
        # type: (bool, str, int, int, str, int, int) -> RouteRange
        """Adds a new routeRange resource on the server and adds it to the container.

        Args
        ----
        - Enabled (bool): Enables the use of this route range for the simulated router.
        - FirstRoute (str): The first network address to be used in creating this route range. Note: Multicast and loopback addresses are not supported in this IPv4 route range implementation.
        - MaskWidth (number): The network mask to be applied to the networkIpAddress to yield the non-host part of the address. A value of 0 means there is no subnet address.
        - Metric (number): The total metric cost for these routes. The valid range is from 1 to 16 (inclusive). A value of 16 means that the destination is not reachable, and that route will be removed from service.
        - NextHop (str): The immediate next hop IP address on the way to the destination address.
        - NoOfRoutes (number): The number of networks to be generated for this route range, based on the network address plus the network mask.
        - RouteTag (number): An arbitrary value associated with the routes in this range. It is used to provide a means for distinguishing internal versus external RIP routes.

        Returns
        -------
        - self: This instance with all currently retrieved routeRange resources using find and the newly added routeRange resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained routeRange resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(
        self,
        Enabled=None,
        FirstRoute=None,
        MaskWidth=None,
        Metric=None,
        NextHop=None,
        NoOfRoutes=None,
        RouteTag=None,
    ):
        # type: (bool, str, int, int, str, int, int) -> RouteRange
        """Finds and retrieves routeRange resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve routeRange resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all routeRange resources from the server.

        Args
        ----
        - Enabled (bool): Enables the use of this route range for the simulated router.
        - FirstRoute (str): The first network address to be used in creating this route range. Note: Multicast and loopback addresses are not supported in this IPv4 route range implementation.
        - MaskWidth (number): The network mask to be applied to the networkIpAddress to yield the non-host part of the address. A value of 0 means there is no subnet address.
        - Metric (number): The total metric cost for these routes. The valid range is from 1 to 16 (inclusive). A value of 16 means that the destination is not reachable, and that route will be removed from service.
        - NextHop (str): The immediate next hop IP address on the way to the destination address.
        - NoOfRoutes (number): The number of networks to be generated for this route range, based on the network address plus the network mask.
        - RouteTag (number): An arbitrary value associated with the routes in this range. It is used to provide a means for distinguishing internal versus external RIP routes.

        Returns
        -------
        - self: This instance with matching routeRange resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of routeRange data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the routeRange resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
class cbase():
    """Base building block for simple C source generation.

    A cbase holds an ordered list of items, each either a plain string or an
    object exposing a generate() method (typically another cbase).
    generate() renders every item, indenting plain strings to this node's
    level and delegating to nested objects at one level deeper.
    """

    def __init__(self):
        self.items = []      # ordered children: strings or generate()-capable objects
        self.indentLvl = 0   # nesting depth of this node
        self.indentAmt = 2   # spaces per indent level

    def generateOrUse(self, item):
        """Render one item: delegate to its generate() if present, otherwise
        indent the raw string to this node's level."""
        if hasattr(item, "generate"):
            # Nested generator: render it one level deeper than this node.
            item.indentLvl = self.indentLvl + 1
            return item.generate()
        return self.getIndentStr() + item.lstrip()

    def getIndentStr(self, offset=0):
        """Return the whitespace prefix for this node's level (+ offset)."""
        return " " * self.indentAmt * (self.indentLvl + offset)

    def add(self, item, before=None, after=None):
        """Add an item (or each element of a list/tuple of items), optionally
        positioned before/after an existing item.

        Returns the list of items actually added.

        Raises
        ------
        Exception: if both `before` and `after` are given.
        ValueError: if the `before`/`after` anchor is not a current item.
        """
        # Bug fix: validate once up front.  The original checked inside the
        # loop, so with an empty input the error went undetected, and the
        # check was needlessly repeated per element.
        if before is not None and after is not None:
            raise Exception("Cannot set both before and after")
        # Normalize to a sequence without shadowing the parameter (the
        # original reused `item` as the loop variable).
        new_items = item if isinstance(item, (list, tuple)) else [item]
        itemsAdded = []
        for entry in new_items:
            if before is not None:
                self.items.insert(self.items.index(before), entry)
            elif after is not None:
                self.items.insert(self.items.index(after) + 1, entry)
            else:
                self.items.append(entry)
            itemsAdded.append(entry)
        return itemsAdded

    def addComment(self, comment):
        """Append a C block comment line."""
        self.add("/* {} */\n".format(comment))

    def generateDeclaration(self):
        """Hook for subclasses that emit a separate declaration; no-op here."""
        pass

    def generate(self):
        """Render all children in order and return the concatenated text."""
        indentedLines = [self.generateOrUse(item) for item in self.items]
        return "".join(indentedLines)
from __future__ import absolute_import, division, print_function
import sys
import os

# Make the perf trace helper modules importable; perf sets PERF_EXEC_PATH
# in the environment when it invokes this script.
sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

# Command (process) name to filter on, passed as the first script argument.
expected_command = sys.argv[1]


def process_event(params):
    """perf-script callback: print "addr<TAB>ip" for each sample belonging to
    the expected command; samples from other commands are ignored."""
    if params["comm"] != expected_command:
        return
    sample = params["sample"]
    print("%d\t%d" % (sample["addr"], sample["ip"]))
|
import math
import numpy as np
import random

# * parameters
k_fold = 5
random.seed(291)

# * load the raw dataset and drop the header row in one step
input_filename = "admission_predict.csv"
with open(input_filename, "r") as f:
    dataset = f.readlines()[1:]

# * shuffle the cleaned rows (deterministic thanks to the fixed seed)
random.shuffle(dataset)

# * 70/30 train/test split
dataset_size = len(dataset)
dataset_size_70 = int(math.floor(0.7 * dataset_size))
dataset_split = [dataset[:dataset_size_70], dataset[dataset_size_70:]]

# * write each part to its own CSV file
output_filenames = ["train", "test"]
for name, part in zip(output_filenames, dataset_split):
    with open(name + "_data.csv", "w") as f:
        f.writelines(part)
|
#Tutor assistant database file, by Cyani
#This is the main program. All other programs written are run from here
#Thank you to Python Central for teaching me how to use basic database operations in python using sqlite3
#Credit is also given in each file where due
#Throughout the program, procedures beginning with "a" are admin, "t" are teacher, and "s" are students
#For example, where you see accesslevel =='a', it means if the current user is an admin
#If you are struggling to understand the database sections, I recommend reading through Python Central's sqlite3 tutorial
#Basically, db is the current accessed database, commit means save changes, and close is used to close the connection.
#From there, it is basically SQL
#import pdb; pdb.set_trace()
try:
import sqlite3
import time
import datetime
except:
print("Please ensure you have the sqlite3, time, and datetime modules installed")
#This is used to see if all the necessary databases are present in the folder
#If they are, an error will be raised, hence the except
#If they are not, they are created
def tableCheck():
    """Ensure all required SQLite databases exist, creating any that are missing.

    Four database files are checked: sdb.db (students), tdb.db (teachers),
    adb.db (admins) and ddb.db (detention).  Table creation is attempted
    unconditionally; if the table already exists sqlite3 raises
    OperationalError and the database is reported as found.  The admin
    database is seeded with a default "admin" user so the program can be
    used right away.
    """

    def ensure(filename, ddl, label, seed=None, suffix=""):
        # Create the table described by `ddl` in `filename` and report status.
        # `seed`, if given, inserts initial rows right after creation.
        # `suffix` is appended to the messages (the last check adds a blank line).
        db = sqlite3.connect(filename)
        cursor = db.cursor()
        try:
            cursor.execute(ddl)
            db.commit()
            if seed is not None:
                seed(cursor)
                db.commit()
            db.close()
            print(label + " database not found\n" + label + " database created" + suffix)
        except sqlite3.OperationalError:
            # Table already exists: the database was found.  (The original
            # used a bare `except:` here, which also hid real errors.)
            db.close()
            print(label + " database found" + suffix)

    def seed_admin(cursor):
        # An admin is created so the program can be used right away.
        cursor.execute('''INSERT INTO admins(name)
        VALUES(?)''', ("admin",))

    ensure('sdb.db', '''
    CREATE TABLE students(id INTEGER PRIMARY KEY, name TEXT,
    class TEXT, email TEXT unique, Homework TEXT, content TEXT)
    ''', "student")
    ensure('tdb.db', '''
    CREATE TABLE teachers(id INTEGER PRIMARY KEY, name TEXT,
    class TEXT, email TEXT unique)
    ''', "teacher")
    ensure('adb.db', '''
    CREATE TABLE admins(id INTEGER PRIMARY KEY, name TEXT,
    class TEXT, email TEXT unique)
    ''', "admin", seed=seed_admin)
    ensure('ddb.db', '''
    CREATE TABLE detention(id INTEGER PRIMARY KEY, name TEXT,
    class TEXT, reason TEXT)
    ''', "detention", suffix="\n")
#Checks which students are in detention
def checkDetention():
    """Print the students currently in detention.

    Admins see every student in detention; teachers only see students of
    their own class; any other access level sees nothing.  Relies on the
    module globals `accesslevel` and (for teachers) `users_name`.
    """
    if accesslevel == "a":
        db = sqlite3.connect('ddb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT name,reason FROM detention''')
    elif accesslevel == "t":
        # Teachers are only allowed to see students in their class who are in detention
        db = sqlite3.connect('tdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT class FROM teachers WHERE name=?''', (users_name,))
        obtained = cursor.fetchone()
        db.close()
        if obtained is None:
            # Bug fix: the original fell through here and crashed with a
            # NameError on the undefined `names` below.
            print("You are not assigned to a class, so you cannot view your pupils in detention")
            return
        db = sqlite3.connect('ddb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT name,reason FROM detention WHERE class =? ''', (obtained[0],))
    else:
        # Students (or unknown access levels) may not view the detention list.
        # Bug fix: the original reached fetchall() with no cursor defined.
        return
    names = cursor.fetchall()
    if not names:
        print("No one is in detention")
    else:
        for x in names:
            print(x[0] + "is in detention for " + x[1])
    db.close()
#Add a student to detention
def addToDetention():
    """Add a student to the detention list.

    Teachers may only give detention to students in their own class; admins
    may give detention to any known student.  Relies on the module globals
    `accesslevel` and (for teachers) `users_name`.
    """
    # Which student gets put into detention, and why
    name = input("What is the name of the student you would like to put into detention?\n")
    reason = input("What is the reason you have added the pupil to the detention list?\n:")
    if accesslevel == "t":
        # Get the teacher's class
        db = sqlite3.connect('tdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT class FROM teachers WHERE name=?''', (users_name,))
        classname = cursor.fetchone()
        if classname == None:
            print("You are not assigned to a class, so cannot give any students detention (can only give detention to your own studetns) ")
        else:
            # Find the student
            classname = classname[0]
            db.close()
            db = sqlite3.connect('sdb.db')
            cursor = db.cursor()
            # Check the student is in the teacher's class.
            # Bug fix: the original used "WHERE class=?, name=?", which is
            # invalid SQL — conditions must be joined with AND.
            cursor.execute('''SELECT name FROM students WHERE class=? AND name=?''', (classname, name,))
            name = cursor.fetchone()
            if name == None:
                print("This student is not in your class, so you cannot give them detention")
            else:
                name = name[0]
                db.close()
                db = sqlite3.connect('ddb.db')
                cursor = db.cursor()
                cursor.execute('''INSERT INTO detention(name, class, reason)
                VALUES(?,?,?)''', (name, classname, reason))
                db.commit()
                db.close()
                print(name + " have been added to the detention list")
    elif accesslevel == "a":
        # Get the class of the student
        db = sqlite3.connect('sdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT class FROM students WHERE name=?''', (name,))
        classname = cursor.fetchone()
        db.close()
        # If the student isn't found, say so
        if classname == None:
            print("This student was not found")
        else:
            # Add the student to the detention list
            classname = classname[0]
            db = sqlite3.connect('ddb.db')
            cursor = db.cursor()
            cursor.execute('''INSERT INTO detention(name, class, reason)
            VALUES(?,?,?)''', (name, classname, reason))
            db.commit()
            db.close()
            print(name + " has been added to the detention list")
#Remove a student from detention
def removeFromDetention():
    """Remove a named student from the detention database, if present."""
    connection = sqlite3.connect('ddb.db')
    cursor = connection.cursor()
    # Ask which student should be released from detention.
    target = input("Enter the full name of the student you would like to delete\n:")
    cursor.execute('''SELECT name FROM detention WHERE name=?''', (target,))
    row = cursor.fetchone()
    if row is None:
        # Nothing to delete: the student is not on the list.
        print("This student is not in detention")
    else:
        # Delete the matching entry and persist the change.
        matched = row[0]
        cursor.execute('''DELETE FROM detention WHERE name = ? ''', (matched,))
        connection.commit()
        connection.close()
        print(matched, " has been removed from the detention database")
#Change the Homework status. This is also used to create Homework
def updateHomework():
    """Create or update the homework assignment for a class.

    Teachers set homework for their own class; admins choose the class.
    The due date is stored in the students' Homework column, the assignment
    text in the content column.  Relies on the module globals `accesslevel`
    and (for teachers) `users_name`.
    """
    year = int(input("What year is the Homework due? (for 2019, type \"2019\")\n:"))
    month = int(input("What month is the Homework due? (for January, type \"1\")\n:"))
    day = int(input("What day of the month is the Homework due? (for the 15th, type \"15\")\n:"))
    date = datetime.date(year, month, day)
    Homework = input("What is the Homework that you have assigned?\n")
    if accesslevel == "t":
        # Find the teacher's class
        db = sqlite3.connect('tdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT class FROM teachers WHERE name=?''', (users_name,))
        classfind = cursor.fetchone()
        db.close()
        # Bug fix: the original called fetchone()[0] unconditionally and
        # crashed with a TypeError for a teacher without a class.  Guard it,
        # matching checkHomework's handling of the same situation.
        if classfind == None:
            print("You are not currently assigned to a class. You cannot assign Homework for a class that does not exist.")
            return
        classfind = classfind[0]
        # Set the Homework for all students currently in the teacher's class
        db = sqlite3.connect('sdb.db')
        cursor = db.cursor()
        cursor.execute('''UPDATE students SET Homework = ?, content = ? WHERE class = ? ''',
                       (date, Homework, classfind))
        db.commit()
        db.close()
        print("Homework has been updated for", date, "and it is to do", Homework)
    if accesslevel == "a":
        # Assign the Homework to all students currently in the inputted class
        classHomework = input("What class is this for?\n:")
        db = sqlite3.connect('sdb.db')
        cursor = db.cursor()
        cursor.execute('''UPDATE students SET Homework = ?, content = ? WHERE class = ? ''',
                       (date, Homework, classHomework))
        db.commit()
        db.close()
        print("Homework has been updated for", date, "and it is to do", Homework)
#This is the homework checking procedure
def checkHomework():
    """Report the homework due date for the current user.

    Students see their own homework, teachers their class's, and admins any
    class they name.  If the homework is due today or overdue, a student is
    asked whether they have it and is added to detention if not.  Relies on
    the module globals `accesslevel` and `users_name`.
    """
    # If the user is a student, select their Homework
    if accesslevel == "s":
        db = sqlite3.connect('sdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT Homework FROM students WHERE name=?''', (users_name,))
    # If the user is a teacher, find out the teacher's class and check the Homework for that class
    elif accesslevel == "t":
        db = sqlite3.connect('tdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT class FROM teachers WHERE name=?''', (users_name,))
        obtained = cursor.fetchone()
        db.close()
        if obtained == None:
            # NOTE(review): after printing this, execution still falls through
            # to the fetchone() below with a cursor from the already-closed
            # tdb connection — likely a latent bug; confirm and fix.
            print("You are not currently assigned to a class. You cannot assign Homework for a class that does not exist.")
        else:
            obtained = obtained[0]
            # get the assigned Homework for the class
            db = sqlite3.connect('sdb.db')
            cursor = db.cursor()
            cursor.execute('''SELECT Homework FROM students WHERE class=?''', (obtained,))
    elif accesslevel == "a":
        class_option = input("Which class would you like to check? (for class 1A, type \"1A\", case sensitive)\n:")
        db = sqlite3.connect('sdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT Homework FROM students WHERE class=?''', (class_option,))
    # First matching row; None when no student / no homework was found.
    student = cursor.fetchone()
    db.close()
    if student == None:
        print("No Homework")
        if accesslevel != 's':
            print("This can occur if there are no students in your class or the class does not exist")
    else:
        # Calculate how many days until Homework is due.  The Homework column
        # holds an ISO date string "YYYY-MM-DD" (see updateHomework).
        classfind = student[0]
        year = int(classfind[0:4])
        month = int(classfind[5:7])
        day = int(classfind[8:])
        date = datetime.date(year,month, day)
        # Day-of-year arithmetic; %j is the zero-padded day of the year.
        # NOTE(review): the 365*cyear term ignores leap years — confirm
        # whether that imprecision is acceptable.
        cyear = year - int(datetime.date.today().strftime("%Y"))
        days = int((365*cyear) + int(date.strftime("%j")))
        current = int(days) - int(datetime.date.today().strftime("%j"))
        # If Homework is due today (or overdue), ask the student if they have done it
        if current <= 0:
            if current == 0:
                print("Homework is due today")
            else:
                print("Homework is overdue")
            if accesslevel == "s":
                Homework = input("Do you have your Homework to hand in? yes (y) or no (n)\n:").lower()
                if Homework == "y":
                    print("Please hand in your Homework to the teacher, they will delete the Homework status from your acoount")
                # If not, add them to the detention list
                elif Homework == "n":
                    reason = "This student did not do their assigned Homework"
                    db = sqlite3.connect('sdb.db')
                    cursor = db.cursor()
                    cursor.execute('''SELECT class FROM students WHERE name=?''', (users_name,))
                    classname = cursor.fetchone()
                    classname = classname[0]
                    db.close()
                    db = sqlite3.connect('ddb.db')
                    cursor = db.cursor()
                    cursor.execute('''INSERT INTO detention(name, class, reason)
                    VALUES(?,?,?)''', (users_name, classname, reason))
                    db.commit()
                    db.close()
                    print("You have been added to the detention list")
        # If Homework is due in the future, say how many days until it is due
        elif current > 0:
            print("Homework is due in ", current, " days")
        # Just to be sure!
        else:
            print("Error")
#Add a teacher to the database (admin only)
def addTeacher():
    """Register a new teacher (admin only): build their school email from the
    entered name and set, then store the record in the teacher database."""
    # Collect the teacher's details.
    first = input('Enter teacher\'s first name:\n')
    last = input('Enter teacher\'s last name:\n')
    assigned_set = input('Enter teacher\'s set:\n')
    # Full name plus a generated school email address.
    full_name = first + " " + last
    email = full_name + assigned_set + "@school.com"
    # Persist the new teacher record.
    connection = sqlite3.connect('tdb.db')
    connection.cursor().execute('''INSERT INTO teachers(name, class, email)
    VALUES(?,?,?)''', (full_name, assigned_set, email))
    connection.commit()
    print(full_name + " has been added to the teacher database")
    connection.close()
#Find what student has what teacher
def findStudentsTeacher():
    """Report which teacher teaches a given student.

    Resolves the student's class from sdb.db, then the teacher assigned
    to that class from tdb.db, and prints the outcome.
    """
    pupil = input("What is the students name?\n:")
    # Resolve the pupil's class from the student database.
    sdb = sqlite3.connect('sdb.db')
    class_row = sdb.cursor().execute(
        '''SELECT class FROM students WHERE name=?''', (pupil,)).fetchone()
    sdb.close()
    if class_row is None:
        # Unknown student (or one with no class) cannot be matched.
        print("This student does not have a teacher yet")
        return
    # Look up the teacher assigned to that class.
    tdb = sqlite3.connect('tdb.db')
    teacher_row = tdb.cursor().execute(
        '''SELECT name FROM teachers WHERE class=?''', (class_row[0],)).fetchone()
    if teacher_row is None:
        print("This student's class has not been assigned a teacher yet!")
    else:
        print(teacher_row[0], "is the teacher of", pupil)
#Add student to the database (admin only)
def addStudent():
    """Prompt for a student's details, derive their school email, and
    insert the record into the student database (sdb.db).

    Admin-only action (enforced by the menu dispatch, not here).
    """
    # Collect the student's details from the console.
    first = input('Enter student\'s first name:\n')
    last = input('Enter student\'s last name:\n')
    set_name = input('Enter student\'s set:\n')
    # Full name, plus an email derived from the name and set.
    full_name = first + " " + last
    school_email = full_name + set_name + "@school.com"
    # Persist the new student record.
    connection = sqlite3.connect('sdb.db')
    connection.cursor().execute(
        '''INSERT INTO students(name, class, email)
        VALUES(?,?,?)''', (full_name, set_name, school_email))
    connection.commit()
    connection.close()
    print(first + " " + last + " has been added to the student database")
#Delete a student from the database (admin only)
def deleteStudent():
    """Remove a student from the student database and from any
    detention entries that reference them (admin only)."""
    target = input("Enter the full name of the student you would like to delete\n:")
    # The student must be removed from both the student roster and the
    # detention list; the two tables live in separate database files.
    for db_file, statement in (
            ('sdb.db', '''DELETE FROM students WHERE name = ? '''),
            ('ddb.db', '''DELETE FROM detention WHERE name = ? ''')):
        connection = sqlite3.connect(db_file)
        connection.cursor().execute(statement, (target,))
        connection.commit()
        connection.close()
    print(target, "has been removed from the student database")
#Delete a teacher from the database (admin only)
def deleteTeacher():
    """Prompt for a teacher's full name and delete that teacher from
    the teacher database (tdb.db).  Admin-only action.

    Bug fix: the prompt previously asked for a *student* name (copied
    from deleteStudent) even though this function deletes a teacher.
    """
    # Enter the teacher name to be deleted.
    del_name = input("Enter the full name of the teacher you would like to delete\n:")
    # Delete that teacher.
    db = sqlite3.connect('tdb.db')
    cursor = db.cursor()
    cursor.execute('''DELETE FROM teachers WHERE name = ? ''', (del_name,))
    db.commit()
    db.close()
    print(del_name, "has been removed from the teacher database")
#delete a students Homework
def deleteHomework():
    """Clear the stored Homework text for a group of students.

    Teachers ('t') clear Homework for the students they teach; admins
    ('a') are asked which class to clear.  Relies on the module-level
    globals ``accesslevel`` and ``users_name``.
    """
    db = sqlite3.connect('sdb.db')
    cursor = db.cursor()
    if accesslevel == "t":
        # Set the Homework to empty string for this teacher's students.
        # NOTE(review): this filters on a 'teachers' column in the
        # students table; every other query in this file only uses
        # id/name/class/email/Homework -- confirm the column exists.
        cursor.execute('''UPDATE students SET Homework = ? WHERE teachers = ? ''',
        ("", users_name))
    elif accesslevel == "a":
        del_name = input("Enter the class of the set Homework you would like to delete\n:")
        # Set the Homework to empty string for the whole class.
        cursor.execute('''UPDATE students SET Homework = ? WHERE class = ? ''',
        ("", del_name))
    db.commit()
    db.close()
#Find a student's infomation
def find():
    """Print a student's stored details.

    Students (accesslevel 's') only see the email address; teachers and
    admins also get the id and Homework fields.
    """
    # Which student's info needs to be found.
    student_name = input("What is the students full name?\n:")
    db = sqlite3.connect('sdb.db')
    cursor = db.cursor()
    # If a student wants another student's info, make sure it is not
    # sensitive (only show the contact information provided by the school).
    if accesslevel == "s":
        cursor.execute('''SELECT email FROM students WHERE name=?''', (student_name,))
    else:
        cursor.execute('''SELECT id, email, Homework FROM students WHERE name=?''', (student_name,))
    student = cursor.fetchone()
    # NOTE(review): the connection is never closed on this path.
    if student == None:
        print("That student was not found")
    else:
        print(student)
#describes what keypress achieves what task
def helpMe():
    """Print the keypresses available to the current user.

    Options are grouped by the minimum access level that may use them;
    the module-level global ``accesslevel`` decides which groups are
    shown ('a' admin, 't' teacher, 's' student).
    """
    choices = {
        # Admins can:
        'a': {
            's':"add a student",
            't':"add a teacher",
            'd':"delete a student",
            'p':"delete a teacher",
        },
        # Admins and teachers can:
        't':{
            'l':"find a students teacher",
            'u':"update Homework",
            'e':"add to detention",
            'r':"remove from detention",
            'k':"check detention",
            'i':"delete Homework",
        },
        # All users can:
        's':{
            'f':"find a student's infomation",
            'c':"check Homework"
        }
    }
    # Create empty list and add options depending on access level;
    # the student-level options are available to everyone.
    options = []
    if accesslevel == 'a':
        options += list(choices['a'].items())
        options += list(choices['t'].items())
    if accesslevel == 't':
        options += list(choices['t'].items())
    options += list(choices['s'].items())
    # Print all possible keypresses with their corresponding action.
    for x in options:
        print("Press " + x[0] + " to " + x[1])
#Where the user can choose what action they want to do
def menu():
    """Show the action prompt once and dispatch the chosen action.

    Returns False when the user quits ('q'), True otherwise so the
    caller's while-loop keeps running.  Dispatch works by trying the
    chosen key against each access-level table the user may reach; a
    missing key raises and falls through to the next table.
    """
    choice = input("What would you like to do?(press h for help)\n:").lower()
    # If the user chooses q, return False to stop the while loop.
    if choice == 'q':
        return False
    # Map keypresses to handler functions, grouped by access level.
    choices = {
        # Admins can:
        'a': {
            's':addStudent,
            't':addTeacher,
            'd':deleteStudent,
            'p':deleteTeacher,
        },
        # Admins and teachers can:
        't':{
            'l':findStudentsTeacher,
            'u':updateHomework,
            'e':addToDetention,
            'r':removeFromDetention,
            'k':checkDetention,
            'i':deleteHomework,
        },
        # All users can:
        's':{
            'f':find,
            'c':checkHomework,
            'h':helpMe
        }
    }
    # If an option is not in an accesslevel section it raises, and we
    # go on to check all access levels the user can reach.
    # NOTE(review): these bare excepts also swallow any exception raised
    # *inside* a handler, reporting it as "Invalid Input" -- consider
    # catching KeyError only.
    try:
        choices['s'][choice]()
    except:
        if accesslevel in ['t','a']:
            try:
                choices['t'][choice]()
            except:
                if accesslevel == 't':
                    print("Invalid Input")
                else:
                    try:
                        choices['a'][choice]()
                    except:
                        print("Invalid Input")
    # If the user doesn't quit, keep the while loop going.
    return True
def start():
    """Initialise the hardware/GUI modules, identify the user via the
    QR scanner, and set the global access level.

    Returns True when the scanned user is found in one of the three
    databases (student, teacher or admin), False otherwise.
    """
    # The username and access level need to be accessed throughout the
    # program, so they are module-level globals.
    global accesslevel
    global users_name
    # Support modules are imported best-effort; importing each one runs
    # its hardware / GUI setup code as a side effect.
    try:
        import Motion
    except:
        print("The motion sensors failed. Please ensure you are using the raspberry pi, have the correct modules installed, and the components are properly connected to the correct pins")
    print("Launching login GUI")
    try:
        import login
    except:
        print("login gui failed. Please ensure you have the correct modules installed")
    try:
        import scanner
    except:
        # Without the scanner there is no identity, so bail out.
        print("QR code scanner failed. Please ensure you have a compatible camera plugged in and the correct modules installed")
        print("For security reasons, you cannot proceed")
        return False
    # The scanner module exposes the scanned user's name.
    users_name = scanner.database_recieve
    # The following code checks if the person is in the database,
    # trying student, then teacher, then admin.
    db = sqlite3.connect('sdb.db')
    cursor = db.cursor()
    cursor.execute('''SELECT id FROM students WHERE name=?''', (users_name,))
    result = cursor.fetchone()
    db.close()
    if result == None:
        # Not a student -- try the teacher database next.
        db = sqlite3.connect('tdb.db')
        cursor = db.cursor()
        cursor.execute('''SELECT id FROM teachers WHERE name=?''', (users_name,))
        result = cursor.fetchone()
        db.close()
        if result == None:
            # Not a teacher either -- finally try the admin database.
            db = sqlite3.connect('adb.db')
            cursor = db.cursor()
            cursor.execute('''SELECT id FROM admins WHERE name=?''', (users_name,))
            result = cursor.fetchone()
            db.close()
            if result == None:
                print("\nYou are not in the database, so you cannot access this programme!")
                return False
            else:
                accesslevel = "a"
                print("Welcome", users_name + ". Your current status is administrator. If you believe this is an error, contact an administrator")
                return True
        else:
            accesslevel = "t"
            print("Welcome", users_name + ". Your current status is teacher. If you believe this is an error, contact an administrator")
            return True
    else:
        accesslevel = "s"
        print("\nWelcome", users_name + ". Your current status is student. If you believe this is an error, contact an administrator\n\n")
        # Students immediately see their homework status on login.
        checkHomework()
        return True
# Program entry point: ensure the database tables exist, identify the
# user, then loop over the menu until they quit or login fails.
tableCheck()
stop = start()
while stop:
    stop = menu()
print("Goodbye!")
|
import os
import time
import datetime
import osmnx as ox
import networkx as nx
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.colors as mpcol
import matplotlib
# Non-interactive Agg backend so figures render without a display.
matplotlib.use('Agg')
# Route osmnx data/log/image/cache folders to the project layout and
# enable response caching so repeated runs reuse downloaded data.
ox.config(data_folder='../Data', logs_folder='../logs',
          imgs_folder='../imgs', cache_folder='../cache',
          use_cache=True, log_console=False, log_name='osmnx',
          log_file=True, log_filename='osmnx')
# Short city name -> place query; only the short names are used below
# (the graphs are loaded from the cache by name).
cities = {'Amsterdam': 'Amsterdam, Netherlands',
          'Budapest': 'Budapest, Hungary',
          'Phoenix': 'Phoenix, Arizona, USA',
          'Detroit': 'Detroit, Michigan, USA',
          'Manhattan': 'Manhattan, New York City, New York, USA',
          'Mexico': 'DF, Mexico',
          'London': 'London, England',
          'Singapore': 'Singapore, Singapore',
          'Copenhagen': 'Copenhagen Municipality, Denmark',
          'Barcelona': 'Barcelona, Catalunya, Spain',
          'Portland': 'Portland, Oregon, USA',
          'Bogota': 'Bogotá, Colombia',
          'LA': 'Los Angeles, Los Angeles County, California, USA',
          'Jakarta': 'Daerah Khusus Ibukota Jakarta, Indonesia'}
def assure_path_exists(path):
    """Create the directory portion of *path* if it does not exist.

    *path* may name a file; only ``os.path.dirname(path)`` is created
    (including intermediate directories).

    Fixes: uses ``exist_ok=True`` so two concurrent runs cannot race
    between the existence check and the creation (the old
    check-then-makedirs pattern could raise FileExistsError), guards
    against an empty dirname, and no longer shadows the ``dir`` builtin.
    """
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def load_graph(name, layer):
    """Load a city's cached ``<name>/<name>_<layer>.graphml`` network
    via osmnx (layer is e.g. 'bike' or 'drive')."""
    graph_file = '{0}/{0}_{1}.graphml'.format(name, layer)
    return ox.load_graphml(graph_file)
def get_colors(wcc):
    """Return one RGBA colour per connected component, sampled evenly
    from the tab20b colormap."""
    scale = mpcol.Normalize(vmin=0, vmax=len(wcc), clip=True)
    to_rgba = cm.ScalarMappable(norm=scale, cmap=cm.tab20b).to_rgba
    return [to_rgba(index) for index in range(len(wcc))]
def get_wcc(G_bike):
    """Return the edge lists of the weakly connected components of
    *G_bike*, sorted smallest-first (so the largest components sit at
    the end of the list).
    """
    # NOTE(review): weakly_connected_component_subgraphs was removed in
    # networkx 2.4 -- this code pins the project to an older networkx.
    wcc = [list(cc.edges())
           for cc in list(nx.weakly_connected.weakly_connected_component_subgraphs(G_bike))]
    wcc.sort(key=len, reverse=False)
    return wcc
def plot_wcc_graph(G, name, wcc, path_plot, filter_wcc=False, n_cc=0):
    """Render *G* with each connected component in *wcc* drawn in its
    own colour; all remaining edges are thin and light grey.

    Parameters
    ----------
    G : graph to draw (bike + drive composition).
    name : city name used in the titles and file names.
    wcc : list of per-component edge lists, sorted smallest-first.
        Mutated in place when *filter_wcc* is True.
    path_plot : directory the PNG is written to.
    filter_wcc : when True, keep only the *n_cc* largest components.
    n_cc : number of components shown; 0 means "all components" and
        selects the non-gif output file name.
    """
    if filter_wcc:
        # Keep only the last (largest) n_cc components.  NOTE: this
        # truncates the caller's list in place.
        del wcc[:-n_cc]
    colors = get_colors(wcc)
    # Map every edge of a kept component to that component's colour.
    color_dict = {}
    for n, cc in enumerate(wcc):
        for e in cc:
            color_dict[e] = colors[n]
    color_edge = []
    width = []
    for e in G.edges():
        if e in color_dict:
            color_edge.append(color_dict[e])
            width.append(1.55)
        else:
            # Background (non-component) edges: light grey and thin.
            color_edge.append('#c4c8ce')
            width.append(0.2)
    fig, ax = ox.plot_graph(G, node_size=0, edge_color=color_edge,
                            fig_height=10, edge_linewidth=width, show=False, close=False)
    ax.set_title('{} Connected Components'.format(n_cc), fontdict={'fontsize': 12})
    fig.suptitle('{} bicycle network\n'.format(name), y=0.94, fontsize=20)
    # Zero-pad the gif frame number so the files sort correctly.
    # (Simplified the original redundant condition
    # 'n_cc < 100 and n_cc < 10 and n_cc > 0' to 0 < n_cc < 10.)
    if n_cc == 0:
        plt.savefig(path_plot+'{}_AllCC.png'.format(name), dpi=200)
    elif 0 < n_cc < 10:
        plt.savefig(path_plot+'gif_{}_00{}.png'.format(name, n_cc), dpi=200)
    elif n_cc < 100:
        plt.savefig(path_plot+'gif_{}_0{}.png'.format(name, n_cc), dpi=200)
    else:
        plt.savefig(path_plot+'gif_{}_{}.png'.format(name, n_cc), dpi=200)
    # Fix: close the figure so looping over 30+ frames per city does
    # not accumulate open matplotlib figures (memory leak).
    plt.close(fig)
def main(cities):
    """For every city: load its bike and drive networks and render the
    connected-component plots -- one overview plus one frame per i
    largest components (used to build a gif).

    Parameters
    ----------
    cities : mapping of short city name -> place query; only the keys
        are used here (graphs are loaded from the local cache by name).
    """
    # Number of per-frame plots produced for the gif.
    n_frames = 30
    for name in cities:
        path_plot = '../imgs/ConnectedComponents/CCOverNetwork/{}/'.format(name)
        assure_path_exists(path_plot)
        st = time.time()
        G_bike = load_graph(name, 'bike')
        G_drive = load_graph(name, 'drive')
        # Draw the drive network underneath the bike components.
        G = nx.compose(G_bike, G_drive)
        print('{} loaded.'.format(name))
        wcc = get_wcc(G_bike)
        plot_wcc_graph(G, name, wcc, path_plot, filter_wcc=False, n_cc=0)
        print(' First plot done')
        for i in range(1, n_frames + 1):
            time_temp = time.time()
            # Recompute wcc each iteration because plot_wcc_graph
            # truncates the list in place when filter_wcc is True.
            wcc = get_wcc(G_bike)
            plot_wcc_graph(G, name, wcc, path_plot, filter_wcc=True, n_cc=i)
            # Bug fix: the denominator previously said 31 although the
            # loop only produces 30 frames.
            print(' {}/{} done in {} seg. Elapsed time: {} min'.format(i, n_frames,
                  round(time.time()-time_temp, 2), round((time.time()-st)/60, 2)))
        print('{} done in {} min.'.format(name, round((time.time()-st)/60, 2)))
    print('All cities done')

if __name__ == '__main__':
    main(cities)
|
# -*- coding: utf-8 -*-
#
# Author: Alberto Planas <aplanas@suse.com>
#
# Copyright 2019 SUSE LLC.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
:maintainer: Alberto Planas <aplanas@suse.com>
:maturity: new
:depends: None
:platform: Linux
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
from salt.exceptions import CommandExecutionError
LOG = logging.getLogger(__name__)
__virtualname__ = "suseconnect"
# Define not exported variables from Salt, so this can be imported as
# a normal module
try:
    __opts__
    __salt__
    __states__
except NameError:
    # Running outside Salt (e.g. plain import / unit tests): provide
    # empty stand-ins so module-level references do not crash.
    __opts__ = {}
    __salt__ = {}
    __states__ = {}
def __virtual__():
    """
    Only load this state module when the matching ``suseconnect``
    execution module is available.
    """
    has_exec_module = "suseconnect.register" in __salt__
    return has_exec_module
def _status(root):
    """
    Return two lists derived from ``suseconnect.status``: the products
    whose status is 'Registered', and the products whose subscription
    is ACTIVE.  Entries are formatted as ``identifier/version/arch``.
    """
    entries = __salt__["suseconnect.status"](root=root)
    registered = []
    subscriptions = []
    for entry in entries:
        triplet = "{}/{}/{}".format(
            entry["identifier"], entry["version"], entry["arch"]
        )
        if entry["status"] == "Registered":
            registered.append(triplet)
        if entry.get("subscription_status") == "ACTIVE":
            subscriptions.append(triplet)
    return registered, subscriptions
def _is_registered(product, root):
    """
    Check if a product is registered.

    With a *product* given, True when that exact product appears in the
    registered list; without one, True when any subscription is active.
    """
    registered, subscriptions = _status(root)
    if product:
        return product in registered
    return bool(subscriptions)
def registered(name, regcode=None, product=None, email=None, url=None, root=None):
    """
    .. versionadded:: TBD

    Register SUSE Linux Enterprise installation with the SUSE Customer
    Center

    name
        If follows the product name rule, will be the name of the
        product.

    regcode
        Subscription registration code for the product to be
        registered. Relates that product to the specified subscription,
        and enables software repositories for that product.

    product
        Specify a product for activation/deactivation. Only one product
        can be processed at a time. Defaults to the base SUSE Linux
        Enterprise product on this system.

        Format: <name>/<version>/<architecture>

    email
        Email address for product registration

    url
        URL for the registration server (will be saved for the next
        use) (e.g. https://scc.suse.com)

    root
        Path to the root folder, uses the same parameter for zypper
    """
    ret = {
        "name": name,
        "result": False,
        "changes": {},
        "comment": [],
    }
    # When 'name' itself looks like <name>/<version>/<arch>, treat it
    # as the product to register.
    if not product and re.match(r"[-\w]+/[-\w\.]+/[-\w]+", name):
        product = name
    name = product if product else "default"
    # Idempotence: nothing to do when already registered.
    if _is_registered(product, root):
        ret["result"] = True
        ret["comment"].append("Product or module {} already registered".format(name))
        return ret
    # Test mode: report the pending change without applying it.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"].append("Product or module {} would be registered".format(name))
        ret["changes"][name] = True
        return ret
    try:
        __salt__["suseconnect.register"](
            regcode, product=product, email=email, url=url, root=root
        )
    except CommandExecutionError as e:
        ret["comment"].append("Error registering {}: {}".format(name, e))
        return ret
    ret["changes"][name] = True
    # Verify the registration actually took effect before reporting
    # success.
    if _is_registered(product, root):
        ret["result"] = True
        ret["comment"].append("Product or module {} registered".format(name))
    else:
        ret["comment"].append("Product or module {} failed to register".format(name))
    return ret
def deregistered(name, product=None, url=None, root=None):
    """
    .. versionadded:: TBD

    De-register the system and base product, or in conjunction with
    'product', a single extension, and removes all its services
    installed by SUSEConnect. After de-registration the system no
    longer consumes a subscription slot in SCC.

    name
        If follows the product name rule, will be the name of the
        product.

    product
        Specify a product for activation/deactivation. Only one product
        can be processed at a time. Defaults to the base SUSE Linux
        Enterprise product on this system.

        Format: <name>/<version>/<architecture>

    url
        URL for the registration server (will be saved for the next
        use) (e.g. https://scc.suse.com)

    root
        Path to the root folder, uses the same parameter for zypper
    """
    ret = {
        "name": name,
        "result": False,
        "changes": {},
        "comment": [],
    }
    # When 'name' itself looks like <name>/<version>/<arch>, treat it
    # as the product to deregister.
    if not product and re.match(r"[-\w]+/[-\w\.]+/[-\w]+", name):
        product = name
    name = product if product else "default"
    # Idempotence: nothing to do when already deregistered.
    if not _is_registered(product, root):
        ret["result"] = True
        ret["comment"].append("Product or module {} already deregistered".format(name))
        return ret
    # Test mode: report the pending change without applying it.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"].append("Product or module {} would be deregistered".format(name))
        ret["changes"][name] = True
        return ret
    try:
        __salt__["suseconnect.deregister"](product=product, url=url, root=root)
    except CommandExecutionError as e:
        ret["comment"].append("Error deregistering {}: {}".format(name, e))
        return ret
    ret["changes"][name] = True
    # Verify the deregistration actually took effect before reporting
    # success.
    if not _is_registered(product, root):
        ret["result"] = True
        ret["comment"].append("Product or module {} deregistered".format(name))
    else:
        ret["comment"].append("Product or module {} failed to deregister".format(name))
    return ret
|
import pygame
import math
import random
from params import *
from Setup import *
from Effect import *
import GameObject as go
def addVector(vector1, vector2):
    """Add two polar (angle, length) vectors, returning a new vector.

    Angles are measured clockwise from the positive y axis, hence the
    sin/cos swap and the ``0.5*pi - atan2`` conversion back to an
    angle.
    """
    total_x = (math.sin(vector1.angle) * vector1.length
               + math.sin(vector2.angle) * vector2.length)
    total_y = (math.cos(vector1.angle) * vector1.length
               + math.cos(vector2.angle) * vector2.length)
    magnitude = math.hypot(total_x, total_y)
    direction = 0.5 * math.pi - math.atan2(total_y, total_x)
    return vector(direction, magnitude)
def boxCollideBox(p1, p2):
    """Resolve a collision between two axis-aligned box particles.

    Handles the game-specific special cases first (water kills the
    player, trigger volumes only fire their hit callback), then updates
    the climbing / in-air flags, and finally performs an elastic
    collision response on overlap.  Mutates both particles in place.
    """
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    distance = math.hypot(dy, dx)
    # Axis-aligned bounding-box overlap test.
    if p1.getMinX() < p2.getMaxX() and p1.getMaxX() > p2.getMinX() and p1.getMinY() < p2.getMaxY() and p1.getMaxY() > p2.getMinY():
        # Water instantly kills the player -- no physics response.
        if not p1.parent == None and not p2.parent == None:
            if p1.parent.name == "Water" and p2.parent.name == "Player":
                p2.parent.kill()
                return
            if p2.parent.name == "Water" and p1.parent.name == "Player":
                p1.parent.kill()
                return
        # Trigger volumes only notify their owner; they are not solid.
        if p1.isTrigger:
            p1.hit(p2)
            return
        if p2.isTrigger:
            p2.hit(p1)
            return
        # if p2.y < p1.y :
        #     p2.isInAir = False
        # if p2.y > p1.y:
        #     p1.isInAir = False
        # Mostly side-on contact (within a small offset): an airborne
        # p1 starts climbing the other box.
        if p1.getMaxX() < p2.getMinX() + 5 or p1.getMinX() + 10 > p2.getMaxX(): #collide offset
            if p1.isInAir:
                p1.isClimb = True
                p1.isInAir = False
            else:
                p1.isClimb = False
        # Contact from above: p1 has landed, so it is neither airborne
        # nor climbing.
        if p1.getMaxY() < p2.getMinY() + 5:
            p1.isInAir = False
            p1.isClimb = False
        tangent = math.atan2(dy, dx)
        angle = tangent + 0.5 * math.pi
        totalMass = p1.mass + p2.mass
        # TODO calculate the overlap distance
        # NOTE(review): mixes p1.width with p2.height in the separation
        # term -- possibly meant p2.width (or a real penetration depth);
        # confirm intended.
        overlap = 0.05 * (p1.width + p2.height - distance + 1)
        # Push non-static bodies apart and notify their owners.
        if not p1.static:
            p1.hit(p2)
            p1.x += math.sin(angle) * overlap
            p1.y -= math.cos(angle) * overlap
        if not p2.static:
            p2.hit(p1)
            p2.x -= math.sin(angle) * overlap
            p2.y += math.cos(angle) * overlap
        # Reflect both headings about the collision tangent, then
        # exchange momentum using the 1-D elastic collision formulas.
        p1.angle = 2 * tangent - p1.angle
        p2.angle = 2 * tangent - p2.angle
        vectorP1 = addVector(vector(p1.angle, p1.speed * (p1.mass - p2.mass) / totalMass),
                             vector(angle, 2 * p2.speed * p2.mass / totalMass))
        (p1.angle, p1.speed) = (vectorP1.angle, vectorP1.length)
        vectorP2 = addVector(vector(p2.angle, p2.speed * (p2.mass - p1.mass) / totalMass),
                             vector(angle + math.pi, 2 * p1.speed * p1.mass / totalMass))
        (p2.angle, p2.speed) = (vectorP2.angle, vectorP2.length)
        # Lose some energy in the impact.
        p1.speed *= elasticity
        p2.speed *= elasticity
def circleCollideCircle(p1, p2):
    """Elastic collision response between two circle particles.

    On overlap, the circles are pushed apart along the line of centres,
    both headings are reflected about the collision tangent, momentum
    is exchanged along the normal, and both lose energy via the global
    ``elasticity``.  Mutates both particles in place.
    """
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    distance = math.hypot(dy, dx)
    if distance < p1.r + p2.r:
        tangent = math.atan2(dy, dx)
        angle = tangent + 0.5 * math.pi
        totalMass = p1.mass + p2.mass
        # Half the penetration depth each (+1 guarantees separation).
        overlap = 0.5 * (p1.r + p2.r - distance + 1)
        p1.x += math.sin(angle) * overlap
        p1.y -= math.cos(angle) * overlap
        p2.x -= math.sin(angle) * overlap
        p2.y += math.cos(angle) * overlap
        # Reflect headings about the tangent.
        p1.angle = 2 * tangent - p1.angle
        p2.angle = 2 * tangent - p2.angle
        # 1-D elastic collision formulas applied along the normal.
        vectorP1 = addVector(vector(p1.angle, p1.speed * (p1.mass - p2.mass) / totalMass),
                             vector(angle, 2 * p2.speed * p2.mass / totalMass))
        (p1.angle, p1.speed) = (vectorP1.angle, vectorP1.length)
        vectorP2 = addVector(vector(p2.angle, p2.speed * (p2.mass - p1.mass) / totalMass),
                             vector(angle + math.pi, 2 * p1.speed * p1.mass / totalMass))
        (p2.angle, p2.speed) = (vectorP2.angle, vectorP2.length)
        p1.speed *= elasticity
        p2.speed *= elasticity
#p1 box
#p2 circle
def boxCollideCircle(p1, p2):
    """Collision response between a box (*p1*) and a circle (*p2*).

    Uses the box's AABB overlap test, then the same elastic response as
    circleCollideCircle.  Mutates both particles in place.
    """
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    distance = math.hypot(dy, dx)
    if p1.getMinX() < p2.getMaxX() and p1.getMaxX() > p2.getMinX() and p1.getMinY() < p2.getMaxY() and p1.getMaxY() > p2.getMinY():
        tangent = math.atan2(dy, dx)
        angle = tangent + 0.5 * math.pi
        totalMass = p1.mass + p2.mass
        # Approximate separation using box width + circle radius.
        overlap = 0.5 * (p1.width + p2.r - distance + 1)
        p1.x += math.sin(angle) * overlap
        p1.y -= math.cos(angle) * overlap
        p2.x -= math.sin(angle) * overlap
        p2.y += math.cos(angle) * overlap
        # Reflect headings about the tangent, then exchange momentum
        # with the 1-D elastic collision formulas.
        p1.angle = 2 * tangent - p1.angle
        p2.angle = 2 * tangent - p2.angle
        vectorP1 = addVector(vector(p1.angle, p1.speed * (p1.mass - p2.mass) / totalMass),
                             vector(angle, 2 * p2.speed * p2.mass / totalMass))
        (p1.angle, p1.speed) = (vectorP1.angle, vectorP1.length)
        vectorP2 = addVector(vector(p2.angle, p2.speed * (p2.mass - p1.mass) / totalMass),
                             vector(angle + math.pi, 2 * p1.speed * p1.mass / totalMass))
        (p2.angle, p2.speed) = (vectorP2.angle, vectorP2.length)
        p1.speed *= elasticity
        p2.speed *= elasticity
def collide(p1, p2):
    """Collision entry point.

    Bullets pass through whatever faction fired them; everything else
    is dispatched to the box-vs-box resolver.
    """
    first_owner = p1.parent
    second_owner = p2.parent
    if first_owner is not None and second_owner is not None:
        # A bullet ignores the object type it was fired from.
        if first_owner.name == 'Bullet' and first_owner._from == second_owner.name:
            return
        if second_owner.name == 'Bullet' and second_owner._from == first_owner.name:
            return
    boxCollideBox(p1, p2)
class Particle:
    """Base physics body with polar velocity (angle, speed), mass,
    drag, and platformer state flags (in air / climbing).

    Angle convention: clockwise from the positive y axis (matching
    addVector).  ``defaultSpeed``, ``drag``, ``moveSpeed``,
    ``airControllLoss`` and ``jumpVector`` are globals star-imported
    from params/Setup.
    """
    def __init__(self, x, y, type, size=1, mass=1):
        self.isTrigger = False      # trigger volumes notify instead of colliding solidly
        self.parent = None          # owning GameObject, kept in sync via updatePosition()
        self.x = x
        self.y = y
        self.size = size
        self.colour = (0, 0, 255)
        self.type = type            # 1 = circle, boxType = box (see subclasses)
        self.thickness = 1
        self.speed = defaultSpeed
        self.angle = math.pi / 2
        self.mass = mass
        self.drag = drag
        self.static = False         # static bodies never move
        self.isInAir = False
        self.isClimb = False
        self.isAffectByGravity = True
    def hit(self, particle):
        """Forward a collision notification to the owning GameObject."""
        if self.parent == None:
            return
        self.parent.hit(particle)
    def jump(self):
        """Apply the jump impulse unless already airborne."""
        if self.isInAir:
            return
        else:
            self.isClimb = False
            self.isInAir = True
            self.firstMove = True
            self.accelerate(jumpVector)
    def moveLeft(self):
        """Accelerate left; while airborne, control is reduced and the
        speed is capped."""
        if self.isInAir:
            if self.speed > 2 * moveSpeed:
                self.speed = 2 * moveSpeed
            # Air control only steers: restore the speed afterwards.
            temp = self.speed
            self.accelerate(vector(-math.pi / 2, moveSpeed * airControllLoss))
            self.speed = temp
            return
        else:
            if self.speed > moveSpeed:
                self.speed = moveSpeed
            self.accelerate(vector(-math.pi / 2, moveSpeed))
        # self.speed = moveSpeed
        # self.angle = -math.pi / 2
    def moveRight(self):
        """Accelerate right; while airborne, control is reduced and the
        speed is capped."""
        if self.isInAir:
            if self.speed > 2 * moveSpeed:
                self.speed = 2 * moveSpeed
            self.accelerate(vector(math.pi / 2, moveSpeed * airControllLoss))
            return
        else:
            if self.speed > moveSpeed:
                self.speed = moveSpeed
            self.accelerate(vector(math.pi / 2, moveSpeed))
        # self.speed = moveSpeed
        # self.angle = math.pi / 2
    def moveDown(self):
        """No-op in the base class; subclasses may override."""
        pass
    def moveUp(self):
        """No-op in the base class; subclasses may override."""
        pass
    def experienceGravity(self, vector1):
        """Apply the gravity vector unless gravity is disabled."""
        if self.isAffectByGravity:
            self.accelerate(vector1)
    def accelerate(self, vector1):
        """Add *vector1* to the current velocity (polar vector sum)."""
        val = addVector(vector(self.angle, self.speed), vector1)
        (self.angle, self.speed) = (val.angle, val.length)
    def experienceDrag(self):
        """Scale speed down by the drag coefficient."""
        self.speed *= self.drag
    def move(self):
        """Advance one step along the current velocity (static bodies
        stay put) and keep the owning GameObject's position in sync."""
        if not self.static:
            self.x += math.sin(self.angle) * self.speed
            self.y -= math.cos(self.angle) * self.speed
        if not self.parent == None:
            self.parent.updatePosition()
    def bounce(self, width, height):
        """Screen-edge bouncing; implemented by subclasses."""
        pass
    def moveMouse(self, x, y):
        """Steer toward screen point (x, y) with speed proportional to
        the distance."""
        dx = x - self.x
        dy = y - self.y
        self.angle = 0.5 * math.pi + math.atan2(dy, dx)
        self.speed = math.hypot(dx, dy) * 0.1
# type = 1 circle
# type = 2 box
class CircleParticle(Particle, object):
    """Circle body (type 1): radius-based bounds plus screen-edge
    bouncing with energy loss."""
    def __init__(self, x, y, r, mass=1):
        self.r = r
        # Half the disc area; used as the generic 'size'.
        self.size = math.pi * r**2 / 2
        self.type = 1
        self.mass = mass
        super(CircleParticle, self).__init__(x, y, self.type, self.size, self.mass)
    def getMaxX(self):
        """Right edge of the bounds."""
        return self.x + self.r
    def getMinX(self):
        # NOTE(review): min bounds use the centre (x), not x - r, so
        # the bounding box is offset -- confirm this is intended.
        return self.x
    def getMaxY(self):
        """Bottom edge of the bounds."""
        return self.y + self.r
    def getMinY(self):
        return self.y
    def bounce(self, width, height):
        """Reflect off the window edges, losing energy via elasticity."""
        if self.x > width - self.r:
            self.speed *= elasticity
            self.x = 2 * (width - self.r) - self.x
            self.angle = - self.angle
        elif self.x < self.r:
            self.speed *= elasticity
            self.x = 2 * self.r - self.x
            self.angle = - self.angle
        if self.y > height - self.r:
            self.speed *= elasticity
            self.y = 2 * (height - self.r) - self.y
            self.angle = math.pi - self.angle
        elif self.y < self.r:
            self.speed *= elasticity
            self.y = 2 * self.r - self.y
            self.angle = math.pi - self.angle
class BoxParticle(Particle, object):
    """Rectangular body: width/height bounds, screen-edge bouncing, and
    hit(None) notifications on wall contact."""
    def __init__(self, x, y, width, height, mass=1):
        self.width = width
        self.height = height
        self.type = boxType         # box type id from params
        self.size = self.width * self.height
        self.mass = mass
        super(BoxParticle, self).__init__(x, y, self.type, self.size, self.mass)
    def getMaxX(self):
        """Right edge ((x, y) is treated as the top-left corner)."""
        return self.x + self.width
    def getMinX(self):
        return self.x
    def getMaxY(self):
        """Bottom edge of the bounds."""
        return self.y + self.height
    def getMinY(self):
        return self.y
    def bounce(self, width, height):
        """Reflect off the window edges, notifying the owner via
        hit(None) and marking the body grounded when it lands on the
        floor."""
        if self.x > width - self.width:
            #self.isInAir = False
            self.hit(None)
            self.speed *= elasticity
            self.x = 2 * (width - self.width) - self.x
            self.angle = - self.angle
        elif self.x < 0:
            #self.isInAir = False
            self.hit(None)
            self.speed *= elasticity
            self.x = 2 * self.width - self.x
            self.angle = - self.angle
        if self.y > height - self.height:
            # Landed on the floor: grounded again.
            self.isInAir = False
            self.hit(None)
            self.speed *= elasticity
            self.y = 2 * (height - self.height) - self.y
            self.angle = math.pi - self.angle
        elif self.y < 0:
            # self.isInAir = False
            self.hit(None)
            self.speed *= elasticity
            self.y = 2 * self.height - self.y
            self.angle = math.pi - self.angle
class BoxTrigger(BoxParticle, object):
    """Static, gravity-free, one-shot trigger volume: when the player
    touches it, spawn the truck effect once and register itself with
    the environment's particle list."""
    def __init__(self, x, y, width, height, env, mass=1):
        super(BoxTrigger, self).__init__(x, y, width, height)
        self.static = True
        self.isAffectByGravity = False
        self.isTrigger = True
        self.env = env
        self.times = 1              # maximum number of activations
        self.currentTime = 0        # activations so far
        self.isHit = False
        self.env.particles.append(self)
    def hit(self, particle):
        """Fire when touched by the player, at most ``times`` times."""
        if particle.parent.name == 'Player':
            if self.currentTime < self.times:
                # Spawn the truck effect ahead of / above the trigger.
                p = TruckEffect(self.x + 1000, self.y - 120, truckRun, 1, 1, self.env)
                self.currentTime += 1
                self.isHit = True
class DuTrigger(BoxParticle, object):
    """Static one-shot trigger that spawns a random squad (3-5) of
    enemies above the trigger when the player touches it."""
    def __init__(self, x, y, width, height, env, mass=1):
        super(DuTrigger, self).__init__(x, y, width, height)
        self.static = True
        self.isAffectByGravity = False
        self.isTrigger = True
        self.env = env
        self.times = 1              # maximum number of activations
        self.currentTime = 0        # activations so far
        self.numberOfEnemy = random.randint(3, 5)
        self.isHit = False
        self.env.particles.append(self)
    def hit(self, particle):
        """Spawn the enemies once: spread horizontally and dropped from
        a randomised height above the trigger."""
        if particle.parent.name == 'Player':
            if self.currentTime < self.times:
                i = -5
                while self.numberOfEnemy > 0:
                    e = go.Enemy(self.x + i * 100, self.y - 1000 - random.randint(-200, 200), playerSize[0], playerSize[1], self.env.player, self.env)
                    self.numberOfEnemy -= 1
                    i += 1
                self.currentTime += 1
                self.isHit = True
class CoinTrigger(BoxParticle, object):
    """Collectable coin: shows a coin effect and, when the player
    touches it, grants one coin then removes itself and its effect."""
    def __init__(self, x, y, width, height, env, mass=1):
        super(CoinTrigger, self).__init__(x, y, width, height)
        self.static = True
        self.isAffectByGravity = False
        self.isTrigger = True
        self.env = env
        self.env.particles.append(self)
        self.times = 1              # maximum number of activations
        self.currentTime = 0        # activations so far
        self.isHit = False
        # Visual for the coin, kept until collected.
        self.c = CoinEffect(self.x, self.y, coin, 1, 1, self.env)
    def hit(self, particle):
        """Grant a coin to the player and despawn (once)."""
        if particle.parent.name == 'Player':
            if self.currentTime < self.times:
                # Add coin to the player, then remove trigger + visual.
                self.env.player.coin += 1
                self.env.removeParticle(self)
                self.env.removeEffect(self.c)
                self.currentTime += 1
                self.isHit = True
class UpgradeTrigger(BoxParticle, object):
    """Collectable upgrade: like CoinTrigger, but also grants an
    upgrade point in addition to the coin."""
    def __init__(self, x, y, width, height, env, mass=1):
        super(UpgradeTrigger, self).__init__(x, y, width, height)
        self.static = True
        self.isAffectByGravity = False
        self.isTrigger = True
        self.env = env
        self.env.particles.append(self)
        self.times = 1              # maximum number of activations
        self.currentTime = 0        # activations so far
        self.isHit = False
        # Visual for the pickup, kept until collected.
        self.c = CoinEffect(self.x, self.y, upgrade, 1, 1, self.env)
    def hit(self, particle):
        """Grant an upgrade (and a coin) to the player, then despawn."""
        if particle.parent.name == 'Player':
            if self.currentTime < self.times:
                self.env.player.upgrade += 1
                self.env.player.coin += 1
                self.env.removeParticle(self)
                self.env.removeEffect(self.c)
                self.currentTime += 1
                self.isHit = True
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT = os.path.dirname(os.path.dirname(
    os.path.dirname(os.path.abspath(__file__))))
# PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "{{ secret_key }}"

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Application definition
DEFAULT_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
]
THIRDPARTY_APPS = [
    'whitenoise.runserver_nostatic',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'oauth2_provider',
    'rest_framework',
]
LOCAL_APPS = [
]
INSTALLED_APPS = DEFAULT_APPS + THIRDPARTY_APPS + LOCAL_APPS

# NOTE(review): MIDDLEWARE_CLASSES was replaced by MIDDLEWARE in
# Django 1.10 and removed in 2.0; on a modern Django this list is
# silently ignored -- confirm the targeted Django version.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = '{{ project_name }}.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': DEBUG,
        },
    },
]

WSGI_APPLICATION = '{{ project_name }}.wsgi.application'

# ModelBackend keeps admin login working; allauth handles the rest.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1

# Static files (CSS, JavaScript, Images)
STATIC_URL = '/assets/'
STATIC_ROOT = os.path.join(ROOT, 'assets', 'collected-static')
STATICFILES_DIRS = (
    os.path.join(ROOT, 'assets'),
)

# Media Settings
MEDIA_ROOT = 'mediafiles'
MEDIA_URL = '/media/'

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Authentication Settings (django-allauth): log in by email, no
# username required, no email verification.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
# Fix: this setting was previously assigned twice (True, then False a
# few lines later); only the final False ever took effect, so the
# shadowed duplicate assignment has been removed.
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "none"
# ACCOUNT_SIGNUP_FORM_CLASS = 'app.forms.SignupForm'

# Auth By Pass
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/accounts/logout/'
# Write for loops to produce the following output:
# 1
# 22
# 333
# 4444
# 55555
# 666666
# 7777777
# Row k (1-based) prints the number k, k times.  NOTE(review): rows of
# 10 and above repeat multi-digit numbers, so the triangle shape only
# holds for heights up to 9.
number_one = 1
height_of_triangle = int(input("Enter height of triangle: "))
# The first row is always a single '1'.
print(number_one)
for i in range(height_of_triangle - 1):
    # Each subsequent row uses the next integer, repeated that many times.
    number_one = number_one + 1
    for j in range(number_one):
        print(number_one, end= "")
    print("")
|
# Generated by Django 2.1.1 on 2018-11-19 18:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the IDK_rest app: answers, comments, follows,
    notifications, questions, scores and tags (a Q&A-style data model)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Answers keep a nullable owner (SET_NULL) so content survives
        # user deletion; the `parent` question FK is added further below.
        migrations.CreateModel(
            name='AnswerTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=30000)),
                ('creationTime', models.DateTimeField(null=True)),
                ('modifiedTime', models.DateTimeField(null=True)),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'AnswerTable',
            },
        ),
        # Comments may attach to an answer (here) or a question (FK added below).
        migrations.CreateModel(
            name='CommentTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=1024)),
                ('creationTime', models.DateTimeField(null=True)),
                ('modifiedTime', models.DateTimeField(null=True)),
                ('answer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.AnswerTable')),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'CommentTable',
            },
        ),
        # Directed follower/following relation between two users.
        migrations.CreateModel(
            name='FollowTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL)),
                ('following', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'FollowTable',
            },
        ),
        # A comment-triggered notification from one user to another.
        migrations.CreateModel(
            name='NotificationTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.CommentTable')),
                ('fromUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fromUser', to=settings.AUTH_USER_MODEL)),
                ('toUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='toUser', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'NotificationTable',
            },
        ),
        migrations.CreateModel(
            name='QuestionTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reputationThreshold', models.IntegerField(default=0)),
                ('title', models.CharField(max_length=200)),
                ('body', models.CharField(max_length=30000)),
                ('creationTime', models.DateTimeField(null=True)),
                ('modificationTime', models.DateTimeField(null=True)),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'QuestionTable',
            },
        ),
        # A user's vote on either a question or an answer (both FKs nullable).
        migrations.CreateModel(
            name='ScoreTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('scoreVal', models.IntegerField()),
                ('answer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.AnswerTable')),
                ('question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.QuestionTable')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'ScoreTable',
            },
        ),
        migrations.CreateModel(
            name='TagTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
            ],
            options={
                'verbose_name_plural': 'TagTable',
            },
        ),
        # Cross-model FKs added after all tables exist.
        migrations.AddField(
            model_name='questiontable',
            name='tags',
            field=models.ManyToManyField(to='IDK_rest.TagTable'),
        ),
        migrations.AddField(
            model_name='commenttable',
            name='question',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.QuestionTable'),
        ),
        migrations.AddField(
            model_name='answertable',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='IDK_rest.QuestionTable'),
        ),
        migrations.AddField(
            model_name='answertable',
            name='tags',
            field=models.ManyToManyField(to='IDK_rest.TagTable'),
        ),
    ]
|
# Generated by Django 3.1.1 on 2020-11-13 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: a single `Investimento` table holding identification,
    trading-session metadata and market figures for one instrument."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Investimento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=10)),
                ('name', models.CharField(max_length=100)),
                ('region', models.CharField(max_length=100)),
                ('currency', models.CharField(max_length=20)),
                ('timeOpen', models.CharField(max_length=20)),
                ('timeClose', models.CharField(max_length=20)),
                ('timeZone', models.CharField(max_length=20)),
                ('marketCap', models.FloatField()),
                ('price', models.FloatField()),
                ('changePercent', models.FloatField()),
                ('data', models.CharField(max_length=100)),
            ],
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 14:36:29 2018
@author: lee
"""
import os
from PIL import Image
from collections import Counter
import re
import pandas as pd
file_list = []
path_list = os.listdir('./')
##筛选都有的icon
'''
for path in path_list:
if '_ic_' in path:
print(path)
files = os.listdir('./'+path)
file_list.extend(files)
print(len(files))
a = pd.Series(Counter(file_list))
icons = {}
icon_list = list(a[a==6].index)
for i in icon_list:
j = re.split('.png',i)[0]
print(j)
icons[j] = i
icons = str(icons)
with open("./icon_list.txt","w") as f:
f.write(icons)
'''
filename = r'./Gold Silk Rose Theme/auto_180x120.jpg'
def get_size(file):
    """Return (longer side, shorter side) of the image at `file`, in pixels."""
    dimensions = Image.open(file).size  # (width, height)
    return max(dimensions), min(dimensions)
def define(file):
    """Print '1' when the image's (long, short) sides are exactly (180, 120)."""
    if get_size(file) == (180, 120):
        print('1')
|
from ckeditor.widgets import CKEditorWidget
from django import forms
from .models import Requirement
class RequirementForm(forms.ModelForm):
    """ModelForm exposing every Requirement field, with a rich-text (CKEditor)
    widget for the long description and a plain textarea for the short one."""
    long_description = forms.CharField(widget=CKEditorWidget())
    short_description = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Requirement
        fields = '__all__'
|
# -*- coding: utf-8 -*-
import json
# sys.path.append ("tiny_func")
from tiny_func import get_counts2

path = "pydata-book-2nd-edition/datasets/bitly_usagov/example.txt"
# One JSON record per line; `with` closes the file deterministically
# (the previous open() in the comprehension leaked the handle).
with open(path, encoding="utf-8") as handle:
    records = [json.loads(line) for line in handle]
# Keep only records that carry a time-zone field.
time_zones = [rec["tz"] for rec in records if "tz" in rec]
tz_counts = get_counts2.count(time_zones)
print(tz_counts['America/New_York'])
print(len(time_zones))
|
# Read (and ignore) a count, then print the remaining whitespace-separated
# tokens concatenated in descending lexicographic order.
_count = int(input())
tokens = input().split()
print("".join(sorted(tokens, reverse=True)))
|
#!/usr/bin/python
import os.path
from libs.ioScore import *
from libs.ioPDBbind import *
from libs.constConf import *
# Scoring-function names per docking/rescoring program.
scoreListGOLD = ['plp', 'goldscore', 'chemscore', 'asp']
scoreListXSCORE = ['HPScore', 'HMScore', 'HSScore']
scoreListGLIDE = ['SP', 'XP']
scoreListPARA = ['DrugScore', 'pScore', 'PMF']
CASFyear = '2013'  # NOTE(review): unused default — readScore() receives the year explicitly
def readScore(CASFyear):
    """Assemble per-protein scores for one CASF release and write them to CSV.

    For every protein in the CASF core set the row starts with the
    experimental pKx, then appends rescoring values from GOLD (one per
    entry of scoreListGOLD), XScore (3 values), DSX (1) and ParaDockS
    (one per entry of scoreListPARA).

    CASFyear: key into the CASF_* configuration dicts, e.g. '2013'.
    """
    proteinDir = CASF_PATH[CASFyear]
    indexFile = CASF_CORE_INDEX[CASFyear]
    data = parse_index(proteinDir, indexFile)
    outputDir = os.path.join(OUTPUT_DIR, "PDBbind", CASF_VERSION[CASFyear])
    scoreDict = {}
    for protein in data.keys():
        scoreDict[protein] = [float(data[protein]['pKx'])]
        ##### read score from GOLD #####
        for score in scoreListGOLD:
            scoreFile = os.path.join(outputDir, 'gold', score, protein, 'bestranking.lst')
            # Renamed from `readScore`, which shadowed this function's name.
            goldScore = readGOLDScore(scoreFile)
            scoreDict[protein].append(goldScore)
        ##### read score from XScore #####
        scoreFile = os.path.join(outputDir, 'xscore', protein+'.table')
        scoreDict[protein].extend(readXScore(scoreFile))
        ##### read score from DSX #####
        scoreFile = os.path.join(outputDir, 'dsx', 'DSX_'+protein+'_protein_prep_'+protein+'_ligand.txt')
        scoreDict[protein].append(readDSXScore(scoreFile))
        ##### read score from ParaDocks #####
        scoreFile = os.path.join(outputDir, 'paradocks', protein, 'ParaDockS_results.table')
        scoreDict[protein].extend(readPARAScore(scoreFile))
    scoreList = ['PDB','experimental'] + scoreListGOLD + scoreListXSCORE + ['DSX'] + scoreListPARA
    CSVfile = os.path.join(OUTPUT_DIR, "scores", CASF_NAME[CASFyear]+'_core_scores.csv')
    writeScoreCSV(scoreList, scoreDict, outFile=CSVfile)
# Generate one score CSV for each CASF release.
readScore('2007')
readScore('2012')
readScore('2013')
#! /usr/bin/python
import sys
import os
from setuptools import setup, Extension
version = "0.1.4"  # package release version

# Distribution metadata for the `mongorm` package.
setup(
    name="mongorm",
    version=version,
    packages=["mongorm"],
    author="Simversity Inc.",
    author_email="dev@simversity.com",
    url="http://simversity.github.io/mongorm",
    license="http://www.apache.org/licenses/LICENSE-2.0",
    description='''Python based ORM for MongoDB''',
    zip_safe=False
)
|
import time
import math
import statistics
def selectionsort(L):
    """Sort L in place by pairwise exchanges; return (L, elapsed_seconds).

    Note: one swap already falsifies the comparison, so a plain `if`
    is equivalent to the original `while`.
    """
    start = time.time()
    size = len(L)
    for left in range(size):
        for right in range(left + 1, size):
            if L[right] < L[left]:
                L[left], L[right] = L[right], L[left]
    return L, time.time() - start
def sumksquare(k):
    """Sum of squares 1..k by explicit summation; returns (total, elapsed_seconds)."""
    start = time.time()
    total = sum(i * i for i in range(1, k + 1))
    return total, time.time() - start
def sumksquare2(k):
    """Sum of squares 1..k via the closed form k(k+1)(2k+1)/6 (float result);
    returns (total, elapsed_seconds)."""
    start = time.time()
    closed_form = (k * (k + 1) * (2 * k + 1)) / 6
    return closed_form, time.time() - start
def timetrials(func, k, trials=20):
    """Run func(k) `trials` times and return the mean of its reported runtime.

    `func` must return a (result, elapsed_seconds) tuple; only the
    elapsed-seconds element is averaged.
    """
    return sum(func(k)[1] for _ in range(trials)) / trials
### Call the sumksquare2 function and obtain the total time
# NOTE(review): the third positional argument overrides `trials` (10000
# repetitions per input size), not the input itself.
T = [timetrials(sumksquare2, 10000, 10000)]
T.append(timetrials(sumksquare2, 100000, 10000))
T.append(timetrials(sumksquare2, 1000000, 10000))
T.append(timetrials(sumksquare2, 10000000, 10000))
### Estimate the order of growth for sumksquare2
# Slope of log10(time) per decade of input size, rounded to nearest int.
m = int(statistics.mean([math.log(T[i+1]/T[i], 10) for i in range(len(T)-1)])+ 0.5)
print("The order of growth for running time of sumksquare2 is n^%d" % m)
### Call the sumksquare function and obtain the total time
T = [timetrials(sumksquare, 1000)]
T.append(timetrials(sumksquare, 10000))
T.append(timetrials(sumksquare, 100000))
T.append(timetrials(sumksquare, 1000000))
### Estimate the order of growth for sumksquare
m = int(statistics.mean([math.log(T[i+1]/T[i], 10) for i in range(len(T)-1)])+ 0.5)
print("The order of growth for running time of sumksquare is n^%d" % m)
### Call the selectionsort function and obtain the total time
# NOTE(review): selectionsort mutates L in place, so all trials after the
# first run on already-sorted data — this likely skews the estimate.
L = list(range(10))
T = [timetrials(selectionsort, L)]
L = list(range(100))
T.append(timetrials(selectionsort, L))
L = list(range(1000))
T.append(timetrials(selectionsort, L))
### Estimate the order of growth for selectionsort
# NOTE(review): range(1, len(T)-1) averages a single ratio here — probably
# meant range(len(T)-1); confirm intent.
m = int(statistics.mean([math.log(T[i+1]/T[i], 10) for i in range(1, len(T)-1)])+ 0.5)
print("The order of growth for running time of selection sort is n^%d" % m)
### A list with values in the descending order
L = list(range(10, 0, -1))
### Call the selectionsort function and obtain the total time
T = [timetrials(selectionsort, L)]
L = list(range(100, 0, -1))
T.append(timetrials(selectionsort, L))
L = list(range(1000, 0, -1))
T.append(timetrials(selectionsort, L))
### Estimate the order of growth for selectionsort
m = int(statistics.mean([math.log(T[i+1]/T[i], 10) for i in range(1, len(T)-1)])+ 0.5)
print("The order of growth for running time of selection sort is n^%d" % m)
|
# Игра крестики - нолики, Автор: Панков Ю. А.
# _*_ coding: utf-8 _*_
import random
def draw_field(field):
    """Render the 3x3 board with a horizontal rule above and below each row."""
    print("-------------")
    for row in range(3):
        base = row * 3
        print("|", field[base], "|", field[base + 1], "|", field[base + 2], "|")
        print("-------------")
def gamer_move_input(gamer_object):
    """
    Read the player's move from stdin; validates that the input is an
    integer 1-9 and that the chosen cell is still free, then writes
    `gamer_object` into the global `field`.
    """
    check_move = True
    while check_move != False:
        gamer_move = input("Введите число от 1 до 9, на место которого будет поставлен " + gamer_object + " ")
        try:
            gamer_move = int(gamer_move)
        except:
            print("Ошибка! Вы точно ввели число?")
            continue
        if gamer_move >= 1 and gamer_move <= 9:
            if gamer_move < 4:  # K1: swap rows 1-3 and 7-9 to match the numeric-keypad layout
                gamer_move = gamer_move + 6
            else:
                if gamer_move > 6:
                    gamer_move = gamer_move - 6
            # Free cells hold their own 1-based number; anything else is taken.
            if gamer_move != field[gamer_move - 1]:
                print("Ошибка! В этой клетке уже что-то стоит")
            else:
                field[gamer_move-1] = gamer_object
                check_move = False
        else:
            print("Ошибка! Введите число от 1 до 9, чтобы продолжить")
def ai_move_input(gamer_object, count):
    """
    Make the AI's move: complete its own winning line if possible,
    otherwise block the player's, otherwise follow a small opening book
    keyed on the AI's move counter `count`. Mutates the global `field`.
    """
    if gamer_object == "X":  # The AI plays the opposite mark
        ai_object = "O"
    else:
        ai_object = "X"
    if win_comb_check(ai_object) != False:  # Win immediately if a line can be completed
        field[win_comb_check(ai_object)-1] = ai_object
        return
    if win_comb_check(gamer_object) != False:  # Otherwise block the player's winning line
        field[win_comb_check(gamer_object)-1] = ai_object
        return
    if field[4] == 5:  # Taking the centre on the first opportunity is always best
        field[4] = ai_object
        return
    if count == 1 and ai_object == "X" or count == 0 and ai_object == "O":  # X2/O1: take a cell on a free diagonal
        # NOTE(review): field[1]==1 / field[3]==3 can never hold (free cells
        # store index+1, i.e. 2 and 4) — this branch looks dead; confirm intent.
        if field[0] == 1 and field[8] == 9 and ai_object == "O" and field[1] == 1 and field[3] == 3:
            field[random.choice([0, 8, 0, 8, 3, 0, 8, 0, 1, 0, 8, 3])] = ai_object  # ~20% chance the AI errs, giving the player a shot at winning
        if field[0] == 1 and field[8] == 9:
            field[random.choice([0, 8])] = ai_object
            return
        if field[2] == 3 and field[6] == 7:
            field[random.choice([2, 6])] = ai_object
            return
    if count == 2 and ai_object == "X":  # X3: answer corner threats
        if field[0] == 1 and field[1] == 2 and field[3] == 4:
            field[0] = ai_object
            return
        if field[1] == 2 and field[5] == 6 and field[2] == 3:
            field[2] = ai_object
            return
        if field[6] == 7 and field[3] == 4 and field[7] == 8:
            field[6] = ai_object
            return
        if field[8] == 9 and field[7] == 8 and field[5] == 6:
            field[8] = ai_object
            return
    if count > 2 and ai_object == "X":  # Later X moves: take the first free edge cell (odd indices)
        i = 1
        while i < 8:
            if field[i] == i+1:
                field[i] = ai_object
                return
            i += 2
    if count == 1 and ai_object == "O":  # O2: take a free edge pair
        if field[1] == 2 and field[7] == 8:
            field[random.choice([1, 7])] = ai_object
            return
        if field[3] == 4 and field[5] == 6:
            field[random.choice([3, 5])] = ai_object
            return
    i = 0
    while i < 9:  # Fallback: place the mark in the first free cell
        if field[i] == i + 1:
            field[i] = ai_object
            return
        i += 1
def win_comb_check(object):
    """Return the content (1-based number) of the single free cell that would
    complete a winning line for `object`, or False if no such cell exists.
    Reads the global `field`; free cells hold their own number."""
    win_comb = ((0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6), (0,3,6), (1,4,7), (2,5,8))
    for a, b, c in win_comb:
        # For each line, test every "two marks + one free cell" arrangement,
        # in the same order as the original: free cell last, middle, first.
        for first, second, empty in ((a, b, c), (a, c, b), (c, b, a)):
            if field[first] == field[second] == object and field[empty] == empty + 1:
                return field[empty]
    return False
def win_check(field):
    """Return the mark occupying a completed line on `field`, or False."""
    lines = ((0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6), (0,3,6), (1,4,7), (2,5,8))
    for a, b, c in lines:
        if field[a] == field[b] == field[c]:
            return field[a]
    return False
def choose_gamer_object():
    """
    Ask the player to pick a mark (X always moves first). Accepts Latin
    and Cyrillic look-alikes (and '0' for O) and normalises the choice
    to a Latin capital "X" or "O".
    """
    check_move = True
    while check_move != False:
        gamer_object = input("Введите X или O для выбора, X ходят первые ")
        posible_gamer_objects_o = ("O", "o", "О", "о", "0")
        posible_gamer_objects_x = ("X", "x", "Х", "х")
        flag_error_choice = 0
        for each in posible_gamer_objects_o:
            if gamer_object == each:
                flag_error_choice += 1
                gamer_object = "O"  # Latin capital O
        for each in posible_gamer_objects_x:
            if gamer_object == each:
                flag_error_choice += 1
                gamer_object = "X"  # Latin capital X
        if flag_error_choice == 0:
            print("Ошибка! Выберете X или O чтобы продолжить")
            continue
        else:
            return gamer_object
def main(field):
    """
    Game driver: alternates player and AI turns, mirrors the logical board
    into the global display board `field_g`, and announces win/loss/draw.
    """
    gamer_object = choose_gamer_object()
    counter = 0      # total moves played
    count_ai = 0     # AI moves played (indexes its opening book)
    win = False
    while win != True:
        draw_field(field_g)
        if counter % 2 == 0:  # even turns belong to X
            if gamer_object == "X":
                gamer_move_input(gamer_object)
            else:
                ai_move_input(gamer_object, count_ai)
                count_ai += 1
        else:
            if gamer_object == "X":
                ai_move_input(gamer_object, count_ai)
                count_ai += 1
            else:
                gamer_move_input(gamer_object)
        counter += 1
        # Mirror the logical board into the display board: marks are shown,
        # free cells appear blank.
        i = 0
        while i < 9:
            if field[i] == "X" or field[i] == "O":
                field_g[i] = field[i]
            else:
                field_g[i] = " "
            i += 1
        if counter > 4:  # a win is impossible before the fifth move
            win_object = win_check(field)
            if win_object:
                if win_object == gamer_object:
                    draw_field(field_g)
                    print("Вы выиграли!")
                    return
                else:
                    draw_field(field_g)
                    print("Вы проиграли :(")
                    return
        if counter == 9:  # board full with no winner
            draw_field(field_g)
            print("Ничья!")
            return
# Logical board: free cells hold their own 1-based number.
field = [i for i in range(1, 10)]
# Display board, K1: numpad row ordering to ease keypad play.
field_g = [7, 8, 9, 4, 5, 6, 1, 2, 3]
main(field)
# Replay loop: Y restarts with fresh boards, N quits, anything else re-asks.
while 1:
    """
    Цикл перезапускаа игры
    """
    repeat_g = input("Хотите сыграть ещё раз? (Y/N) ")
    if repeat_g == "Y" or repeat_g == "y":
        field = [i for i in range(1, 10)]
        field_g = [7, 8, 9, 4, 5, 6, 1, 2, 3]
        main(field)
    if repeat_g == "N" or repeat_g == "n":
        print("До свидания :)")
        break
    else:
        continue
continue |
#!/usr/bin/python3
# CGI endpoint: prints `sudo docker history` for the image named in the
# query parameter "x".
print("content-type: text/html")
print()

import shlex
import subprocess as sp
import cgi  # NOTE(review): the cgi module is deprecated and removed in Python 3.13

form = cgi.FieldStorage()
osimage = form.getvalue("x")
# SECURITY FIX: "x" is untrusted client input that was interpolated into a
# root shell command verbatim; shlex.quote() blocks shell injection.
# (str() keeps the old "None" behaviour when the parameter is absent.)
cmd = "sudo docker history {}".format(shlex.quote(str(osimage)))
# getstatusoutput returns (exit_status, combined stdout+stderr).
output = sp.getstatusoutput(cmd)
print(output)
# Removed two stray "~" lines (vi artifacts) that made the file a SyntaxError.
class WhoIsError(Exception):
    """Base class for every whois-related error."""


class QueryError(WhoIsError):
    """Raised when a whois query cannot be performed."""


class NotFoundError(WhoIsError):
    """Raised when the queried record does not exist."""
|
'''
Given a lowercase string that has alphabetic characters only and no spaces, return the highest value of consonant substrings. Consonants are any letters of the alphabet except "aeiou".
We shall assign the following values: a = 1, b = 2, c = 3, .... z = 26.
For example, for the word "zodiacs", let's cross out the vowels. We get: "z o d ia cs"
'''
import re
def smash(strinr):
    """Value of a consonant substring: a=1 ... z=26, summed over its letters."""
    return sum(ord(ch) - 96 for ch in strinr)

def solve(st):
    """Return the highest consonant-substring value in lowercase string `st`.

    Consonant runs are the maximal substrings containing no vowel.
    """
    runs = re.findall(r'[^aeiou]+', st)
    return max(smash(run) for run in runs)
|
#In this assignment you will read through and parse a file with text and numbers. You will extract all the numbers in the file and compute the sum of the numbers.
"""
Data Files
We provide two files for this assignment. One is a sample file where we give you the sum for your testing and the other is the actual data you need to process for the assignment.
Sample data: http://py4e-data.dr-chuck.net/regex_sum_42.txt (There are 90 values with a sum=445833)
Actual data: http://py4e-data.dr-chuck.net/regex_sum_960592.txt (There are 78 values and the sum ends with 496)
These links open in a new window. Make sure to save the file into the same folder as you will be writing your Python program. Note: Each student will have a distinct data file for the assignment - so only use your own data file for analysis.
"""
#CODE
import re

# Sum every run of digits found anywhere in the assignment data file.
# Fixes: the accumulator was named `sum`, shadowing the builtin, and the
# file handle was never closed.
total = 0
with open("regex_sum_960592.txt") as hand:
    for line in hand:
        for digits in re.findall('[0-9]+', line):
            total += int(digits)
print(total)
import sys
import vptree
from scipy import spatial
import numpy as np
if __name__ == '__main__':
    # Smoke-test driver for a sparse vantage-point tree (Python 2 syntax).
    X = np.random.randn(10, 10)
    # knn = spatial.cKDTree(X, leafsize=10)
    # NOTE(review): rebinding `vptree` shadows the imported module of the
    # same name — the module is unreachable after this line.
    vptree = vptree.vptree(sparse = True)
    # Ragged point sets (rows of differing length at the end).
    X = np.asarray([[[1,2], [2,5], [3,4]],
                    [[1,5],[3,1],[5,1]],
                    [[1,0.1],[2,5],[10,4]],
                    [[2,4],[9,1],[10,1]],
                    [[1,3], [3,3], [5,6]],
                    [[1,3], [3,3], [5,7]],
                    [[1,2], [10, 9], [100,10]],
                    [[1,2], [100, 9], [101,10]],
                    [[1,2], [100, 9], [121,10]],
                    [[1,2], [100, 9], [131,10]],
                    [[1,10], [100, 9], [131,10], [140,1]]
                    ])
    vptree.init_sparse(X)
    print np.asarray(X).ndim
    # Grow the dataset with randomised variants of a fixed template.
    for i in range(100):
        Y = np.array([[[1,10], [10, 9], [131,10], [150,12]], [[1,4]]])
        a = np.random.random_integers(100)
        Y[0][0][0] = np.random.random_integers(100)
        Y[0][0][1] = a
        a = np.random.random_integers(100)
        Y[0][1][1] = a
        a = np.random.random_integers(100)
        Y[0][2][1] = a
        a = np.random.random_integers(100)
        Y[1][0][1] = a
        X = np.concatenate((X, Y))
    # Y = np.array([[[1,10], [10, 9], [131,10], [150,12]], [[1,4]]])
    # X = np.concatenate((X, Y))
    val = np.asarray([[1,2], [2,5.], [3,4]])
    print 'search'
    # Repeated nearest-neighbour queries against the same probe point.
    for i in range(10000):
        res = vptree.search_sparse(val, 100, epsilon = 5.)
        print repr(res)
        print X[res[0][1]]
    sys.exit(0)
    # NOTE(review): everything below is dead code — sys.exit(0) above
    # always terminates first.
    for i in range(len(X)):
        res = vptree.search(X[i], 10)
        print repr(res)
    res = vptree.search(X[0][:5], 10)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 10:53:51 2020
@author: scro3517
"""
import numpy as np
import os
import pickle
from tqdm import tqdm
import pandas as pd
from itertools import compress
#%%
def load_paths(basepath):
    """Return the full path of every file under `basepath`/ECGDataDenoised."""
    data_dir = os.path.join(basepath, 'ECGDataDenoised')
    return [os.path.join(data_dir, fname) for fname in os.listdir(data_dir)]
#%%
def modify_df(output_type='single'):
    """Load Diagnostics.csv, merge rhythm labels, bucket recording dates,
    rename measurement columns into questions, and scale them to [-1, 1].

    output_type: accepted for interface compatibility; not used here.
    Returns (df, max_values, min_values) — the extrema are the pre-scaling
    per-column values needed to invert the scaling.
    """
    basepath = '/mnt/SecondaryHDD/chapman_ecg'
    """ Database with Patient-Specific Info """
    df = pd.read_csv(os.path.join(basepath,'Diagnostics.csv'))
    # Recording date is the second '_'-separated token of the file name.
    dates = df['FileName'].str.split('_',expand=True).iloc[:,1]
    dates.name = 'Dates'
    dates = pd.to_datetime(dates)
    # Fix: pass axis by keyword — positional `axis` for pd.concat was
    # deprecated in pandas 1.0 and later removed.
    df = pd.concat((df,dates), axis=1)
    """ Combine Rhythm Labels """
    old_rhythms = ['AF','SVT','ST','AT','AVNRT','AVRT','SAAWR','SI','SA']
    new_rhythms = ['AFIB','GSVT','GSVT','GSVT','GSVT','GSVT','GSVT','SR','SR']
    df['Rhythm'] = df['Rhythm'].replace(old_rhythms,new_rhythms)
    """ Add Date Column """
    def combine_dates(date):
        # Map a recording date to the first term whose cutoff it precedes.
        new_dates = ['All Terms']#use this for continual learning dataset ['Term 1','Term 2','Term 3']
        cutoff_dates = ['2019-01-01']##use this for continual learning dataset ['2018-01-16','2018-02-09','2018-12-30']
        cutoff_dates = [pd.Timestamp(date) for date in cutoff_dates]
        for t,cutoff_date in enumerate(cutoff_dates):
            if date < cutoff_date:
                new_date = new_dates[t]
                break
        return new_date
    df['Dates'] = df['Dates'].apply(combine_dates)
    """ Replace Certain Column Names With Questions """
    question_prefix = 'What is the '
    columns = df.loc[:,'VentricularRate':'TOffset'].columns
    new_columns = list(map(lambda column: question_prefix + column + '?',columns))
    column_mapping = dict(zip(columns,new_columns))
    df.rename(columns=column_mapping,inplace=True)
    """ Scale Values in the Columns """
    min_values = df[new_columns].min()
    max_values = df[new_columns].max()
    # Min-max scale each question column into [-1, 1].
    df[new_columns] = 2*(df[new_columns] - min_values)/(max_values - min_values) - 1
    return df, max_values, min_values
#%%
def obtain_phase_to_paths_dict(df,paths_to_files,nquestions=1,input_type='single'):
    """Split patients 60/20/20 into train/val/test within each term, then
    expand every valid recording into one entry per (lead, segment, question).

    Returns four parallel phase-keyed dicts: paths, lead labels, segment
    numbers and question strings — arrays of equal length per phase.
    """
    phases = ['train','val','test']
    phase_fractions = [0.6, 0.2, 0.2]
    phase_fractions_dict = dict(zip(phases,phase_fractions))
    terms = ['All Terms']##use this for continual learning dataset ['Term 1','Term 2','Term 3']
    """ Obtain Mapping from Phase to List of Filenames (From DataFrame) """
    phase_to_term_to_filenames = dict()
    for term in terms:
        phase_to_term_to_filenames[term] = dict()
        term_patients = df['FileName'][df['Dates'] == term]
        # Deterministic shuffle before the phase split.
        random_term_patients = term_patients.sample(frac=1,random_state=0)
        start = 0
        for phase,fraction in phase_fractions_dict.items():
            if phase == 'test':
                phase_patients = random_term_patients.iloc[start:].tolist() #to avoid missing last patient due to rounding
            else:
                npatients = int(fraction*len(term_patients))
                phase_patients = random_term_patients.iloc[start:start+npatients].tolist()
            phase_to_term_to_filenames[term][phase] = phase_patients
            # NOTE(review): on the 'test' iteration this reuses the previous
            # phase's `npatients` — harmless only because 'test' is last.
            start += npatients
    """ Obtain Filenames Only (Without Entire Path) """
    paths_to_filenames = list(map(lambda path: path.split('/')[-1].split('.csv')[0], paths_to_files))
    paths_to_filenames_df = pd.Series(paths_to_filenames)
    """ Obtain Paths For Each Phase """
    phase_to_paths = dict()
    phase_to_leads = dict()
    phase_to_segment_number = dict()
    phase_to_questions = dict()
    nsegments = 2
    """ Determine Whether Each Input Contains Single Or Multiple Leads """
    if input_type == 'single': #input will contain a single lead i.e. X = (B,1,L)
        leads = ['I', 'II', 'III', 'AVR', 'AVL', 'AVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']
        nleads = 12
    elif input_type == 'multi': #input will contain all leads i.e. X = (B,12,L)
        leads = ['All'] #filler
        nleads = 1 #to avoid repeating path an unnecessary number of times
    for term,phase_to_filenames in phase_to_term_to_filenames.items():
        for phase,filenames in tqdm(phase_to_filenames.items()):
            """ Obtain Paths For All Leads """ #(Are you in dataframe?)
            paths = list(compress(paths_to_files,paths_to_filenames_df.isin(filenames).tolist()))
            """ Check if Path Data is Valid and Return Only Valid Paths """
            print(len(paths))
            paths = check_if_data_is_valid(paths)
            print(len(paths))
            # Each path repeats once per (lead, segment, question) combination.
            paths_for_all_leads = np.repeat(paths,nleads*nsegments*nquestions)
            """ Obtain Leads Label """
            repeated_leads = np.repeat(leads,nsegments*nquestions) #for each path
            all_leads = np.tile(repeated_leads,len(paths))
            """ Obtain Segment Numbers (First Segment and Second Segment of Recording) """
            segment_number = [0,1]
            segment_number = np.tile(segment_number,nleads*nquestions) #for each path
            all_segment_numbers = np.tile(segment_number,len(paths))
            """ Obtain Questions """
            questions = list(compress(df.columns.tolist(),['What' in column for column in df.columns]))
            questions = np.tile(questions,nleads*nsegments) #for each path
            all_questions = np.tile(questions,len(paths))
            """ Assign Paths and Leads Labels """
            phase_to_paths[phase] = paths_for_all_leads
            phase_to_leads[phase] = all_leads
            phase_to_segment_number[phase] = all_segment_numbers
            phase_to_questions[phase] = all_questions
    return phase_to_paths, phase_to_leads, phase_to_segment_number, phase_to_questions
def check_if_data_is_valid(paths):
    """Keep only CSV paths whose data is neither all zeros nor contains NaNs."""
    valid_paths = []
    for path in tqdm(paths):
        frame = pd.read_csv(path)
        values = frame.to_numpy()
        # Reject recordings that are entirely zero ...
        all_zeros = np.sum(values == 0) == frame.size
        # ... or that contain any NaN sample.
        has_nan = np.sum(np.isnan(values)) > 0
        if not (all_zeros or has_nan):
            valid_paths.append(path)
    return valid_paths
#%%
basepath = '/mnt/SecondaryHDD/chapman_ecg'  # root of the Chapman ECG dataset
""" Identify Encoder Based on Output Type """
input_type = 'multi' #options: 'single' is single lead input | 'multi' is multiple lead input
output_type = 'single' #options: 'single' is single output | 'multi' is multi-output
goal = 'VQA' #options: 'VQA' | 'Supervised'
if __name__ == '__main__':
    # Build the per-phase (path, lead, segment, question) index and pickle
    # the four mappings under the dataset's patient_data tree.
    paths_to_files = load_paths(basepath)
    df, max_values, min_values = modify_df(output_type=output_type)
    if goal == 'VQA': #generate data for VQA setting
        nquestions = sum(['What' in column for column in df.columns])
    else: #generate data for traditional supervised setting
        nquestions = 1
    phase_to_paths, phase_to_leads, phase_to_segment_number, phase_to_questions = obtain_phase_to_paths_dict(df,paths_to_files,nquestions=nquestions,input_type=input_type)
    savepath = os.path.join(basepath,'patient_data',goal,'%s_input' % input_type,'%s_output' % output_type)
    if not os.path.exists(savepath):
        os.makedirs(savepath)
    """ Save Paths """
    with open(os.path.join(savepath,'phase_to_paths.pkl'),'wb') as f:
        pickle.dump(phase_to_paths,f)
    """ Save Leads """
    with open(os.path.join(savepath,'phase_to_leads.pkl'),'wb') as f:
        pickle.dump(phase_to_leads,f)
    """ Save Segment Number """
    with open(os.path.join(savepath,'phase_to_segment_number.pkl'),'wb') as f:
        pickle.dump(phase_to_segment_number,f)
    """ Save Questions """
    with open(os.path.join(savepath,'phase_to_questions.pkl'),'wb') as f:
        pickle.dump(phase_to_questions,f)
#
|
__author__ = 'Antoine'
from naturalLanguagePython.countryDomain.countryRepository import CountryRepository
from naturalLanguagePython.countryDomain.country import Country
class CountryRepositoryDB(CountryRepository):
    """In-memory country repository backed by a plain list."""

    def __init__(self):
        self.countryList = []

    def addCountry(self, country):
        """Store a country object in the repository."""
        self.countryList.append(country)

    def searchCountries(self, keywordDictionary, searchStrategy = None):
        """For each keyword, collect the names of countries matching it.

        Returns one list of country names per keyword, in dictionary order.
        """
        results_per_keyword = []
        for keyword, value in keywordDictionary.items():
            matches = [
                country.name
                for country in self.countryList
                if country.contains(keyword, value, searchStrategy)
            ]
            results_per_keyword.append(matches)
        return results_per_keyword
|
# Configuration test file
from bibliopixel.led import*
#~ from bibliopixel.animation import MatrixCalibrationTest
from bibliopixel.drivers.APA102 import*
import bibliopixel.colors as colors
#~ from LEDfuncs import*
#~ from time import sleep
#~ from animation import*
#~ import numpy
# Global Vars
NUM = 8*8  # total LED count for the 8x8 matrix
rainbow = [colors.Red, colors.Orange, colors.Yellow, colors.Green, colors.Blue, colors.Indigo, colors.Violet]
Red = colors.Red
Orange = colors.Orange
Yellow = colors.Yellow
Green = colors.Green
Blue = colors.Blue
Indigo = colors.Indigo
Violet = colors.Violet
#create driver for a 8*8 grid, use the size of your display
driver = DriverAPA102(NUM, c_order = ChannelOrder.BGR) # 64 LEDs, 2 MHz speed using SPI, BRG order
led = LEDMatrix(driver,rotation = 2,vert_flip = True) # Correct Orientation
#########################################################################################
# Test script
#~ # For calibrating orientation & colors
#~ anim = MatrixCalibrationTest(led)
#~ anim.run()
# Matrix Channel Test
#~ anim = MatrixChannelTest(led)
#~ anim.run()
# Fix: `fillRect(2,2,1,1,orange)` referenced two undefined names (no bare
# fillRect is imported, and `orange` should be the `Orange` constant above),
# raising NameError. Draw via the matrix object and push the frame.
# NOTE(review): assumes LEDMatrix.fillRect(x, y, w, h, color) exists in this
# bibliopixel version — confirm against the installed API.
led.fillRect(2, 2, 1, 1, Orange)
led.update()
#~ # Set Brightness
#~ led.setMasterBrightness(255)
#~ # Set one LED
#~ led.set(3,3,(255,255,255)) # Set LED @ x=3,y=3, to color (255,255,255)
# Set all LEDs 1 color
#~ tex = []
#~ for i in range(0,8):
#~ row = []
#~ for j in range(0,8):
#~ row.append((255,0,0))
#~ tex.append(row)
#~ w, h = 8, 8
#~ tex = [[(0,255,0) for x in range(w)] for y in range(h)]
#~ led.setTexture(tex)
#~ print "test"
#~ led.update()
# Create a circle with white background
#~ led.setTexture(tex = Blue)
#~ print led.get(1,1)
#~ print led.texture
#~ led.update()
#~ sleep(1)
#~ off()
#~ sleep(1)
#~ led.drawCircle(3,3,2,Blue)
#~ led.update()
#~ anim = MatrixChannelTest(led)
#~ anim.run()
#~ # Make Animation with Growing Rectangle
#~ while True:
#~ led.drawRect(3, 3, 2, 2, Red)
#~ led.update()
#~ sleep(1)
#~ led.drawRect(2, 2, 4, 4, Red)
#~ led.update()
#~ sleep(1)
#~ led.drawRect(1, 1, 6, 6, Red)
#~ led.update()
#~ sleep(1)
#~ led.drawRect(0, 0, 8, 8, Red)
#~ led.update()
#~ off()
#~ sleep(1)
#~ # Use base animation
#~ class MatrixTest(BaseMatrixAnim):
#~ def __init__(self, led):
#~ #The base class MUST be initialized by calling super like this
#~ super(MatrixTest, self).__init__(led)
#~ #Create a color array to use in the animation
#~ self._colors = [Red, Orange, Yellow, Green, Blue]
#~
#~ def step(self, amt = 1):
#~ #Fill the strip, with each successive color
#~ for i in range(self._led.numLEDs):
#~ self._led.drawRect(-1, -1, i+1, i+1, self._colors[(self._step + i) % len(self._colors)])
#~ #Increment the internal step by the given amount
#~ self._step += amt
#~ def step(self, amt = 1):
#~ self._led.fill((self._step, 0, 0))
#~ self._step += amt
#~ if self._step > 255:
#~ self._step = 0
#~
#~ anim = MatrixTest(led)
#~ anim.run()
#~ array = numpy.random.rand(100)
|
import util
def pack(dims, unpacked, packed, active, area, on_update):
    """Recursively pack the multiset *unpacked* of rectangles into a bin.

    dims:      (W, H) bin size.  The original signature used Python-2
               tuple parameter unpacking (``def pack((W, H), ...)``),
               which was removed by PEP 3113; callers still pass a pair.
    unpacked:  multiset of rectangle sizes still to place (.keys/.decr).
    packed:    rectangles placed so far (may include non-real fillers).
    active:    stack of candidate placement points.
    area:      total area consumed so far (real + filler).
    on_update: progress callback(active, packed, point, rect, unpacked, msg).

    Returns (all_fit, packed_list); packed_list is [] on failure.
    """
    W, H = dims
    # Base cases:
    if not unpacked:
        on_update(active, packed, None, None, unpacked, "Nothing left to pack. Success.")
        return True, packed
    if area > W*H:
        on_update(active, packed, None, None, unpacked, "... no room left in the bin (" + str(area) + " / " + str(W*H) + ")" )
        return False, []
    if not active:
        on_update(active, packed, None, None, unpacked, "No active points remaining. Backtracking." )
        return False, []
    # The next active point is consumed by this call.
    point = active.pop().project(packed)
    if W == point.x or H == point.y or any( r.covers(point.pos()) for r in packed ):
        # Point sits on the far edge or is already covered: skip it.
        return pack((W,H), unpacked, packed, active, area, on_update)
    # Try every rectangle in this spot (or leave it unused).
    for rectangle in unpacked.keys():
        rect = util.Rectangle(rectangle, point.pos())
        on_update(active, packed, point, rect, unpacked, "Trying rectangle " + str(rect) + " at position " + str(point))
        if rect.right() > W or rect.top() > H:
            on_update(active, packed, point, rect, unpacked, "... but it's out of bounds")
        elif any(rect.intersect(x) for x in packed if x.is_real):
            on_update(active, packed, point, rect, unpacked, "... but it intersects with another rectangle")
        else:
            on_update(active, packed, point, rect, unpacked, "... it fits")
            # New placement exposes two fresh corners as active points.
            newactive = active.copy()
            newactive.push(util.ActivePoint(rect.left(), rect.top(), axis=0))
            newactive.push(util.ActivePoint(rect.right(), rect.bottom(), axis=1))
            allfit, packed2 = pack((W,H), unpacked.decr(rectangle), packed + [rect], newactive, area, on_update)
            if allfit:
                return True, packed2
    # Nothing fit here: block the spot with a non-real filler strip up to
    # the next active point (or the bin edge), charging its area.
    nextsmallest = active.peek().x if active else W
    dummy_h = 1
    dummy_w = max(1, nextsmallest - point.x)
    rect = util.Rectangle((dummy_w, dummy_h), point.pos(), is_real=False)
    allfit, packed2 = pack((W,H), unpacked, packed + [rect], active.copy(), area + rect.area(), on_update)
    if allfit:
        return True, packed2
    else:
        on_update(active, packed, point, rect, unpacked, "... Backtracking.")
        return False, []
import configparser #处理ini文件
# Read the "Credits" float from the [config] section of an ini file,
# defaulting to 0 when the file or section is missing.
iniFileName = 'iniFileName.ini'
cred = 0
config = configparser.ConfigParser()
config.read(iniFileName, encoding='utf-8')  # silently ignores a missing file
sectionName = 'config'
# all section names found in the configuration file
# (renamed from `list`, which shadowed the builtin)
section_names = config.sections()
if sectionName in section_names:  # only read when the section exists
    cred = config.getfloat(sectionName, "Credits")
print(cred)
#!/usr/bin/env python
import os,sys
import subprocess
from optparse import OptionParser
SYSTEM_MOUNTPOINTS = set(['/proc', '/sys'])
def main():
    """Parse CLI options and run the given command inside a shadowed chroot."""
    parser = OptionParser(usage="%prog [OPTIONS] cmd")
    parser.add_option('-b', '--base')
    parser.add_option('-s', '--shadow', action='append', default=[], dest='shadow_dirs')
    parser.add_option('-n', '--dry-run', action='store_true', help='print commands, but do nothing')
    opts, args = parser.parse_args()
    assert opts.base, "Please provide a base directory"
    def action():
        # Guard clause: a dry run executes nothing.
        if opts.dry_run:
            return
        execute_as_user(lambda: subprocess.check_call(args))
    chroot(base=opts.base, shadow_dirs=opts.shadow_dirs, dry_run=opts.dry_run, action=action)
def chroot(base, shadow_dirs, action, dry_run=False):
    """Run *action* inside a chroot rooted at *base*.

    First mirrors the host filesystem under *base* via bind mounts
    (see create_fs_mirror), then — in a forked child, so the parent is
    unaffected — chroots, mounts the system pseudo-filesystems, and
    invokes *action*.

    Returns (child_exit_status, mounted_dirs) so the caller can unmount.
    """
    mounted_dirs = create_fs_mirror(base, shadow_dirs, dry_run)
    def _action():
        # Executed in the child process created by in_subprocess.
        os.chroot(base)
        for system_dir in SYSTEM_MOUNTPOINTS:
            # NOTE(review): mounting with only the target path relies on
            # matching /etc/fstab entries inside the chroot — confirm.
            _run_cmd(['mount', '-n', system_dir], dry_run=dry_run)
        return action()
    ret = in_subprocess(_action)
    return ret, mounted_dirs
def in_subprocess(func):
    """Fork, run *func* in the child, and return its exit status.

    The child exits with ``func() or 0``; the parent waits and returns
    the decoded exit status.
    """
    pid = os.fork()
    if pid:
        # Parent: wait for the child and decode its exit status.
        _, raw_status = os.waitpid(pid, 0)
        return os.WEXITSTATUS(raw_status)
    # Child: run the function and exit without unwinding the stack.
    os._exit(func() or 0)
def execute_as_user(func, user=None):
    """Run *func* in a subprocess after dropping privileges to *user*.

    Returns the subprocess's exit status (see in_subprocess).
    """
    def _switch_then_run():
        become_user(user)
        return func()
    return in_subprocess(_switch_then_run)
import pwd
def become_user(name=None):
    """Drop root privileges, switching to *name* (default: $SUDO_USER).

    Raises KeyError when name is None and SUDO_USER is unset; asserts
    the target user is not root.
    """
    if name is None:
        name = os.environ['SUDO_USER']
    assert name != 'root'
    # Get the uid/gid from the name
    running_uid = pwd.getpwnam(name).pw_uid
    running_gid = pwd.getpwnam(name).pw_gid
    # Remove group privileges
    os.setgroups([])
    # Try setting the new uid/gid; gid must be changed first, while the
    # process still has the privilege to do so
    os.setgid(running_gid)
    os.setuid(running_uid)
def _run_cmd(cmd, dry_run, silent=False):
if dry_run:
if not silent:
print ' '.join(cmd)
else:
subprocess.check_call(cmd)
def create_fs_mirror(base, shadow_dirs, dry_run=False):
    """Bind-mount a mirror of '/' under *base*, replacing *shadow_dirs*
    with empty directories.

    Walks the top of the filesystem: shadowed dirs become empty mkdirs,
    dirs unrelated to any shadowed dir are bind-mounted, and ancestors
    of shadowed dirs are descended into.  Returns the list of bind-mount
    target paths (inside *base*) so the caller can unmount them later.
    """
    sep = os.path.sep
    base = base.rstrip(sep)
    shadow_dirs = [dir.rstrip(sep) for dir in shadow_dirs]
    def add_slash(path): return path.rstrip('/') + '/'
    # NOTE(review): this asserts that *base* lives INSIDE one of the
    # shadowed dirs (presumably so the mirror never bind-mounts itself)
    # — confirm intent.
    assert any([base.startswith(shadow + os.path.sep) for shadow in shadow_dirs])
    def is_shadowed(path):
        # Exact match against a shadowed directory.
        return path in shadow_dirs
    def shares_prefix_with_shadowed(path):
        # True when *path* is an ancestor of (or equal to) a shadowed dir.
        return any(add_slash(shadow_dir).startswith(add_slash(path)) for shadow_dir in shadow_dirs)
    def run_cmd(cmd, silent=False):
        _run_cmd(cmd, dry_run=dry_run, silent=silent)
    def chroot_path(path):
        # Map a host path to its location inside the chroot.
        return os.path.join(base, path.lstrip('/'))
    mounted_dirs = []
    def bind_mount(path):
        run_cmd(['mkdir','-p', chroot_path(path)], silent=True)
        if path not in SYSTEM_MOUNTPOINTS:
            # procfs / sysfs will be auto mounted without arguments
            run_cmd(['mount', '-n', '--bind', path, chroot_path(path)])
            mounted_dirs.append(chroot_path(path))
    def mkdir_p(path):
        run_cmd(['mkdir', '-p', chroot_path(path)])
    for path, dirnames, filenames in os.walk('/', followlinks=True):
        fullpath = lambda x: os.path.join(path, x)
        for dirname in dirnames[:]:
            full = fullpath(dirname)
            keep = False
            if is_shadowed(full):
                mkdir_p(full)
            elif not shares_prefix_with_shadowed(full):
                bind_mount(full)
            else:
                # shares a prefix with a shadowed dir; we can't remove it yet...
                keep = True
            if not keep:
                # Pruning dirnames stops os.walk from descending further.
                dirnames.remove(dirname)
    return mounted_dirs
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 17 10:10:23 2021
@author: tai
"""
import numpy as np
import cv2
# --- Demo 1: show the webcam feed converted from BGR to HSV ---
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
    height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #cv2.imshow("frame", frame)
    cv2.imshow("frame", hsv)
    if cv2.waitKey(1) == ord('q'):   # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()
######################### HSV Video ###############################
'''
HSV used to extract the color we want to extract
'''
# --- Demo 2: identical to Demo 1 (duplicated section kept as-is) ---
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #cv2.imshow("frame", frame)
    cv2.imshow("frame", hsv)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
######################### HSV Video ###############################
'''
HSV used to extract the color we want to extract
we need to define:
upper bound:
lower bound:
out of range is 0
in range is keep
1. Google: HSV color picker
2. We can find it by using:
'''
# --- Demo 3: isolate blue pixels with an HSV range mask ---
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read() # (480, 640, 3)
    width = int(cap.get(3))
    height = int(cap.get(4))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # (480, 640, 3)
    # HSV range for "blue"; widened hue lower bound from the original 110
    lower_blue = np.array([90, 50, 50]) # Original: [110, 50, 50]
    upper_blue = np.array([130, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Note: bitwise_and take 2 arguiment
    # mask keeps only the in-range pixels of the frame
    result = cv2.bitwise_and(frame, frame, mask = mask)
    '''
    1 1 = 1
    0 1 = 0
    1 0 = 0
    0 0 = 0
    '''
    #cv2.imshow("frame", frame)
    cv2.imshow("frame", result)
    cv2.imshow("mask", mask)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# Disabled scratch code: convert a single BGR color to HSV.
'''
import numpy as np
import cv2
#cv2.cvtColor([[[255, 0, 0]]], cv2.COLOR_BGR2HSV) # ERROR: TypeError: Expected Ptr<cv::UMat> for argument 'src'
BGR_color = np.array([[[255, 0, 0]]])
#cv2.imshow("BRG frame", BGR_color)
x = cv2.cvtColor(BGR_color, cv2.COLOR_BGR2HSV)
cv2.destroyAllWindows()
'''
import os
import logging
from collections import defaultdict
import pandas as pd
from fol.foq_v2 import (concate_n_chains, copy_query,
negation_sink,
binary_formula_iterator,
concate_iu_chains,
parse_formula,
decompose_D, to_D,
union_bubble,
DeMorgan_replacement,
to_d,
transformation)
def convert_log_to_csv(logfile, outfile):
    """Parse 'record:' lines from *logfile* into a CSV at *outfile*.

    Log lines look like ``...:record:<tab-joined keys>:<tab-joined values>``.
    When *outfile* already exists, its rows are used to skip duplicate
    originals and to remap colliding formula ids; new rows go to
    ``*_extend.csv`` and the merge to ``*_full.csv``.

    Fixes vs. the original:
    - the empty-set defaults no longer clobber the sets just read from an
      existing CSV (and ``original_set`` is now a set, not a tuple);
    - each processed formula_id is added to the dedup set, so collisions
      within a single log file are remapped too;
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by pd.concat.
    """
    already = os.path.exists(outfile)
    if already:
        already_df = pd.read_csv(outfile)
        formula_id_set = set(already_df.formula_id)
        original_set = set(already_df.original)
        outfile = outfile.replace(".csv", "_extend.csv")
    else:
        formula_id_set = set()
        original_set = set()
    data_dict = defaultdict(list)
    with open(logfile, 'rt') as f:
        for line in f.readlines():
            line = line.strip()
            *_, rtype, schema, data = line.split(":")
            row_data = dict()
            if rtype == 'record':
                for k, v in zip(schema.split('\t'), data.split('\t')):
                    row_data[k.strip()] = v.strip()
                if row_data['original'] in original_set:
                    continue  # formula already present in the existing CSV
                if row_data['formula_id'] in formula_id_set:
                    # Remap a colliding id to the next free typeNNNN slot.
                    num = int(row_data['formula_id'][-4:])
                    while True:
                        new_key = f"type{num+1:04d}"
                        if new_key not in formula_id_set:
                            row_data['formula_id'] = new_key
                            break
                        num += 1
                formula_id_set.add(row_data['formula_id'])
                for k in row_data:
                    data_dict[k].append(row_data[k])
    df = pd.DataFrame(data_dict)
    df = df.drop_duplicates(subset=['original'])
    df.to_csv(outfile, index=False)
    if already:
        # Merge new rows with the pre-existing ones into the "full" CSV.
        df = pd.concat([df, already_df], ignore_index=True)
        df.to_csv(outfile.replace("extend", "full"), index=False)
    for c in df.columns:
        logging.info(f"{len(df[c].unique())} {c} unique formulas found")
def convert_to_dnf(query):
    """Rewrite *query* into disjunctive normal form.

    Repeatedly sinks negations, merges projection chains, and bubbles
    unions outward until the transformation reaches a fixpoint.
    """
    # query = transformation(query, projection_sink)
    def dnf_step(q):
        return union_bubble(concate_n_chains(negation_sink(q)))
    return transformation(query, dnf_step)
def count_query_depth(query):
    """Return the depth of *query*: projections ('p') add one level,
    set operators take the max over their operands, entities are 0."""
    op = query.__o__
    if op == 'e':
        return 0
    if op == 'p':
        return count_query_depth(query.query) + 1
    if op == 'n':
        return count_query_depth(query.query)
    if op in 'uiUID':
        return max(count_query_depth(sub) for sub in query.sub_queries)
    if op == 'd':
        return max(count_query_depth(query.lquery), count_query_depth(query.rquery))
    raise NotImplementedError
def normal_forms_generation(formula):
    """Compute all normal-form rewritings of a formula string.

    Returns a dict mapping form name ('original', 'DeMorgan', 'DNF',
    'diff', their combinations, ...) to the rewritten formula string,
    plus the original query depth.  Each rewriting starts from a fresh
    parse so the transformations do not interfere with one another.
    """
    result = {}
    query = parse_formula(formula)
    result['original_depth'] = count_query_depth(query)
    # Re-serialize so all later parses start from a canonical string.
    formula = query.formula
    # proj, rproj = load_graph()
    # query.backward_sample()
    result["original"] = query.formula
    query = DeMorgan_replacement(parse_formula(formula))
    DM_MultiI = concate_iu_chains(copy_query(query, True))
    result["DeMorgan"] = query.formula
    result["DeMorgan+MultiI"] = DM_MultiI.formula
    query_dnf = convert_to_dnf(parse_formula(formula))
    result["DNF"] = query_dnf.formula
    query = to_d(parse_formula(formula))
    result["diff"] = query.formula
    query = to_d(parse_formula(query_dnf.formula))
    result["DNF+diff"] = query.formula
    query_dnf_multiiu = concate_iu_chains(parse_formula(query_dnf.formula))
    result["DNF+MultiIU"] = query_dnf_multiiu.formula
    query = to_D(parse_formula(query_dnf_multiiu.formula))
    result["DNF+MultiIUD"] = query.formula
    result["DNF+MultiIUd"] = decompose_D(
        parse_formula(result['DNF+MultiIUD'])).formula
    return result
if __name__ == "__main__":
    # formula = "(i,(i,(n,(p,(e))),(p,(i,(n,(p,(e))),(p,(e))))),(u,(p,(p,(e))),(p,(e))))"
    # r = normal_forms_generation(formula)
    # print(r)
    logging.basicConfig(filename='logs/formula_generation.log',
                        filemode='wt',
                        level=logging.INFO)
    total_count = 0
    # normal-form name -> set of distinct formulas produced so far
    reductions = defaultdict(set)
    for k in range(1, 4):
        # Enumerate all depth-3 formulas with k anchor nodes.
        it = binary_formula_iterator(depth=3, num_anchor_nodes=k)
        for i, f in enumerate(it):
            res = normal_forms_generation(f)
            res['formula_id'] = f"type{total_count:04d}"
            res['num_anchor_nodes'] = k
            keys = list(res.keys())
            title_str = "\t".join(keys)
            formula_str = "\t".join(str(res[k]) for k in keys)
            total_count += 1
            # 'record:' lines are what convert_log_to_csv parses back.
            logging.info(f"record:{title_str}:{formula_str}")
            for _k in keys:
                reductions[_k].add(res[_k])
        # NOTE(review): this reads formula_generation_test.log, but
        # basicConfig above writes to formula_generation.log — confirm
        # which log file is intended.
        convert_log_to_csv('logs/formula_generation_test.log',
                           f'outputs/generated_formula_anchor_node={k}.csv')
    for k, v in reductions.items():
        logging.info(f"statistics:{len(v)} {k} produced cumulatively")
    logging.info(f":statistics:{total_count} formulas are produced")
|
# -*- coding: utf-8 -*-
# @Time : 2019/3/18 14:30
# @Author :
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Profile(AbstractUser):
    """Custom user model extending Django's AbstractUser with
    name, department and description fields (labels in Chinese)."""
    name = models.CharField(max_length=50, null=True, blank=True, verbose_name='姓名', help_text='姓名')
    department = models.CharField(max_length=255, null=True, blank=True, verbose_name='部门', help_text='部门')
    description = models.CharField(max_length=255, null=True, blank=True, verbose_name='描述', help_text='描述')

    # Fix: was misspelled "class Mata", so Django silently ignored these
    # options and used the default verbose names.
    class Meta:
        verbose_name = '用户'
        verbose_name_plural = '用户'
# class AttributesLevel(models.Model):
# '''
# AD属性等级
# '''
# apiname = models.CharField(max_length=255, verbose_name='apiname', help_text='apiname')
# attributes = models.CharField(max_length=255, verbose_name='属性详情', help_text='属性详情')
#
# class Mata:
# verbose_name = 'AD属性等级'
# verbose_name_plural = 'AD属性等级'
#
#
#
# class ApinamePermissions(models.Model):
# '''
# API权限限制
# '''
# username = models.ForeignKey(Profile, verbose_name='用户名')
# apiname = models.CharField(max_length=50, verbose_name='API名称', help_text='API名称')
#
# class Mata:
# verbose_name = 'API权限限制'
# verbose_name_plural = 'API权限限制'
|
"""
@author: Badita Marin-Georgian
@email: geo.badita@gmail.com
@date: 22.03.2020 00:21
"""
import pytest
from dronem_gym_env.envs import DronemEnv
from env_interpretation import Meeting
@pytest.fixture
def env4_robots():
    """
    Returns a 4-robot DronemEnv with four scheduled meetings and one
    patrol cycle per robot.
    :return: DronemEnv instance
    """
    return DronemEnv(
        num_robots=4,
        max_memory=15,
        init_memory=10,
        meetings=[
            # Meeting(r1, r2, first_time): robots r1 and r2 first meet at
            # the given time step.
            Meeting(r1=1, r2=2, first_time=4),
            Meeting(r1=2, r2=3, first_time=2),
            Meeting(r1=0, r2=1, first_time=2),
            Meeting(r1=0, r2=2, first_time=4)
        ],
        cycles=[
            # One cycle (list of step lengths) per robot, index-aligned.
            [2],
            [1, 2],
            [3, 5, 4, 2],
            [6, 5]
        ]
    )
@pytest.fixture
def env3_robots():
    """
    Returns a 3-robot DronemEnv with two scheduled meetings and one
    patrol cycle per robot.
    :return: DronemEnv instance
    """
    return DronemEnv(
        num_robots=3,
        init_memory=15,
        max_memory=55,
        meetings=[
            Meeting(r1=1, r2=2, first_time=5),
            Meeting(r1=0, r2=1, first_time=1)
        ],
        cycles=[
            [2],
            [1, 2, 3],
            [4, 3]
        ]
    )
@pytest.fixture
def env3_robots_modified():
    """
    Returns a 3-robot DronemEnv variant with very tight memory
    (init 1 / max 2) to exercise memory-pressure behaviour.
    :return: DronemEnv instance
    """
    return DronemEnv(
        num_robots=3,
        init_memory=1,
        max_memory=2,
        meetings=[
            Meeting(r1=1, r2=2, first_time=2),
            Meeting(r1=0, r2=1, first_time=1)
        ],
        cycles=[
            [2],
            [2, 3, 1],
            [4, 3]
        ]
    )
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 6 22:16:49 2020
@author: Caven
"""
class Solution:
    def subdomainVisits(self, cpdomains: "List[str]") -> "List[str]":
        """LeetCode 811: count visits per domain and every parent domain.

        Each entry is "<count> <domain>"; e.g. "9001 discuss.leetcode.com"
        contributes 9001 visits to discuss.leetcode.com, leetcode.com
        and com.  Returns "<total> <domain>" strings in any order.

        Fixes: annotations are quoted (the bare ``List[str]`` raised
        NameError at definition time since ``typing.List`` is never
        imported here), and split uses maxsplit=1 — each entry has
        exactly one count and one domain.
        """
        counts = {}
        for entry in cpdomains:
            visit, domain = entry.split(' ', 1)
            parts = domain.split('.')
            # Every suffix of the dotted name is a (sub)domain to credit.
            for i in range(len(parts)):
                sub = '.'.join(parts[i:])
                counts[sub] = counts.get(sub, 0) + int(visit)
        return [' '.join([str(total), dom]) for dom, total in counts.items()]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 15:51:25 2020
@author: BreezeCat
"""
import math as m
import matplotlib.pyplot as plt
import copy
import json
class State():
    """Pose (Px, Py, Pth), velocities (V, W) and radius r of an agent."""
    def __init__(self, Px, Py, Pth, V, W, r):
        self.Px = Px
        self.Py = Py
        self.Pth = Pth
        self.V = V
        self.W = W
        self.r = r
    def List(self):
        """Return the fields as [Px, Py, Pth, V, W, r]."""
        return [self.Px, self.Py, self.Pth, self.V, self.W, self.r]
    def Record_state(self, save_file):
        """Append this state as one JSON line to *save_file*.

        Fix: the file is now opened with a context manager so the handle
        is closed even if json.dump raises.
        """
        state = {}
        state['Px'], state['Py'], state['Pth'], state['V'], state['W'], state['r'] = self.Px, self.Py, self.Pth, self.V, self.W, self.r
        with open(save_file, 'a+') as file:
            json.dump(state, file)
            file.write('\n')
        return
class Observed_State():
    """Another agent's position, velocity and radius as seen by an observer."""
    def __init__(self, x, y, Vx, Vy, r):
        self.x, self.y = x, y
        self.Vx, self.Vy = Vx, Vy
        self.r = r
    def List(self):
        """Return the fields as [x, y, Vx, Vy, r]."""
        return [self.x, self.y, self.Vx, self.Vy, self.r]
class Agent():
    """A mobile agent with a unicycle-style state, a goal pose, and a
    recorded path; supports plotting, JSON logging and oscillation checks."""
    def __init__(self, name, Px, Py, Pth, V, W, r, gx, gy, gth, rank, mode = 'Greedy', nonholonomic = True):
        self.name = name
        self.state = State(Px, Py, Pth, V, W, r)
        self.gx, self.gy, self.gth, self.rank, self.mode = gx, gy, gth, rank, mode
        # Path stores deep copies, so it is a true history of states.
        self.Path = [copy.deepcopy(self.state)]
        self.Goal_state = 'Not'
        self.oscill_count = 0
        self.oscill_time = 0
        self.nonholonomic = nonholonomic
    def Update_state(self, dt = 0.1):
        """Advance one step of *dt* at the current V/W (midpoint heading)."""
        TH = Correct_angle(self.state.Pth + self.state.W * dt)
        self.state.Px += self.state.V * m.cos((self.state.Pth+TH)/2) * dt
        self.state.Py += self.state.V * m.sin((self.state.Pth+TH)/2) * dt
        self.state.Pth = TH
        self.Path.append(copy.deepcopy(self.state))
    def Predit_state(self, V_pred, W_pred, dt = 0.1):
        """Return the state reached after *dt* at (V_pred, W_pred)
        without mutating self."""
        TH = Correct_angle(self.state.Pth + W_pred * dt)
        Px_pred = self.state.Px + V_pred * m.cos((self.state.Pth+TH)/2) * dt
        Py_pred = self.state.Py + V_pred * m.sin((self.state.Pth+TH)/2) * dt
        return State(Px_pred, Py_pred, TH, V_pred, W_pred, self.state.r)
    def Set_V_W(self, V_next, W_next):
        """Set the commanded linear and angular velocity."""
        self.state.V = V_next
        self.state.W = W_next
    def Relative_observed_goal(self, observe_x, observe_y, observe_th):
        """Express this agent's goal pose in the observer's frame."""
        gx_temp = self.gx - observe_x
        gy_temp = self.gy - observe_y
        gth_obs = Correct_angle(self.gth - observe_th)
        th_obs = observe_th
        # Rotate the translated goal into the observer's axes.
        gx_obs = m.cos(th_obs) * gx_temp + m.sin(th_obs) * gy_temp
        gy_obs = -m.sin(th_obs) * gx_temp + m.cos(th_obs) * gy_temp
        return gx_obs, gy_obs, gth_obs
    def Relative_observed_state(self, observe_x, observe_y, observe_th):
        """Return this agent's position and velocity in the observer's frame."""
        x_temp = self.state.Px - observe_x
        y_temp = self.state.Py - observe_y
        #th_obs = Correct_angle(self.state.Pth - observe_th)
        th_obs = observe_th
        x_obs = m.cos(th_obs) * x_temp + m.sin(th_obs) * y_temp
        y_obs = -m.sin(th_obs) * x_temp + m.cos(th_obs) * y_temp
        # Velocity (V along heading Pth) rotated into the observer's axes.
        Vx_obs = m.cos(th_obs) * self.state.V * m.cos(self.state.Pth) + m.sin(th_obs) * self.state.V * m.sin(self.state.Pth)
        Vy_obs = -m.sin(th_obs) * self.state.V * m.cos(self.state.Pth) + m.cos(th_obs) * self.state.V * m.sin(self.state.Pth)
        return Observed_State(x_obs, y_obs, Vx_obs, Vy_obs, self.state.r)
    def Plot_state(self, ax, color = 'b'):
        """Draw the current pose: dot, heading arrow (if nonholonomic),
        and a circle of the agent's radius."""
        L = 0.5  # heading-arrow length
        plt.plot(self.state.Px, self.state.Py, color+'o')
        if self.nonholonomic:
            plt.arrow(self.state.Px, self.state.Py, L*m.cos(self.state.Pth), L*m.sin(self.state.Pth))
        circle1 = plt.Circle( (self.state.Px, self.state.Py), self.state.r, color = color, fill = False)
        ax.add_artist(circle1)
    def Plot_goal(self, ax ,color = 'b'):
        """Draw the goal position (and heading arrow if nonholonomic)."""
        L = 0.5
        plt.plot(self.gx, self.gy, color+'o')
        if self.nonholonomic:
            plt.arrow(self.gx, self.gy, L*m.cos(self.gth), L*m.sin(self.gth))
    def Plot_Path(self, ax, color = 'b'):
        """Draw the whole travelled path, marking every 10th pose and the
        final pose with a dot/arrow/circle."""
        L = 0.5
        circle = []
        i = 0
        last_item = self.Path[0]
        for item in self.Path:
            plt.plot([last_item.Px, item.Px], [last_item.Py, item.Py], color+'-')
            if i%10 == 0:
                plt.plot(item.Px, item.Py, color+'o')
                if self.nonholonomic:
                    plt.arrow(item.Px, item.Py, L*m.cos(item.Pth), L*m.sin(item.Pth))
                circle.append(plt.Circle( (item.Px, item.Py), item.r, color = color, fill = False))
                #plt.text(item.Px-0.2, item.Py, str(i), bbox=dict(color=color, alpha=0.5))
                ax.add_artist(circle[-1])
            i += 1
            last_item = item
        # Always mark the final pose as well.
        plt.plot(item.Px, item.Py, color+'o')
        if self.nonholonomic:
            plt.arrow(item.Px, item.Py, L*m.cos(item.Pth), L*m.sin(item.Pth))
        circle.append(plt.Circle( (item.Px, item.Py), item.r, color = color, fill = False))
        #plt.text(item.Px-0.2, item.Py, str(i-1), bbox=dict(color=color, alpha=0.5))
        ax.add_artist(circle[-1])
        return ax
    def Record_data(self, save_path):
        """Append run metadata and then every path state, one JSON object
        per line, to <save_path>/<name>.json."""
        file_name = save_path + '/' + self.name + '.json'
        init_para = {}
        init_para['gx'], init_para['gy'], init_para['gth'], init_para['rank'], init_para['mode'], init_para['result'] = self.gx, self.gy, self.gth, self.rank, self.mode, self.Goal_state
        file = open(file_name, 'a+')
        json.dump(init_para, file)
        file.writelines('\n')
        file.close()
        for state in self.Path:
            state.Record_state(file_name)
    def Transform_to_Dict(self):
        """Serialize constructor arguments to a plain dict (see DicttoAgent)."""
        D = {}
        D['name'], D['Px'], D['Py'], D['Pth'], D['V'], D['W'], D['r'] = self.name, self.state.Px, self.state.Py, self.state.Pth, self.state.V, self.state.W, self.state.r
        D['gx'], D['gy'], D['gth'], D['rank'], D['mode'] = self.gx, self.gy, self.gth, self.rank, self.mode
        return D
    def Check_oscillation(self, check_step):
        """Switch to 'Oscillation' mode when the pose has barely changed
        over the last *check_step* steps; otherwise restore 'Greedy'."""
        if self.oscill_count > 0:
            # Cool-down window after a detected oscillation.
            if self.oscill_count <= 2:
                self.mode = 'Greedy'
            self.oscill_count -= 1
            return
        if len(self.Path) < check_step:
            print('Not move enough step')
            return
        else:
            Pose_now = (self.Path[-1].Px, self.Path[-1].Py, self.Path[-1].Pth)
            Pose_check = (self.Path[-check_step].Px, self.Path[-check_step].Py, self.Path[-check_step].Pth)
            #Pose_check_p1 = (self.Path[-check_step+1].Px, self.Path[-check_step+1].Py, self.Path[-check_step+1].Pth)
            # Thresholds: 4 cm in x/y, 0.05 rad in heading.
            if abs(Pose_now[0] - Pose_check[0]) < 0.04 and abs(Pose_now[1] - Pose_check[1]) < 0.04 and abs(Pose_now[2] - Pose_check[2]) < 0.05:
                print('oscillation')
                self.mode = 'Oscillation'
                self.oscill_count = 5
                self.oscill_time += 1
                return
            #elif abs(Pose_now[0] - Pose_check_p1[0]) < 0.04 and abs(Pose_now[1] - Pose_check_p1[1]) < 0.04 and abs(Pose_now[2] - Pose_check_p1[2]) < 0.05:
            #    print('oscillation')
            #    self.mode = 'Oscillation'
            #    return
            else:
                self.mode = 'Greedy'
                return
def DicttoAgent(agent_dict):
    """Rebuild an Agent from the dict produced by Agent.Transform_to_Dict."""
    keys = ('name', 'Px', 'Py', 'Pth', 'V', 'W', 'r',
            'gx', 'gy', 'gth', 'rank', 'mode')
    # Order matches Agent.__init__'s positional parameters exactly.
    return Agent(*(agent_dict[k] for k in keys))
def Correct_angle(angle):
    """Normalize *angle* (radians) into the interval [0, 2*pi)."""
    wrapped = m.fmod(angle, 2*m.pi)
    # fmod keeps the sign of its first argument; shift negatives up.
    return wrapped + 2*m.pi if wrapped < 0 else wrapped
def Dot(vector1, vector2):
    """2-D dot product of the first two components of each vector."""
    return sum(a * b for a, b in zip(vector1[:2], vector2[:2]))
def Norm2(vector):
    """Euclidean length of a 2-D vector."""
    squared = Dot(vector, vector)
    return m.sqrt(squared)
def Tangent_angle_with_circle(point, radius):
    """Half-angle subtended by a circle of *radius* at the origin, as
    seen from *point* (angle between center line and tangent line)."""
    distance = Norm2(point)
    return m.asin(radius / distance)
def If_in_VO(main_state: State, other_state: Observed_State, time_factor=2):
    """Velocity-obstacle membership test.

    Returns True when the agents' relative motion leads to a collision
    within *time_factor* time units (or at all, when time_factor='INF').
    NOTE(review): relative_velocity uses main_state.V directly for the
    x-component, which assumes the main agent's velocity lies along its
    own x-axis in this frame — confirm the frame convention.
    """
    relative_velocity = (other_state.Vx - main_state.V, other_state.Vy)
    P = (-other_state.x, -other_state.y)  # from the other agent back to us
    R = main_state.r + other_state.r      # combined collision radius
    if Norm2(relative_velocity) == 0:
        # No relative motion: never colliding (unless already overlapping,
        # which this early return skips).
        return False
    cosAngle = Dot(P, relative_velocity)/(Norm2(P)* Norm2(relative_velocity))
    # Clamp against floating-point drift before acos.
    if cosAngle > 1:
        cosAngle = 1
    elif cosAngle < -1:
        cosAngle = -1
    Relative_angle = m.acos(cosAngle)
    if Norm2(P) <= R:
        # Already overlapping.
        return True
    else:
        TAWC = Tangent_angle_with_circle(P, R)
        if Relative_angle > TAWC:
            # Relative velocity points outside the collision cone.
            return False
        else:
            if time_factor == 'INF':
                return True
            elif (Dot(P, P)/Dot(relative_velocity, P)) < time_factor:
                # Estimated time to closest approach is within the horizon.
                return True
            else:
                return False
def main_test():
    """Smoke test: plot two agents, integrate 45 steps, plot their paths,
    and dump agent A's log to the 'logs' directory."""
    A = Agent('A',1,1,0,1,1,0.5,0,0,0,1)
    B = Agent('B',-1,-1,0,1,-1,0.2,0,0,0,2)
    ax = plt.gca()
    ax.set_xlim((-5,5))
    ax.set_ylim((-5,5))
    A.Plot_state(ax = ax)
    B.Plot_state(ax = ax, color = 'r')
    plt.savefig('test.png')
    plt.close('all')
    # Fresh axes for the path plot.
    ax = plt.gca()
    ax.set_xlim((-5,5))
    ax.set_ylim((-5,5))
    for i in range(45):
        A.Update_state(0.1)
        B.Update_state(0.1)
    A.Plot_Path(ax = ax)
    B.Plot_Path(ax = ax, color='r')
    # NOTE(review): assumes a 'logs' directory already exists — confirm.
    A.Record_data('logs')
"""Test IOLoop watcher interface."""
from julythontweets.watcher import Watcher
from tornado.ioloop import IOLoop
from unittest2 import TestCase
class TestWatcher(TestCase):
    """Tests for the base Watcher interface."""
    def test_base_watcher(self):
        """Test the watcher interface."""
        ioloop = IOLoop()
        def callback():
            pass
        watcher = Watcher(ioloop, callback, {})
        # The base class must leave start() abstract.
        with self.assertRaises(NotImplementedError):
            watcher.start()
|
import os
import logging
import ckan.plugins as p
from ckan.model.types import make_uuid
from ckan.lib.celery_app import celery
log = logging.getLogger(__name__)
def create_archiver_resource_task(resource, queue):
    """Put a celery task on *queue* to archive a single CKAN resource."""
    from pylons import config
    if p.toolkit.check_ckan_version(max_version='2.2.99'):
        # earlier CKANs had ResourceGroup
        package = resource.resource_group.package
    else:
        package = resource.package
    # Human-readable, unique-ish id: package/short-resource-id/random.
    task_id = '%s/%s/%s' % (package.name, resource.id[:4], make_uuid()[:4])
    # The worker needs the ini path to load the CKAN config itself.
    ckan_ini_filepath = os.path.abspath(config['__file__'])
    celery.send_task('archiver.update_resource',
                     args=[ckan_ini_filepath, resource.id, queue],
                     task_id=task_id, queue=queue)
    log.debug('Archival of resource put into celery queue %s: %s/%s url=%r',
              queue, package.name, resource.id, resource.url)
def create_archiver_package_task(package, queue):
    """Put a celery task on *queue* to archive a whole CKAN package."""
    from pylons import config
    task_id = '%s/%s' % (package.name, make_uuid()[:4])
    # The worker needs the ini path to load the CKAN config itself.
    ckan_ini_filepath = os.path.abspath(config['__file__'])
    celery.send_task('archiver.update_package',
                     args=[ckan_ini_filepath, package.id, queue],
                     task_id=task_id, queue=queue)
    log.debug('Archival of package put into celery queue %s: %s',
              queue, package.name)
def get_extra_from_pkg_dict(pkg_dict, key, default=None):
    """Return the value of extra *key* from a package dict, or *default*
    when no extra with that key exists."""
    matches = (extra['value']
               for extra in pkg_dict.get('extras', [])
               if extra['key'] == key)
    return next(matches, default)
|
# Read a salary and print it with the applicable raise:
# 10% for salaries above R$1250, 15% otherwise.
sal = float(input('Digite o Salario do Funcionario: '))
if sal > 1250:
    print('Com um aumento ele passara a receber R${:.2f}'.format(sal + (sal / 100 * 10)))
else:
    # Fix: the message misspelled "aumento" as "aumneto".
    print('Com um aumento ele passara a receber R${:.2f}'.format(sal + (sal / 100 * 15)))
|
def addlist():
    """Prompt for a fruit name, append it to a starter list, and print both
    a confirmation and the resulting list."""
    fruits = ["Apple", "Orange", "Mango"]
    fruit = input("Enter the fruit name:\t")
    fruits.append(fruit)
    # Fix: the message misspelled "successfully" as "succesfully".
    print("You have successfully added \"{}\" into the list".format(fruit))
    print(fruits)
addlist()
#!/usr/bin/python2.7
import cv
# Legacy OpenCV 1.x (cv module, Python 2) webcam Haar-cascade demo.
capture = cv.CaptureFromCAM(0)
hc_src = '/home/ko/Downloads/Hand.Cascade.1.xml'
# NOTE(review): this overwrites hc_src, so the hand cascade above is
# never used — faces are detected instead.
hc_src = '/home/ko/Downloads/haarcascade_frontalface_default.xml'
hc = cv.Load(hc_src)
storage = cv.CreateMemStorage(0)
while True:
    frame = cv.QueryFrame(capture)
    # scale factor 1.2, min 2 neighbours, Canny pruning, no min size
    hands = cv.HaarDetectObjects(frame, hc, storage,
                                 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING,
                                 (0,0))
    for (x,y,w,h),n in hands:
        cv.Rectangle(frame, (x,y), (x+w, y+h), 255)
    cv.ShowImage('web', frame)
    c = cv.WaitKey(2)
    if c == 27:   # quit on ESC
        break
|
#!/usr/bin/env python
#--coding:utf-8-*-
'''
创建人: Javen
创建时间:2017/2/9
'''
import sys
from Models.DbTable.Abstract import Model_DbTable_Abstract
class Model_Mapper_Abstract(object):
    def __init__(self):
        """Wire up the shared DB-table facade: the raw SQL executor
        (mapper), the Amazon SQL-builder helpers (amazon) and the
        underlying connection (conn)."""
        self.dbTable = Model_DbTable_Abstract()
        self.mapper = self.dbTable.mapper
        self.amazon = self.dbTable.amazon
        self.conn = self.dbTable.db
def findData(self, data=None, From=None, where=None, limit=None, offset=None, order=None):
v = []
if (where):
for key, value in where.items():
v.append('`%s` = "%s"' % (key, value))
where = " and ".join(v)
else:
sql = "SELECT * FROM %s" % (From)
# print (sql)
results = self.mapper.select(sql)
return results
if (data == "all"):
if (order):
if (limit):
if (offset):
sql = 'SELECT * FROM %s WHERE %s ORDER BY %s LIMIT %s OFFSET %s' % (From, where, order, limit, offset)
else:
sql = 'SELECT * FROM %s WHERE %s ORDER BY %s LIMIT %s' % (From, where, order, limit)
else:
sql = 'SELECT * FROM %s WHERE %s ORDER BY %s' % (From, where, order)
else:
if (limit):
if (offset):
sql = 'SELECT * FROM %s WHERE %s LIMIT %s OFFSET %s' % (From, where, limit, offset)
else:
sql = 'SELECT * FROM %s WHERE %s LIMIT %s' % (From, where, limit)
else:
sql = 'SELECT * FROM %s WHERE %s' % (From, where)
else:
if (order):
if (limit):
if (offset):
sql = 'SELECT %s FROM %s WHERE %s ORDER BY %s LIMIT %s OFFSET %s' % (data, From, where, order, limit, offset)
else:
sql = 'SELECT %s FROM %s WHERE %s ORDER BY %s LIMIT %s' % (data, From, where, order, limit)
else:
sql = 'SELECT %s FROM %s WHERE %s ORDER BY %s' % (data, From, where, order)
else:
if (limit):
if (offset):
sql = 'SELECT %s FROM %s WHERE %s LIMIT %s OFFSET %s' % (data, From, where, limit, offset)
else:
sql = 'SELECT %s FROM %s WHERE %s LIMIT %s' % (data, From, where, limit)
else:
sql = 'SELECT %s FROM %s WHERE %s' % (data, From, where)
results = self.mapper.select(sql)
return results
    def update(self, table=None, data=None, searchData=None):
        """Dispatch an UPDATE for *table*: pick the matching self.amazon
        SQL-builder, then execute through self.mapper.update.

        searchData identifies the target row(s); data carries new values.
        NOTE(review): when *table* matches no branch, ``result`` is never
        bound and the final return raises NameError — confirm callers
        only pass known table names.
        """
        # Regular scrape: update the download queue
        if (table == "download_queue"):
            id = searchData['id']
            sql = self.amazon.update_download_queue(id, data)
            result = self.mapper.update(sql)
        # Review scrape: update the review download queue
        elif (table == "download_queue_review"):
            id = searchData['id']
            sql = self.amazon.update_download_queue_review(str(id), data)
            result = self.mapper.update(sql)
        # Mobile scrape: update the mobile download queue
        elif (table == "mobile_download_queue"):
            region = data["region"]
            type = data["type"]
            value = data["value"].strip()
            status = data["status"]
            scrape_count = data["scrape_count"]
            sql = self.amazon.update_mobile_download_queue(str(region), str(type), value, str(status), str(scrape_count))
            result = self.mapper.update(sql)
        # Product scrape: update the product table
        elif (table == "amazon_product"):
            region = searchData["region"]
            asin = searchData["asin"]
            sql = self.amazon.product_update_sql_joint(region, asin, data)
            result = self.mapper.update(sql)
        # Product scrape: update the product images table
        elif (table == "amazon_product_images"):
            region = searchData["region"]
            asin = searchData["asin"]
            url = searchData["url"]
            sql = self.amazon.product_images_update_sql_joint(region, asin, url, data)
            result = self.mapper.update(sql)
        # Keyword scrape: update the keywords table
        elif (table == "keywords"):
            region = searchData["region"]
            keywords = searchData["name"]
            sql = self.amazon.keywordsObject_update_sql_joint(region, keywords, data)
            result = self.mapper.update(sql)
        # Mobile keyword scrape: update the mobile keywords table
        elif (table == "mobile_keywords"):
            region = searchData["region"]
            keywords = searchData["name"]
            sql = self.amazon.mobilekeywordsObject_update_sql_joint(region, keywords, data)
            result = self.mapper.update(sql)
        # Regular scrape: update the upload queue table
        elif (table == "upload_queue"):
            region = data["region"]
            type = data["type"]
            asin = data["value"]
            sql = self.amazon.getUploadQueue_update_sql_joint(region, type, asin)
            result = self.mapper.update(sql)
        # Mobile scrape: update the mobile upload queue table
        elif (table == "mobile_upload_queue"):
            region = data["region"]
            type = data["type"]
            asin = data["value"]
            sql = self.amazon.getMobileUploadQueue_update_sql_joint(region, type, asin)
            result = self.mapper.update(sql)
        # Seller-product scrape: update the seller product offer table
        elif (table == "amazon_seller_product_offer"):
            region = searchData["region"]
            asin = searchData["asin"]
            sql = self.amazon.offer_update_sql_joint(region, asin, data)
            result = self.mapper.update(sql)
        # Seller-info scrape: update the seller table
        elif (table == "amazon_seller"):
            region = searchData["region"]
            sql = self.amazon.seller_update_sql_joint(region, data)
            result = self.mapper.update(sql)
        # Seller-product scrape: update the seller product table
        elif (table == "amazon_seller_product"):
            region = searchData["region"]
            asin = searchData["asin"]
            sql = self.amazon.sellerproduct_update_sql_joint(region, asin, data)
            result = self.mapper.update(sql)
        # Seller scrape: update the seller info table
        elif (table == "seller"):
            region = searchData["region"]
            seller_id = searchData["seller_id"]
            sql = self.amazon.sellerinfo_update_sql_joint(region, seller_id, data)
            result = self.mapper.update(sql)
        # Seller-product scrape: reset the products' ranks first
        elif (table == "productssellerrank"):
            region = data["region"]
            seller_id = data["seller_id"]
            sql = self.amazon.sellerproducts_rankupdate_sql_joint(region, seller_id)
            result = self.mapper.update(sql)
        # Review-list scrape: update the product review table
        elif (table == "amazon_product_review"):
            region = searchData['region']
            asin = searchData['asin']
            review_id = searchData['review_id']
            sql = self.amazon.productReview_update_sql_joint(region, asin, review_id, data)
            result = self.mapper.update(sql)
        # Review-page image info (update)
        elif (table == "amazon_product_review_image"):
            region = searchData['region']
            review_id = searchData['review_id']
            url = searchData['url']
            sql = self.amazon.productReviewImage_update_sql_joint(region, review_id, url, data)
            result = self.mapper.update(sql)
        # Review-page video info (update)
        elif (table == "amazon_product_review_video"):
            region = searchData['region']
            review_id = searchData['review_id']
            sql = self.amazon.productReviewVideo_update_sql_joint(region, review_id, data)
            result = self.mapper.update(sql)
        return result
def insert(self, table=None, data=None, searchData=None):
    """Build the INSERT statement for the given logical table and run it.

    table: destination table name; selects which SQL-builder to call.
    data: scraped payload for the row(s).
    searchData: lookup keys (region/asin/keywords/...) identifying the row.
    Returns the mapper's insert result, or None when `table` matches no
    known branch (the original raised NameError in that case).
    """
    result = None  # bug fix: previously unbound for unknown table names
    # Product info captured while scraping a product page.
    if table == "amazon_product":
        region = searchData["region"]
        asin = searchData["asin"]
        sql = self.amazon.product_insert_sql_joint(region, asin, data)
        result = self.mapper.insert(sql)
    # Product image list captured while scraping a product page.
    elif table == "amazon_product_images":
        region = searchData["region"]
        asin = searchData["asin"]
        url = searchData["url"]
        sql = self.amazon.product_images_insert_sql_joint(region, asin, url, data)
        result = self.mapper.insert(sql)
    # Single product image record.
    elif table == "amazon_product_image":
        region = searchData["region"]
        asin = searchData["asin"]
        sql = self.amazon.product_image_insert_sql_joint(region, asin, data)
        result = self.mapper.insert(sql)
    # Ad keyword rows captured while scraping keywords.
    elif table == "amazon_product_keywords_ad":
        region = searchData["region"]
        keywords = searchData["keywords"]
        node_id = searchData["node_id"]
        ad_position = searchData["ad_position"]
        ad_position_type = searchData["ad_position_type"]
        position = searchData["position"]
        sql = self.amazon.keywords_ad_insert_sql_joint(region, keywords, node_id, ad_position, ad_position_type, position, data)
        result = self.mapper.insert(sql)
    # Keyword ranking rows captured while scraping keywords.
    elif table == "amazon_product_keywords_rank":
        region = searchData["region"]
        keywords = searchData["keywords"]
        node_id = searchData["node_id"]
        rank = searchData["rank"]
        sql = self.amazon.keywords_rank_insert_sql_joint(region, keywords, node_id, rank, data)
        result = self.mapper.insert(sql)
    # Keyword info row captured while scraping keywords.
    elif table == "keywords":
        region = searchData["region"]
        keywords = searchData["name"]
        sql = self.amazon.keywordsObject_insert_sql_joint(region, keywords, data)
        result = self.mapper.insert(sql)
    # Mobile-side keyword info row.
    elif table == "mobile_keywords":
        region = searchData["region"]
        keywords = searchData["name"]
        sql = self.amazon.mobilekeywordsObject_insert_sql_joint(region, keywords, data)
        result = self.mapper.insert(sql)
    # Upload-queue entry created while scraping keywords.
    elif table == "upload_queue":
        region = data["region"]
        entry_type = data["type"]  # renamed: avoid shadowing builtin `type`
        asin = data["value"]
        queued_time = data["time"]
        sql = self.amazon.getUploadQueue_insert_sql_joint(region, entry_type, asin, queued_time)
        result = self.mapper.insert(sql)
    # Mobile upload-queue entry created while scraping keywords.
    elif table == "mobile_upload_queue":
        region = data["region"]
        entry_type = data["type"]
        asin = data["value"]
        sql = self.amazon.getMobileUploadQueue_insert_sql_joint(region, entry_type, asin)
        result = self.mapper.insert(sql)
    # Seller offer rows captured while scraping seller offers.
    elif table == "amazon_seller_product_offer":
        region = searchData["region"]
        asin = searchData["asin"]
        sql = self.amazon.offer_insert_sql_joint(region, asin, data)
        result = self.mapper.insert(sql)
    # Seller info row captured while scraping seller offers.
    elif table == "amazon_seller":
        region = searchData["region"]
        sql = self.amazon.seller_insert_sql_joint(region, data)
        result = self.mapper.insert(sql)
    # Seller-product row captured while scraping seller offers.
    elif table == "amazon_seller_product":
        region = searchData["region"]
        asin = searchData["asin"]
        sql = self.amazon.sellerproduct_insert_sql_joint(region, asin, data)
        result = self.mapper.insert(sql)
    # Seller info row captured while scraping a seller page.
    elif table == "seller":
        region = searchData["region"]
        seller_id = searchData["seller_id"]
        sql = self.amazon.sellerinfo_insert_sql_joint(region, seller_id, data)
        result = self.mapper.insert(sql)
    # Insert the seller stub before inserting its product.
    elif table == "product_seller":
        region = data["region"]
        seller_id = data["seller_id"]
        sql = self.amazon.product_seller_insert_sql_joint(region, seller_id)
        result = self.mapper.insert(sql)
    # Insert into the scrape bookkeeping table.
    elif table == "scrape":
        sql = self.amazon.insert_scrape(data)
        try:
            result = self.mapper.insert(sql)
        except Exception:
            # Deliberate best-effort insert (e.g. duplicates are ignored),
            # but no longer a bare except that would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
    # Insert into the product review table.
    elif table == "amazon_product_review":
        sql = self.amazon.productReview_insert_sql_joint(data)
        result = self.mapper.insert(sql)
    # Review-page image info.
    elif table == "amazon_product_review_image":
        sql = self.amazon.productReviewImage_insert_sql_joint(data)
        result = self.mapper.insert(sql)
    # Review-page video info.
    elif table == "amazon_product_review_video":
        sql = self.amazon.productReviewVideo_insert_sql_joint(data)
        result = self.mapper.insert(sql)
    return result
def delete(self, table=None, searchData=None):
    """Build and execute a DELETE for keyword ad/rank rows.

    Returns the mapper's delete result, or None when `table` matches no
    known branch (the original raised NameError in that case).
    """
    result = None  # bug fix: previously unbound for unknown table names
    if table == "amazon_product_keywords_ad":
        region = searchData["region"]
        keywords = searchData["keywords"]
        node_id = searchData["node_id"]
        sql = self.amazon.keywords_ad_delete_sql_joint(region, keywords, node_id)
        result = self.mapper.delete(sql)
    elif table == "amazon_product_keywords_rank":
        region = searchData["region"]
        keywords = searchData["keywords"]
        node_id = searchData["node_id"]
        sql = self.amazon.keywords_rank_delete_sql_joint(region, keywords, node_id)
        result = self.mapper.delete(sql)
    return result
|
import os
import xml.etree.ElementTree as ET
from rogue_sky import darksky
DARKSKY_API_KEY = os.environ["DARKSKY_SECRET_KEY"]
def test_ping(backend_api_client):
    """The health endpoint answers 200 with an OK status payload."""
    resp = backend_api_client.get("/api/health")
    assert resp.status_code == 200
    assert resp.get_json() == {"status": "ok"}
def test_get_blog_posts(backend_api_client):
    """The blog index returns a non-empty list whose entries only carry
    the expected summary keys."""
    resp = backend_api_client.get("/api/blog")
    assert resp.status_code == 200
    posts = resp.get_json()
    assert isinstance(posts, list)
    assert len(posts) > 0
    assert set(posts[0]) <= {"title", "date", "summary", "url"}
def test_get_blog_post(backend_api_client):
    """A single post is returned as a dict with only the expected keys."""
    resp = backend_api_client.get("/api/blog/something-worth-writing")
    assert resp.status_code == 200
    post = resp.get_json()
    assert isinstance(post, dict)
    assert set(post) <= {"title", "date", "summary", "content", "url"}
def test_get_rss(backend_api_client):
    """The RSS feed endpoint serves parseable XML."""
    resp = backend_api_client.get("/api/rss")
    assert resp.status_code == 200
    # ET raises on malformed XML; a parsed root with children is truthy.
    assert ET.fromstring(resp.data.decode())
def test_get_astronomical_events(backend_api_client):
    """Events come back as a non-empty list of dicts with expected keys."""
    resp = backend_api_client.get("/api/astronomical_events")
    assert resp.status_code == 200
    events = resp.get_json()
    assert isinstance(events, list)
    assert len(events) > 0
    first = events[0]
    assert isinstance(first, dict)
    assert set(first) <= {"date", "event", "info", "type"}
|
import matplotlib.pyplot as plt
class Plots:
    """Helper for creating pre-configured matplotlib figures."""

    def plt_ini(self, figw=16.0, figh=6.0, rows=1, cols=1):
        """Create a styled figure holding a rows x cols grid of subplots
        with shared axes.

        figw/figh: figure size in inches.
        rows/cols: subplot grid dimensions.
        Returns (fig, axes) as produced by plt.subplots.
        """
        # Apply the style *before* creating the figure — calling
        # plt.style.use after subplots() (as the original did) has no
        # effect on the already-created figure.
        try:
            plt.style.use('seaborn-darkgrid')
        except OSError:
            # Seaborn styles were renamed in matplotlib >= 3.6.
            plt.style.use('seaborn-v0_8-darkgrid')
        # plt.subplots takes (nrows, ncols); the original passed
        # (cols, rows), transposing any non-square grid.
        fig, axes = plt.subplots(rows, cols, sharex=True, sharey=True,
                                 figsize=(figw, figh))
        # One-inch margins expressed as fractions of the figure size.
        plt.subplots_adjust(left=1 / figw, right=1 - 1 / figw,
                            bottom=1 / figh, top=1 - 1 / figh)
        return fig, axes
# This file is part of HappySchool.
#
# HappySchool is the legal property of its developers, whose names
# can be found in the AUTHORS file distributed with this source
# distribution.
#
# HappySchool is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HappySchool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with HappySchool. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from rest_framework import serializers
from core.serializers import StudentSerializer, StudentModel
from .models import LatenessSettingsModel, LatenessModel
class LatenessSettingsSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of LatenessSettingsModel."""
    class Meta:
        model = LatenessSettingsModel
        fields = '__all__'
class LatenessSerializer(serializers.ModelSerializer):
    """Serializer for lateness records.

    In order to write with the id and read the entire object, it uses two
    fields per relation: `student` (nested representation, read-only) and
    `student_id` (primary key accepted on write, mapped onto the same
    `student` source).
    """
    student = StudentSerializer(read_only=True)
    student_id = serializers.PrimaryKeyRelatedField(queryset=StudentModel.objects.all(),
                                                    source='student', required=False, allow_null=True)

    class Meta:
        model = LatenessModel
        fields = ('id', 'student', 'student_id', 'datetime_creation', 'sanction_id',
                  'lateness_count', 'justified', 'has_sanction')
|
#!/usr/bin/env python
#=============================================================================
#
# Copyright (c) 2018 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
#=============================================================================
from converters import code_to_message
from converters.tensorflow.util import ConverterError
from converters.tensorflow.common import LayerDescriptor, LayerResolver, LayerBuilder
from converters.tensorflow.graph_matcher import (
ConverterSequenceNode,
NonConsumableConverterSequenceNode,
GraphSequence
)
class PermuteLayerResolver(LayerResolver, object):
    """Resolves TensorFlow Transpose ops into Permute layer descriptors."""

    class Descriptor(LayerDescriptor):
        def __init__(self, name, nodes, order, output_names=None):
            super(PermuteLayerResolver.Descriptor, self).__init__('Permute', name, nodes, output_names=output_names)
            # Evaluated axis-permutation order for the transpose.
            self.order = order

    def __init__(self):
        # Pattern 1: Transpose whose permutation order is a Const node.
        self.sequence_with_explicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Const']),
            NonConsumableConverterSequenceNode('input', ['?']),
        ])
        self.sequence_with_explicit_order.set_inputs('root', ['input', 'order'])
        self.sequence_with_explicit_order.set_outputs(['root'])

        # Pattern 2: permutation order computed from the input's rank via a
        # Sub/Range/Rank subgraph (order = Sub(b, Range(d, e, Rank(input)))).
        self.sequence_with_implicit_order = GraphSequence([
            ConverterSequenceNode('root', ['Transpose']),
            ConverterSequenceNode('order', ['Sub']),
            ConverterSequenceNode('a', ['Sub']),
            ConverterSequenceNode('b', ['Const']),
            ConverterSequenceNode('c', ['Range']),
            ConverterSequenceNode('d', ['Const']),
            ConverterSequenceNode('e', ['Const']),
            ConverterSequenceNode('f', ['Rank']),
            NonConsumableConverterSequenceNode('input', ['?'])
        ])
        self.sequence_with_implicit_order.set_inputs('root', ['input', 'order'])
        self.sequence_with_implicit_order.set_inputs('order', ['a', 'c'])
        self.sequence_with_implicit_order.set_inputs('a', ['b', 'f'])
        self.sequence_with_implicit_order.set_inputs('c', ['d', 'e', 'f'])
        self.sequence_with_implicit_order.set_inputs('f', ['input'])
        self.sequence_with_implicit_order.set_outputs(['root'])

        self.sequences = [self.sequence_with_explicit_order, self.sequence_with_implicit_order]

    def resolve_layer(self, graph_matcher, graph_helper):
        """Match both transpose patterns and emit a Permute descriptor for
        each match.

        Raises ConverterError when the evaluated order tensor is not a
        rank-1 tensor containing every input axis index.
        """
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                transpose_op = match['root']
                input_op = match['input']
                order_op = match['order']
                # Evaluate the (possibly computed) order to a concrete value.
                order_tensor = graph_helper.evaluate_tensor_output(order_op.outputs[0])
                input_shape = graph_helper.get_op_output_shape(input_op)
                order_shape = graph_helper.get_op_output_shape(order_op)
                input_rank = len(input_shape)
                order_rank = len(order_shape)
                try:
                    # Order must be 1-D and mention each input axis index.
                    assert order_rank == 1
                    for d in range(input_rank):
                        assert d in order_tensor
                except AssertionError:
                    raise ConverterError(code_to_message.get_message(
                        'ERROR_TF_PERMUTE_INVALID_ORDER_TENSOR')(str(order_tensor)))
                consumed_nodes = match.consumed_nodes
                permute_descriptor = PermuteLayerResolver.Descriptor(
                    str(transpose_op.name), consumed_nodes, order_tensor,
                    output_names=[str(transpose_op.outputs[0].name)])
                descriptors.extend([permute_descriptor])
        return descriptors
class PermuteLayerBuilder(LayerBuilder):
    def build_layer(self, converter_context, descriptor, input_descriptors, output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: PermuteLayerResolver.Descriptor
        :rtype: int
        """
        # Register a permute layer mapping the resolved input to the
        # transpose op's first output, using the evaluated axis order.
        return converter_context.model.add_permute_layer(
            name=descriptor.layer_name,
            order=descriptor.order.tolist(),
            input_name=self.get_input_name(converter_context, descriptor, input_descriptors),
            output_name=descriptor.output_names[0])
|
class Solution(object):
    def thirdMax(self, nums):
        """Return the third distinct maximum of nums, or the overall
        maximum when fewer than three distinct values exist (LeetCode 414).

        Single pass, O(1) extra space: `top` holds the three largest
        distinct values seen so far, in ascending order.

        :type nums: List[int]
        :rtype: int
        """
        top = [-float('inf')] * 3  # [third, second, first] largest so far
        for n in nums:
            if n in top:
                continue  # ignore duplicates of the current leaders
            if n > top[2]:
                top = [top[1], top[2], n]
            elif n > top[1]:
                top = [top[1], n, top[2]]
            elif n > top[0]:
                top = [n, top[1], top[2]]
        # A remaining sentinel means fewer than three distinct values.
        if -float('inf') in top:
            return max(nums)
        return top[0]
if __name__ == "__main__":
    # Quick manual check: with fewer than three distinct values the
    # maximum (3) is returned.
    print(Solution().thirdMax([3, 2]))
|
# -*- coding: utf-8 -*-
"""
聚类离散化,最后结果的格式为:
1 2 3 4
A 0 0.178698 0.257724 0.351843
An 240 356.000000 281.000000 53.000000
...
即(0, 0.178698]有240个,(0.178698, 0.257724]有356个,依此类推其他项。
"""
import pandas as pd
from sklearn.cluster import KMeans
dataFile = './data/data.xls'  # input spreadsheet
resultFile = './data/data_processed.xls'  # output path for the discretised table
# Map each coefficient column name to the short label (A..F) used in the output.
label = {u'肝气郁结证型系数': 'A', u'热毒蕴结证型系数': 'B', u'冲任失调证型系数': 'C', u'气血两虚证型系数': 'D', u'脾胃虚弱证型系数': 'E', u'肝肾阴虚证型系数': 'F'}
k = 4  # number of clusters (discretisation bins) per column
# Read the data used by the clustering in the __main__ block below.
data = pd.read_excel(dataFile)  # load the data
keys = list(label.keys())
result = pd.DataFrame()  # accumulates one (bounds, counts) row pair per column
if __name__ == '__main__':
    # Modernised: DataFrame.as_matrix() (removed in pandas 1.0),
    # KMeans n_jobs (removed in scikit-learn 1.0) and DataFrame.append
    # (removed in pandas 2.0) replaced with their current equivalents.
    frames = []
    for key in keys:
        # Discretise this column with k-means clustering.
        print(u'正在进行“{}”的聚类...'.format(key))
        col = label[key]
        kmodel = KMeans(n_clusters=k)
        kmodel.fit(data[[key]].values)
        # Cluster centres (one column named after the label) ...
        r1 = pd.DataFrame(kmodel.cluster_centers_, columns=[col])
        # ... and the number of samples assigned to each cluster.
        r2 = pd.Series(kmodel.labels_).value_counts().rename(col + 'n').to_frame()
        r = pd.concat([r1, r2], axis=1).sort_values(col)
        r.index = [1, 2, 3, 4]
        # Turn cluster centres into interval boundaries: the midpoint of
        # neighbouring centres, with 0 as the leftmost edge.
        r[col] = r[col].rolling(2).mean()
        r.loc[1, col] = 0.0
        frames.append(r.T)
    # Sort by index so rows come out in A, An, B, Bn, ... order.
    result = pd.concat(frames).sort_index()
    result.to_excel(resultFile)
#!/user/bin/env python
# Greet the user three ways, demonstrating the main string-formatting options.
# 1) plain concatenation with +
print("Hello, " + input("Please enter your name: ")+"!", " Happy " + input("What day of the week is it? ") + "!")
# using .format
print("Hello, {}! Happy {}!".format(input("What's your name? "), input("What day is it? ")))
# using f string
name = input("What's your name? ")
day = input("What day is it? ")
print(f"Hello, {name}! Happy {day}!")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-06-20 09:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add three symptom CharFields to PostModel with one-off defaults."""

    dependencies = [
        ('check', '0004_postmodel_hidung_tersumbat'),
    ]

    operations = [
        migrations.AddField(
            model_name='postmodel',
            name='Nafas_Berbunyi',
            # NOTE(review): integer defaults on a CharField are coerced to
            # strings by Django; presumably '1'/'0' were intended — confirm.
            field=models.CharField(default=1, max_length=128),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='postmodel',
            name='Nafas_Tersedu_Sedu',
            field=models.CharField(default=0, max_length=128),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='postmodel',
            name='Penurunan_Kesadaran',
            field=models.CharField(default=0, max_length=128),
            preserve_default=False,
        ),
    ]
|
'''
example of simple transformation for z-shifted catmaid data.
Expects c2z.py json in working directory
'''
import json
dumpfn = './20160503_export_FromDB.json' # TODO maybe make this date based?
outfn = './20160503_newnodedump.txt'
c2zfile = './c2z.json'
# Load the catmaid-z -> shifted-z lookup table (JSON keys arrive as strings).
with open(c2zfile, 'r') as j:
    c2z = json.load(j)
c2z = {int(k): int(v) for k, v in c2z.items()}
# Load the full project export.
with open(dumpfn, 'r') as f:
    proj = json.load(f)
outputs = []
# One output line per skeleton node: "<sid> <nid> root <x> <y> <z>"
# with z remapped through the c2z table.
for sid in proj['reconstructions']['skeletons'].keys():
    for nid in proj['reconstructions']['skeletons'][sid]['trace'].keys():
        nidps = proj['reconstructions']['skeletons'][sid]['trace'][nid]
        x = nidps['xpix']
        y = nidps['ypix']
        # zpix may be a float string; truncate before the lookup.
        z = c2z[int(float(nidps['zpix']))]
        outputs.append('{} {} {} {} {} {}\n'.format(sid, nid, 'root', x, y, z))
# Connectors get the literal word 'connector' in the first column.
for cid in proj['reconstructions']['connectors'].keys():
    connps = proj['reconstructions']['connectors'][cid]
    x = connps['xpix']
    y = connps['ypix']
    z = c2z[int(float(connps['zpix']))]
    outputs.append('{} {} {} {} {} {}\n'.format(
        'connector', cid, 'root', x, y, z))
# Write the collected lines to the plain-text dump.
with open(outfn, 'w') as f:
    for ln in outputs:
        f.write(ln)
|
# Generated by Django 2.2 on 2021-06-02 23:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter categoria.slug to a blank-able CharField(max_length=100)."""

    dependencies = [
        ('blog', '0012_auto_20210602_2001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categoria',
            name='slug',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
import random
from pico2d import *
class Target:
    """Selection marker: remembers the currently selected object and draws
    a marker sprite above it; click handlers assign selection/targets."""

    image = None  # class-wide marker sprite, loaded on first instantiation
    LEFT_RUN, RIGHT_RUN, LEFT_STAND, RIGHT_STAND = 0, 1, 2, 3

    def __init__(self):
        self.x, self.y = 0, 0   # marker position (follows the selection)
        self.select = None      # currently selected object, if any
        if Target.image is None:  # idiom fix: identity check, not ==
            Target.image = load_image('target.png')

    def draw(self):
        # x == 0 is used as "nothing selected yet".
        if self.x != 0:
            self.image.draw(self.x, self.y)

    def update(self):
        if self.select is not None:
            # Hover the marker 32px above the selected object.
            self.x = self.select.x
            self.y = self.select.y + 32

    def _hit(self, event, obj):
        """Return True when the click (event.x/event.y, y flipped against a
        640px-high screen) lands within a 16px box around obj."""
        return (obj.x - 16 < event.x < obj.x + 16
                and obj.y - 16 < 640 - event.y < obj.y + 16)

    def input(self, event, person):
        # Select the clicked character.
        if self._hit(event, person):
            self.select = person

    def input_td(self, event, mob):
        # Assign the clicked mob as the current selection's target.
        if self._hit(event, mob):
            self.select.target = mob
            self.select.px = mob.x
            self.select.py = mob.y

    def input_h(self, event, person):
        # Assign the clicked person as the current selection's target.
        if self._hit(event, person):
            self.select.target = person
            self.select.px = person.x
            self.select.py = person.y
|
# -*- coding: utf-8 -*-
# CONSTANTS - FOR MODEL-JS
import sys
# Python 2 idiom: force UTF-8 as the process default encoding so non-ASCII
# model display names round-trip through the code generator.
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('../')
from codegen_consts import *
# Placeholder markers substituted into the controller templates.
UNCAPITALISED_MODEL_NAME_MARKER = "{{uncapitalised_model_name}}"
SNAKE_CASE_MODEL_NAME_MARKER = "{{snake_case_model_name}}"
MODEL_DISPLAY_NAME_MARKER = "{{model_display_name}}"
# Template file locations for the generated controller layer.
CONTROLLER_TPL = BASE_URL + '/controller_js/controller_tpl.js'
MODULE_TPL = BASE_URL + '/controller_js/module_tpl.js'
ROUTE_CONFIG_TPL = BASE_URL + '/controller_js/config.route.js'
|
alumnos = ("Alberto", "Juan", "Daniel")

# Bug fixes vs the original:
# - grades are converted to int on input, so sort()/min/max are numeric
#   (sorting the raw strings put "10" before "9");
# - the dead `while True: ... break` wrappers (each ran exactly once) are gone;
# - the last summary line for Daniel printed alumnos[1] (Juan) by mistake.

# para Alberto
notas_de_Alberto = []
nota = int(input(f"ingrese la primera nota de {alumnos [0]} : "))
notas_de_Alberto.append(nota)
nota = int(input(f"ingrese la segunda nota de {alumnos [0]} : "))
notas_de_Alberto.append(nota)
nota = int(input(f"ingrese la tercera nota de {alumnos [0]} : "))
notas_de_Alberto.append(nota)
nota = int(input(f"ingrese la cuarta nota de {alumnos [0]} : "))
notas_de_Alberto.append(nota)
nota = int(input(f"ingrese la quinta nota de {alumnos [0]} : "))
notas_de_Alberto.append(nota)
print(f"las notas de {alumnos [0]} son: {notas_de_Alberto} ")
notas_de_Alberto.sort()
print(f"La nota menor de {alumnos [0]} es : {notas_de_Alberto [0]}")
print(f"La nota mayor de {alumnos [0]} es : {notas_de_Alberto [4]}")
print(f"el promedio de las notas de {alumnos [0]} es : {sum(notas_de_Alberto) / 5}")
print("#######################################################")

notas_de_Juan = []
nota = int(input(f"ingrese la primera nota de {alumnos [1]} : "))
notas_de_Juan.append(nota)
nota = int(input(f"ingrese la segunda nota de {alumnos [1]} : "))
notas_de_Juan.append(nota)
nota = int(input(f"ingrese la tercera nota de {alumnos [1]} : "))
notas_de_Juan.append(nota)
nota = int(input(f"ingrese la cuarta nota de {alumnos [1]} : "))
notas_de_Juan.append(nota)
nota = int(input(f"ingrese la quinta nota de {alumnos [1]} : "))
notas_de_Juan.append(nota)
print(f"Las notas de {alumnos [1]} son : {notas_de_Juan}")
notas_de_Juan.sort()
print(f"la nota menor es : {notas_de_Juan [0]}")
print(f"la nota mayor es : {notas_de_Juan [4]}")
print(f"el promedio de las notas de {alumnos [1]} es : {sum(notas_de_Juan) / 5}")
print("########################################################")

notas_de_Daniel = []
nota = int(input(f"ingresa la primera nota de {alumnos [2]} : "))
notas_de_Daniel.append(nota)
nota = int(input(f"ingresa la segunda nota de {alumnos [2]} : "))
notas_de_Daniel.append(nota)
nota = int(input(f"ingresa la tercera nota de {alumnos [2]} : "))
notas_de_Daniel.append(nota)
nota = int(input(f"ingresa la cuarta nota de {alumnos [2]} : "))
notas_de_Daniel.append(nota)
nota = int(input(f"ingresa la quinta nota de {alumnos [2]} : "))
notas_de_Daniel.append(nota)
print(f"las notas de {alumnos [2]} son : {notas_de_Daniel}")
notas_de_Daniel.sort()
print(f"la nota menor es : {notas_de_Daniel [0]}")
print(f"la nota mayor es : {notas_de_Daniel [4]}")
print(f"el promedio de las notas de {alumnos [2]} es : {sum(notas_de_Daniel) / 5}")
def main():
    """Read space-separated ints from stdin and return how many elements
    would have to change to make all elements equal, i.e.
    len(values) - frequency of the most common value.
    """
    from collections import Counter
    values = list(map(int, input().split()))
    # Counter tallies in one pass; the original called list.count once per
    # element (O(n^2)).
    most_common_count = Counter(values).most_common(1)[0][1]
    return len(values) - most_common_count
def custom():
    """Counting-tally variant: read space-separated non-negative ints from
    stdin and return how many elements differ from the most frequent value.

    Bug fix: the leftover debug `print(counter)` is removed.
    """
    values = list(map(int, input().split()))
    # One counting slot per value up to the maximum (assumes non-negative input).
    counts = [0] * (max(values) + 1)
    for v in values:
        counts[v] += 1
    return len(values) - max(counts)


print("Res = ", custom())
from collections import deque
import numpy as np
class FrameStack():
    """Rolling stack of the last `stack_size` frames, exposed as a single
    array stacked along the last axis (e.g. for RL frame-stacked states)."""

    def __init__(self, initial_frame, stack_size=4, preprocess_fn=None):
        """Seed the stack by repeating the (preprocessed) initial frame.

        initial_frame: first observation.
        stack_size: number of frames kept.
        preprocess_fn: optional per-frame transform applied before stacking.
        """
        self.preprocess_fn = preprocess_fn
        self.frame_stack = deque(maxlen=stack_size)
        initial_frame = self._preprocess(initial_frame)
        for _ in range(stack_size):
            self.frame_stack.append(initial_frame)
        self.state = np.stack(self.frame_stack, axis=-1)

    def _preprocess(self, frame):
        # Apply the optional transform; identity when none was given.
        return self.preprocess_fn(frame) if self.preprocess_fn else frame

    def add_frame(self, frame):
        """Append a frame (evicting the oldest) and refresh the state.

        Bug fix: the original called self.preprocess_fn unconditionally and
        crashed with TypeError when preprocess_fn was None.
        """
        self.frame_stack.append(self._preprocess(frame))
        self.state = np.stack(self.frame_stack, axis=-1)

    def get_state(self):
        """Return the current stacked state (frame shape + stack axis)."""
        return self.state
class Scheduler():
    """Steps a value down by a decay factor once every `interval` calls."""

    def __init__(self, initial_value, interval, decay_factor):
        self.interval = self.counter = interval
        self.decay_factor = decay_factor
        self.value_factor = 1
        self.value = initial_value

    def get_value(self):
        """Return the current value, decaying it each time the internal
        call counter runs past the interval."""
        self.counter -= 1
        if self.counter >= 0:
            return self.value
        # Interval exhausted: reset the counter and decay the value.
        self.counter = self.interval
        self.value *= self.decay_factor
        return self.value
def calculate_expected_return(rewards, gamma):
    """Compute discounted returns G_t = r_t + gamma * G_{t+1} per step,
    returned in the same (start-to-end) order as `rewards`."""
    returns = []
    running = 0
    # Walk the rewards backwards so each step folds in its successor.
    for reward in reversed(rewards):
        running = reward + gamma * running
        returns.append(running)
    returns.reverse()
    return returns
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import Client
import unittest
import logging
import json
logging.basicConfig(level=logging.INFO, format="%(message)s")
class GetLtlTest(unittest.TestCase):
    """End-to-end check of the English-to-LTL translation endpoint."""

    def setUp(self):
        self.client = Client()

    def testGetLtl(self):
        subjects = [[
            {'letter': 'r', 'subject': 'the robot', 'verb': 'move', 'object': ''},
        ]]
        payload = {
            'sentence': "The robot moves infinitely often",
            'proposition': 's0',
            'subjects': json.dumps(subjects),
        }
        resp = self.client.post('/english_to_ltl', payload)
        self.assertEqual(json.loads(resp.content), {
            'ltl': "G F r"
        })
|
'''
Sawyer Coleman
US07 Functions File
SSW_555_Agile_Methods
BHS_Project_SSW_555
Homework04
'''
from datetime import datetime
import calendar
'''Calculates Age'''
months = {"JAN": 1,"FEB": 2 ,"MAR": 3,"APR": 4,"MAY": 5,"JUN": 6,"JUL": 7,"AUG":8,"SEP": 9,"OCT": 10,"NOV": 11,"DEC": 12 }
#Calculates the age from date of birth to current date, assuming there is no date of death
def AgeLive(DoB):
    """Return the age in (365-day) years from a GEDCOM-style date of birth,
    e.g. "15 JAN 1990", to today.

    Applies the simplification suggested in the review note: strptime's
    %b directive parses the month abbreviation directly (and
    case-insensitively), replacing the manual month-table lookup and
    confusingly mis-named string reshuffling of the original.
    """
    birth_date = datetime.strptime(DoB, '%d %b %Y')
    age = (datetime.today() - birth_date).days/365
    return age
#Calculates age from date of birth to date of death
def AgeDeath(DoB, DoD):
    """Return the age at death in (365-day) years between two GEDCOM-style
    dates, e.g. AgeDeath("1 JAN 1900", "1 JAN 1950").

    Applies the simplification suggested in the review note: both dates are
    parsed directly with '%d %b %Y' instead of the original reverse/lookup/
    rejoin dance (which worked, but through mis-named variables).
    """
    birth_date = datetime.strptime(DoB, '%d %b %Y')
    death_date = datetime.strptime(DoD, '%d %b %Y')
    age = (death_date - birth_date).days/365
    return age
def US07(BirthDate = "N/A", DeathDate = "N/A"):
    """US07 sanity check: an individual must be (or have died) under 150
    years old. With no death date the age is measured against today.

    Note (from the original review comment): GEDCOM does not require a
    birth date either, so BirthDate may also be absent — callers beware.
    """
    if DeathDate == "N/A":
        return AgeLive(BirthDate) < 150
    return AgeDeath(BirthDate, DeathDate) < 150
|
import time
from src.plots.bar import AnimatePlot
from src.generate_data import generate
generate(kind='list', size = 20)  # write 20 random values to input.txt
# Read back the comma-separated integers produced by generate().
f = open('input.txt', 'r')
inp = f.read()
array = list(map(int, inp.split(',')))
plot = AnimatePlot("insertion_sort_Sort")
plot.update(array, 0, 0)  # record the initial (unsorted) frame
_len = len(array)
plot._len = _len
k = 1  # running frame counter for the animation
# Standard insertion sort; every comparison/swap is recorded as a frame.
for i in range(1, len(array)):
    key = array[i]
    j = i - 1
    # Shift larger elements right until key's slot is found.
    while(j >= 0 and array[j] > key):
        plot.update(array, j, k, next=j+1)
        array[j + 1], array[j] = array[j], array[j + 1]
        #print(array)
        j -= 1
        k += 1
    array[j + 1] = key
    plot.update(array, j, k, next=j+1)
plot.CreateVideo()  # assemble the recorded frames into a video
"""
Description: Simultaneous Perturbation Stochastic Approximation
Example Use:
"""
from tqdm import tqdm
import torch
from torch import nn
from ._fgsm import FGSM, FGM
from ._utils import clip
__all__ = ["SPSA"]
def SPSA(net, x, y_true, data_params, attack_params, loss_function="cross_entropy", verbose=False, progress_bar=False):
    """
    Description: Simultaneous Perturbation Stochastic Approximation attack.
        Gradient-free: the loss gradient is estimated with two-sided finite
        differences along random sign vectors.
    Input :
        net : Neural Network (torch.nn.Module)
        x : Inputs to the net (Batch) — assumed 4-D (NCHW); see note below.
        y_true : Labels (Batch); when None, the net's own argmax predictions are used.
        data_params : (dict)
            x_min: Minimum possible value of x (min pixel value) (Float)
            x_max: Maximum possible value of x (max pixel value) (Float)
        attack_params : Attack parameters as a dictionary (dict)
            norm : Norm of attack ("inf" is the only norm clamped here) (Str)
            eps : Attack budget, also the finite-difference step (Float)
            step_size : Attack budget for each iteration (Float)
            num_steps : Number of iterations (Int)
            num_samples : Random directions averaged per iteration (Int)
        verbose: check gradient masking (Bool) — currently unused
        progress_bar: Put progress bar (Bool)
    Output:
        perturbation : Perturbations for given batch (Batch)

    Bug fix: the original referenced `grad_approx` without ever computing
    it (NameError); the estimate is reconstructed from the reference
    implementation that was stubbed out in `_spsa_gradient`.
    """
    if y_true is not None and len(x) != len(y_true):
        raise ValueError(f"Number of inputs {len(x)} should match the number of labels {len(y_true)}")
    if y_true is None:
        # Attack the model's current predictions.
        y_true = torch.argmax(net(x), dim=1)

    # Per-sample losses are needed so each image gets its own gradient sign.
    criterion = nn.CrossEntropyLoss(reduction="none")

    perturbation = torch.zeros_like(x, dtype=torch.float)

    # Adding progress bar for iterations if progress_bar = True
    if progress_bar:
        iters = tqdm(
            iterable=range(attack_params["num_steps"]),
            desc="Attack Steps Progress",
            unit="step",
            leave=False)
    else:
        iters = range(attack_params["num_steps"])

    for _ in iters:
        with torch.no_grad():
            if progress_bar:
                samples = tqdm(
                    iterable=range(attack_params["num_samples"]),
                    desc="Vector Samples Progress",
                    unit="sample",
                    leave=False)
            else:
                samples = range(attack_params["num_samples"])

            # Two-sided SPSA finite-difference estimate, averaged over
            # `num_samples` random +/-1 direction vectors.
            grad_approx = torch.zeros_like(x)
            for _ in samples:
                rand_vector = torch.sign(torch.randn_like(x))
                loss_plus = criterion(net(x + perturbation + attack_params["eps"] * rand_vector), y_true)
                loss_minus = criterion(net(x + perturbation - attack_params["eps"] * rand_vector), y_true)
                # NOTE(review): the (-1, 1, 1, 1) view assumes 4-D (NCHW)
                # inputs, matching the original reference code — confirm.
                g = (loss_plus - loss_minus).view(-1, 1, 1, 1) * rand_vector / (2 * attack_params["eps"])
                grad_approx += (attack_params["step_size"] / attack_params["num_samples"]) * g
            perturbation += grad_approx

            # Clip perturbation if surpassed the norm bounds
            if attack_params["norm"] == "inf":
                perturbation = torch.clamp(
                    perturbation, -attack_params["eps"], attack_params["eps"])
            # Keep x + perturbation inside the valid data range
            # (elementwise clamp against tensor bounds, inlined from
            # the project's `clip` helper).
            lower = data_params["x_min"] - x
            upper = data_params["x_max"] - x
            perturbation.data = torch.max(torch.min(perturbation, upper), lower)
    return perturbation
def _spsa_gradient():
    # Placeholder for factoring the SPSA finite-difference gradient
    # estimate out of SPSA(); the commented block below is the reference
    # implementation it would host (captures x, perturbation, criterion,
    # net, y_true, samples and attack_params from the caller's scope).
    # grad_approx = torch.zeros_like(x)
    # for _ in samples:
    #     rand_vector = torch.sign(torch.randn_like(x))
    #     g = (criterion(net(x + perturbation + attack_params["eps"] * rand_vector), y_true) -
    #          criterion(net(x + perturbation - attack_params["eps"] * rand_vector), y_true)).view(-1, 1, 1, 1) * \
    #         rand_vector / (2 * attack_params["eps"])
    #     grad_approx += (attack_params["step_size"] / attack_params["num_samples"]) * g
    pass
|
# Generated by Django 3.0.4 on 2020-03-21 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required `genre` foreign key (to rentals.Genre) on Games."""

    dependencies = [
        ('rentals', '0002_games'),
    ]

    operations = [
        migrations.AddField(
            model_name='games',
            name='genre',
            # default=0 is a one-off backfill value (preserve_default=False).
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='rentals.Genre'),
            preserve_default=False,
        ),
    ]
|
import itertools
def factorial(n):
    """Return n! computed iteratively.

    Matches the original for n < 2 (including negatives), which yield 1,
    but avoids the recursion-depth limit the recursive version hit for
    large n.
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def build_count(x, S):
    """Product over v in S of (len(x[v]) - 2); returns 0 as soon as any
    x[v] has fewer than three entries. Empty S yields 1."""
    product = 1
    for v in S:
        size = len(x[v])
        if size < 3:
            return 0
        product *= size - 2
    return product
def count_comp(g,S):
    """Count connected components of g's edges when the vertices in S are
    treated as cut points (traversal does not pass through them).

    g: graph-like object whose `edges` maps an edge (vertex pair) to a
       multiplicity; S: collection of special/removed vertices.
    An edge with both endpoints in S contributes one component per
    parallel copy.
    """
    unseen = set(g.edges)
    components = 0
    while unseen:
        components +=1
        used_vertices = set()
        e = unseen.pop()
        # Edge entirely inside S: each extra parallel copy is its own
        # component; nothing to flood-fill.
        if e[0] in S and e[1] in S:
            components += g.edges[e]-1
            continue
        # Flood-fill the rest of this component, expanding only through
        # vertices that are not in S.
        vertices_to_deal_with = set(e)
        while vertices_to_deal_with:
            v = vertices_to_deal_with.pop()
            if v not in S:
                used_vertices.add(v)
                # Claim every still-unseen edge incident to v and queue
                # its other endpoints.
                for e2 in list(unseen):
                    if v in e2:
                        unseen.remove(e2)
                        for w in e2:
                            if w not in used_vertices:
                                vertices_to_deal_with.add(w)
    return components
def count(g, i):
    """Check the degree-i identity between g's normalized stable-polynomial
    data and the star-combination count; return True when it holds.

    Bug fix: the 'failure with graph ...' message was printed on the
    *success* path (after the mismatch had already returned False); it now
    accompanies the failing comparison.
    """
    oneside = (g.stable_poly_normalized[i][-1], g.poincare_denom_power[i]-1)
    best_count = 0
    sum_of_those = 0
    star_iterator = range(len(g.stars))
    # Among all i-subsets of stars, keep the maximal component count and
    # the accumulated build_count over subsets achieving it.
    for S in itertools.combinations(star_iterator, i):
        components = count_comp(g, S)
        # print (S, components)
        if components > best_count:
            sum_of_those = build_count(g.stars, S)
            best_count = components
        elif components == best_count:
            sum_of_those += build_count(g.stars, S)
    otherside = (sum_of_those, best_count-1)
    # print (oneside, otherside)
    # Cross-multiplied comparison of oneside[0]/oneside[1]! with
    # otherside[0]/otherside[1]!.
    if factorial(oneside[1])*otherside[0] != factorial(otherside[1])*oneside[0]:
        print ('failure with graph {} and degree {}'.format(g.sparse6, i))
        return False
    return True
def test(gg):
    """Return the set of graphs in gg that fail count() for some degree
    (every degree is checked, matching the original's side effects)."""
    failures = set()
    for g in gg:
        for degree in range(2, len(g.stable_poly_normalized)):
            if not count(g, degree):
                failures.add(g)
    return failures
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.