hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f716ca32bfbce92f904eda519d192105b6956caa
| 3,447
|
py
|
Python
|
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
|
vitorfs/cmdbox
|
97806c02caf5947ec855286212e61db714e3fb02
|
[
"MIT"
] | 1
|
2019-09-07T11:49:11.000Z
|
2019-09-07T11:49:11.000Z
|
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
|
vitorfs/cmdbox
|
97806c02caf5947ec855286212e61db714e3fb02
|
[
"MIT"
] | null | null | null |
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
|
vitorfs/cmdbox
|
97806c02caf5947ec855286212e61db714e3fb02
|
[
"MIT"
] | 2
|
2018-09-04T08:33:17.000Z
|
2020-09-18T20:26:46.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-04 20:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scaffold_templates', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
('extension', models.CharField(blank=True, max_length=10, null=True, verbose_name='extension')),
('size', models.PositiveIntegerField(default=0, verbose_name='size')),
],
options={
'verbose_name': 'file',
'verbose_name_plural': 'files',
},
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
],
options={
'verbose_name': 'folder',
'verbose_name_plural': 'folders',
},
),
migrations.AlterModelOptions(
name='scaffoldtemplate',
options={'ordering': ('-updated_at',), 'verbose_name': 'scaffold template', 'verbose_name_plural': 'scaffold template'},
),
migrations.AlterField(
model_name='scaffoldtemplate',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scaffoldtemplates', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='scaffoldtemplate',
unique_together=set([('user', 'slug')]),
),
migrations.AddField(
model_name='folder',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='folders', to='scaffold_templates.ScaffoldTemplate'),
),
migrations.AddField(
model_name='file',
name='folder',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.Folder'),
),
migrations.AddField(
model_name='file',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.ScaffoldTemplate'),
),
migrations.AlterUniqueTogether(
name='folder',
unique_together=set([('template', 'name')]),
),
migrations.AlterUniqueTogether(
name='file',
unique_together=set([('template', 'name')]),
),
]
| 42.036585
| 158
| 0.59385
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scaffold_templates', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
('extension', models.CharField(blank=True, max_length=10, null=True, verbose_name='extension')),
('size', models.PositiveIntegerField(default=0, verbose_name='size')),
],
options={
'verbose_name': 'file',
'verbose_name_plural': 'files',
},
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
],
options={
'verbose_name': 'folder',
'verbose_name_plural': 'folders',
},
),
migrations.AlterModelOptions(
name='scaffoldtemplate',
options={'ordering': ('-updated_at',), 'verbose_name': 'scaffold template', 'verbose_name_plural': 'scaffold template'},
),
migrations.AlterField(
model_name='scaffoldtemplate',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scaffoldtemplates', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='scaffoldtemplate',
unique_together=set([('user', 'slug')]),
),
migrations.AddField(
model_name='folder',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='folders', to='scaffold_templates.ScaffoldTemplate'),
),
migrations.AddField(
model_name='file',
name='folder',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.Folder'),
),
migrations.AddField(
model_name='file',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.ScaffoldTemplate'),
),
migrations.AlterUniqueTogether(
name='folder',
unique_together=set([('template', 'name')]),
),
migrations.AlterUniqueTogether(
name='file',
unique_together=set([('template', 'name')]),
),
]
| true
| true
|
f716cbce48a8f8203417dcba6fc313bd1d90bcd9
| 5,760
|
py
|
Python
|
test/visualization/test_visualization.py
|
chrhck/pyABC
|
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
|
[
"BSD-3-Clause"
] | null | null | null |
test/visualization/test_visualization.py
|
chrhck/pyABC
|
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
|
[
"BSD-3-Clause"
] | null | null | null |
test/visualization/test_visualization.py
|
chrhck/pyABC
|
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
|
[
"BSD-3-Clause"
] | null | null | null |
import pyabc
import tempfile
import pytest
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# create and run some model
def model(p):
return {'ss0': p['p0'] + 0.1 * np.random.uniform(),
'ss1': p['p1'] + 0.1 * np.random.uniform()}
p_true = {'p0': 3, 'p1': 4}
observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']}
limits = {'p0': (0, 5), 'p1': (1, 8)}
prior = pyabc.Distribution(**{
key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0])
for key in p_true.keys()})
db_path = "sqlite:///" \
+ os.path.join(tempfile.gettempdir(), "test_visualize.db")
distance = pyabc.PNormDistance(p=2)
n_history = 2
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)
for _ in range(n_history):
abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler)
abc.new(db_path, observation)
abc.run(minimum_epsilon=.1, max_nr_populations=3)
histories = []
labels = []
for j in range(n_history):
history = pyabc.History(db_path)
history.id = j + 1
histories.append(history)
labels.append("Some run " + str(j))
def test_epsilons():
pyabc.visualization.plot_epsilons(histories, labels)
plt.close()
def test_sample_numbers():
pyabc.visualization.plot_sample_numbers(
histories, rotation=43, size=(5, 5))
_, ax = plt.subplots()
pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax)
with pytest.raises(ValueError):
pyabc.visualization.plot_sample_numbers(histories, [labels[0]])
plt.close()
def test_sample_numbers_trajectory():
pyabc.visualization.plot_sample_numbers_trajectory(
histories, labels, yscale='log', rotation=90)
_, ax = plt.subplots()
pyabc.visualization.plot_sample_numbers_trajectory(
histories, labels, yscale='log10', size=(8, 8), ax=ax)
plt.close()
def test_acceptance_rates_trajectory():
pyabc.visualization.plot_acceptance_rates_trajectory(
histories, labels, yscale='log', rotation=76)
_, ax = plt.subplots()
pyabc.visualization.plot_acceptance_rates_trajectory(
histories, labels, yscale='log10', rotation=76, size=(10, 5), ax=ax)
plt.close()
def test_total_sample_numbers():
pyabc.visualization.plot_total_sample_numbers(histories)
pyabc.visualization.plot_total_sample_numbers(
histories, labels, yscale='log', size=(10, 5))
_, ax = plt.subplots()
pyabc.visualization.plot_total_sample_numbers(
histories, rotation=75, yscale='log10', ax=ax)
plt.close()
def test_effective_sample_sizes():
pyabc.visualization.plot_effective_sample_sizes(
histories, labels, rotation=45, relative=True)
plt.close()
def test_histograms():
# 1d
pyabc.visualization.plot_histogram_1d(
histories[0], 'p0', bins=20,
xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true)
# 2d
pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1')
pyabc.visualization.plot_histogram_2d(
histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1],
ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true)
# matrix
pyabc.visualization.plot_histogram_matrix(
histories[0], bins=1000, size=(6, 7), refval=p_true)
plt.close()
def test_kdes():
history = histories[0]
df, w = history.get_distribution(m=0, t=None)
pyabc.visualization.plot_kde_1d(
df, w, x='p0',
xmin=limits['p0'][0], xmax=limits['p0'][1],
label="PDF")
pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1')
pyabc.visualization.plot_kde_matrix(df, w)
# also use the highlevel interfaces
pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5),
refval=p_true)
pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1',
size=(7, 5),
refval=p_true)
pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43,
refval=p_true)
plt.close()
def test_credible_intervals():
pyabc.visualization.plot_credible_intervals(histories[0])
pyabc.visualization.plot_credible_intervals(
histories[0], levels=[0.2, 0.5, 0.9],
show_kde_max_1d=True, show_kde_max=True, show_mean=True,
refval=p_true)
pyabc.visualization.plot_credible_intervals_for_time(
histories, levels=[0.5, 0.99],
show_kde_max_1d=True, show_kde_max=True, show_mean=True,
refvals=p_true)
plt.close()
def test_model_probabilities():
pyabc.visualization.plot_model_probabilities(histories[0])
plt.close()
def test_data_callback():
def plot_data(sum_stat, weight, ax, **kwargs):
ax.plot(sum_stat['ss0'], alpha=weight, **kwargs)
def plot_data_aggregated(sum_stats, weights, ax, **kwargs):
data = np.array([sum_stat['ss0'] for sum_stat in sum_stats])
weights = np.array(weights).reshape((-1, 1))
mean = (data * weights).sum(axis=0)
plot_data({'ss0': mean}, 1.0, ax)
pyabc.visualization.plot_data_callback(
histories[0], plot_data, plot_data_aggregated)
def test_data_default():
obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]),
3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})}
sim_dict = {1: 6.5, 2: np.array([32, 5, 6]),
3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})}
pyabc.visualization.plot_data_default(obs_dict, sim_dict)
for i in range(5):
obs_dict[i] = i + 1
sim_dict[i] = i + 2
pyabc.visualization.plot_data_default(obs_dict, sim_dict)
plt.close()
| 32.542373
| 79
| 0.64375
|
import pyabc
import tempfile
import pytest
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def model(p):
return {'ss0': p['p0'] + 0.1 * np.random.uniform(),
'ss1': p['p1'] + 0.1 * np.random.uniform()}
p_true = {'p0': 3, 'p1': 4}
observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']}
limits = {'p0': (0, 5), 'p1': (1, 8)}
prior = pyabc.Distribution(**{
key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0])
for key in p_true.keys()})
db_path = "sqlite:///" \
+ os.path.join(tempfile.gettempdir(), "test_visualize.db")
distance = pyabc.PNormDistance(p=2)
n_history = 2
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)
for _ in range(n_history):
abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler)
abc.new(db_path, observation)
abc.run(minimum_epsilon=.1, max_nr_populations=3)
histories = []
labels = []
for j in range(n_history):
history = pyabc.History(db_path)
history.id = j + 1
histories.append(history)
labels.append("Some run " + str(j))
def test_epsilons():
pyabc.visualization.plot_epsilons(histories, labels)
plt.close()
def test_sample_numbers():
pyabc.visualization.plot_sample_numbers(
histories, rotation=43, size=(5, 5))
_, ax = plt.subplots()
pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax)
with pytest.raises(ValueError):
pyabc.visualization.plot_sample_numbers(histories, [labels[0]])
plt.close()
def test_sample_numbers_trajectory():
pyabc.visualization.plot_sample_numbers_trajectory(
histories, labels, yscale='log', rotation=90)
_, ax = plt.subplots()
pyabc.visualization.plot_sample_numbers_trajectory(
histories, labels, yscale='log10', size=(8, 8), ax=ax)
plt.close()
def test_acceptance_rates_trajectory():
pyabc.visualization.plot_acceptance_rates_trajectory(
histories, labels, yscale='log', rotation=76)
_, ax = plt.subplots()
pyabc.visualization.plot_acceptance_rates_trajectory(
histories, labels, yscale='log10', rotation=76, size=(10, 5), ax=ax)
plt.close()
def test_total_sample_numbers():
pyabc.visualization.plot_total_sample_numbers(histories)
pyabc.visualization.plot_total_sample_numbers(
histories, labels, yscale='log', size=(10, 5))
_, ax = plt.subplots()
pyabc.visualization.plot_total_sample_numbers(
histories, rotation=75, yscale='log10', ax=ax)
plt.close()
def test_effective_sample_sizes():
pyabc.visualization.plot_effective_sample_sizes(
histories, labels, rotation=45, relative=True)
plt.close()
def test_histograms():
pyabc.visualization.plot_histogram_1d(
histories[0], 'p0', bins=20,
xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true)
pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1')
pyabc.visualization.plot_histogram_2d(
histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1],
ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true)
pyabc.visualization.plot_histogram_matrix(
histories[0], bins=1000, size=(6, 7), refval=p_true)
plt.close()
def test_kdes():
history = histories[0]
df, w = history.get_distribution(m=0, t=None)
pyabc.visualization.plot_kde_1d(
df, w, x='p0',
xmin=limits['p0'][0], xmax=limits['p0'][1],
label="PDF")
pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1')
pyabc.visualization.plot_kde_matrix(df, w)
pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5),
refval=p_true)
pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1',
size=(7, 5),
refval=p_true)
pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43,
refval=p_true)
plt.close()
def test_credible_intervals():
pyabc.visualization.plot_credible_intervals(histories[0])
pyabc.visualization.plot_credible_intervals(
histories[0], levels=[0.2, 0.5, 0.9],
show_kde_max_1d=True, show_kde_max=True, show_mean=True,
refval=p_true)
pyabc.visualization.plot_credible_intervals_for_time(
histories, levels=[0.5, 0.99],
show_kde_max_1d=True, show_kde_max=True, show_mean=True,
refvals=p_true)
plt.close()
def test_model_probabilities():
pyabc.visualization.plot_model_probabilities(histories[0])
plt.close()
def test_data_callback():
def plot_data(sum_stat, weight, ax, **kwargs):
ax.plot(sum_stat['ss0'], alpha=weight, **kwargs)
def plot_data_aggregated(sum_stats, weights, ax, **kwargs):
data = np.array([sum_stat['ss0'] for sum_stat in sum_stats])
weights = np.array(weights).reshape((-1, 1))
mean = (data * weights).sum(axis=0)
plot_data({'ss0': mean}, 1.0, ax)
pyabc.visualization.plot_data_callback(
histories[0], plot_data, plot_data_aggregated)
def test_data_default():
obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]),
3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})}
sim_dict = {1: 6.5, 2: np.array([32, 5, 6]),
3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})}
pyabc.visualization.plot_data_default(obs_dict, sim_dict)
for i in range(5):
obs_dict[i] = i + 1
sim_dict[i] = i + 2
pyabc.visualization.plot_data_default(obs_dict, sim_dict)
plt.close()
| true
| true
|
f716cd537ee2ce3b739c2b138de0ba36abc67390
| 8,949
|
py
|
Python
|
tools/vsnp/vsnp_statistics.py
|
supernord/tools-iuc
|
9a0c41967765d120a8fc519c0c7f09cbe3a6efbe
|
[
"MIT"
] | 1
|
2019-07-05T13:19:51.000Z
|
2019-07-05T13:19:51.000Z
|
tools/vsnp/vsnp_statistics.py
|
mtekman/tools-iuc
|
95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5
|
[
"MIT"
] | 8
|
2019-05-27T20:54:44.000Z
|
2021-10-04T09:33:30.000Z
|
tools/vsnp/vsnp_statistics.py
|
mtekman/tools-iuc
|
95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import csv
import gzip
import os
from functools import partial
import numpy
import pandas
from Bio import SeqIO
def nice_size(size):
# Returns a readably formatted string with the size
words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
prefix = ''
try:
size = float(size)
if size < 0:
size = abs(size)
prefix = '-'
except Exception:
return '??? bytes'
for ind, word in enumerate(words):
step = 1024 ** (ind + 1)
if step > size:
size = size / float(1024 ** ind)
if word == 'bytes': # No decimals for bytes
return "%s%d bytes" % (prefix, size)
return "%s%.1f %s" % (prefix, size, word)
return '??? bytes'
def output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
# Produce an Excel spreadsheet that
# contains a row for each sample.
columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
data_frames = []
for i, fastq_file in enumerate(fastq_files):
idxstats_file = idxstats_files[i]
metrics_file = metrics_files[i]
file_name_base = os.path.basename(fastq_file)
# Read fastq_file into a data frame.
_open = partial(gzip.open, mode='rt') if gzipped else open
with _open(fastq_file) as fh:
identifiers = []
seqs = []
letter_annotations = []
for seq_record in SeqIO.parse(fh, "fastq"):
identifiers.append(seq_record.id)
seqs.append(seq_record.seq)
letter_annotations.append(seq_record.letter_annotations["phred_quality"])
# Convert lists to Pandas series.
s1 = pandas.Series(identifiers, name='id')
s2 = pandas.Series(seqs, name='seq')
# Gather Series into a data frame.
fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id'])
total_reads = int(len(fastq_df.index) / 4)
current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
# Reference
current_sample_df.at[file_name_base, 'Reference'] = dbkey
# File Size
current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
# Mean Read Length
sampling_size = 10000
if sampling_size > total_reads:
sampling_size = total_reads
fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
dict_mean = {}
list_length = []
i = 0
for id, seq, in fastq_df.iterrows():
dict_mean[id] = numpy.mean(letter_annotations[i])
list_length.append(len(seq.array[0]))
i += 1
current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length)
# Mean Read Quality
df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean()
# Reads Passing Q30
reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)
current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
# Total Reads
current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
# All Mapped Reads
all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
# Unmapped Reads
current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
# Unmapped Reads Percentage of Total
if unmapped_reads > 0:
unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads)
else:
unmapped_reads_percentage = 0
current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
# Reference with Coverage
ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
# Average Depth of Coverage
current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
# Good SNP Count
current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
data_frames.append(current_sample_df)
output_df = pandas.concat(data_frames)
output_df.to_csv(output_file, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\')
def process_idxstats_file(idxstats_file):
all_mapped_reads = 0
unmapped_reads = 0
with open(idxstats_file, "r") as fh:
for i, line in enumerate(fh):
line = line.rstrip('\r\n')
items = line.split("\t")
if i == 0:
# NC_002945.4 4349904 213570 4047
all_mapped_reads = int(items[2])
elif i == 1:
# * 0 0 82774
unmapped_reads = int(items[3])
return all_mapped_reads, unmapped_reads
def process_metrics_file(metrics_file):
ref_with_coverage = '0%'
avg_depth_of_coverage = 0
good_snp_count = 0
with open(metrics_file, "r") as ifh:
for i, line in enumerate(ifh):
if i == 0:
# Skip comments.
continue
line = line.rstrip('\r\n')
items = line.split("\t")
if i == 1:
# MarkDuplicates 10.338671 98.74%
ref_with_coverage = items[3]
avg_depth_of_coverage = items[2]
elif i == 2:
# VCFfilter 611
good_snp_count = items[1]
return ref_with_coverage, avg_depth_of_coverage, good_snp_count
parser = argparse.ArgumentParser()
parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')
parser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory')
parser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory')
parser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory')
parser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads')
parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')
parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage')
args = parser.parse_args()
fastq_files = []
idxstats_files = []
metrics_files = []
# Accumulate inputs.
if args.read1 is not None:
# The inputs are not dataset collections, so
# read1, read2 (possibly) and vsnp_azc will also
# not be None.
fastq_files.append(args.read1)
idxstats_files.append(args.samtools_idxstats)
metrics_files.append(args.vsnp_azc)
if args.read2 is not None:
fastq_files.append(args.read2)
idxstats_files.append(args.samtools_idxstats)
metrics_files.append(args.vsnp_azc)
else:
for file_name in sorted(os.listdir(args.input_reads_dir)):
fastq_files.append(os.path.join(args.input_reads_dir, file_name))
for file_name in sorted(os.listdir(args.input_idxstats_dir)):
idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))
if args.list_paired:
# Add the idxstats file for reverse.
idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))
for file_name in sorted(os.listdir(args.input_metrics_dir)):
metrics_files.append(os.path.join(args.input_metrics_dir, file_name))
if args.list_paired:
# Add the metrics file for reverse.
metrics_files.append(os.path.join(args.input_metrics_dir, file_name))
output_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
| 46.128866
| 169
| 0.658956
|
import argparse
import csv
import gzip
import os
from functools import partial
import numpy
import pandas
from Bio import SeqIO
def nice_size(size):
words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
prefix = ''
try:
size = float(size)
if size < 0:
size = abs(size)
prefix = '-'
except Exception:
return '??? bytes'
for ind, word in enumerate(words):
step = 1024 ** (ind + 1)
if step > size:
size = size / float(1024 ** ind)
if word == 'bytes':
return "%s%d bytes" % (prefix, size)
return "%s%.1f %s" % (prefix, size, word)
return '??? bytes'
def output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
data_frames = []
for i, fastq_file in enumerate(fastq_files):
idxstats_file = idxstats_files[i]
metrics_file = metrics_files[i]
file_name_base = os.path.basename(fastq_file)
_open = partial(gzip.open, mode='rt') if gzipped else open
with _open(fastq_file) as fh:
identifiers = []
seqs = []
letter_annotations = []
for seq_record in SeqIO.parse(fh, "fastq"):
identifiers.append(seq_record.id)
seqs.append(seq_record.seq)
letter_annotations.append(seq_record.letter_annotations["phred_quality"])
s1 = pandas.Series(identifiers, name='id')
s2 = pandas.Series(seqs, name='seq')
fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id'])
total_reads = int(len(fastq_df.index) / 4)
current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
current_sample_df.at[file_name_base, 'Reference'] = dbkey
current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
sampling_size = 10000
if sampling_size > total_reads:
sampling_size = total_reads
fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
dict_mean = {}
list_length = []
i = 0
for id, seq, in fastq_df.iterrows():
dict_mean[id] = numpy.mean(letter_annotations[i])
list_length.append(len(seq.array[0]))
i += 1
current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length)
df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean()
reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)
current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
if unmapped_reads > 0:
unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads)
else:
unmapped_reads_percentage = 0
current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
data_frames.append(current_sample_df)
output_df = pandas.concat(data_frames)
output_df.to_csv(output_file, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\')
def process_idxstats_file(idxstats_file):
all_mapped_reads = 0
unmapped_reads = 0
with open(idxstats_file, "r") as fh:
for i, line in enumerate(fh):
line = line.rstrip('\r\n')
items = line.split("\t")
if i == 0:
all_mapped_reads = int(items[2])
elif i == 1:
unmapped_reads = int(items[3])
return all_mapped_reads, unmapped_reads
def process_metrics_file(metrics_file):
    """Pull coverage/SNP summary values from a vSNP metrics file.

    Row 0 is treated as a header and skipped.  Row 1 supplies the average
    depth of coverage (column 3) and the reference-with-coverage value
    (column 4); row 2 supplies the good SNP count (column 2).  Values are
    returned as the raw strings found in the file; defaults are used when
    the corresponding rows are absent.

    :param metrics_file: path to a tab-separated metrics file
    :return: tuple (ref_with_coverage, avg_depth_of_coverage, good_snp_count)
    """
    coverage_pct = '0%'
    mean_depth = 0
    snp_count = 0
    with open(metrics_file, "r") as handle:
        for row_index, raw_line in enumerate(handle):
            if row_index == 0:
                continue  # header row
            fields = raw_line.rstrip('\r\n').split("\t")
            if row_index == 1:
                coverage_pct = fields[3]
                mean_depth = fields[2]
            elif row_index == 2:
                snp_count = fields[1]
    return coverage_pct, mean_depth, snp_count
# Command-line interface: either a single sample (--read1/--read2 plus the
# matching --samtools_idxstats / --vsnp_azc outputs) or whole input
# directories of samples and per-sample stats files.
parser = argparse.ArgumentParser()
parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')
parser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory')
parser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory')
parser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory')
parser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads')
parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')
parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage')
args = parser.parse_args()
fastq_files = []
idxstats_files = []
metrics_files = []
if args.read1 is not None:
    # Single-sample mode: both mates of a pair share the same idxstats and
    # metrics files, so those paths are appended once per read.
    fastq_files.append(args.read1)
    idxstats_files.append(args.samtools_idxstats)
    metrics_files.append(args.vsnp_azc)
    if args.read2 is not None:
        fastq_files.append(args.read2)
        idxstats_files.append(args.samtools_idxstats)
        metrics_files.append(args.vsnp_azc)
else:
    # Directory mode: sorted listings keep fastq, idxstats and metrics file
    # lists aligned by sample.  With --list_paired each stats file is
    # duplicated so the lists stay in lock-step with the paired reads.
    for file_name in sorted(os.listdir(args.input_reads_dir)):
        fastq_files.append(os.path.join(args.input_reads_dir, file_name))
    for file_name in sorted(os.listdir(args.input_idxstats_dir)):
        idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))
        if args.list_paired:
            idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))
    for file_name in sorted(os.listdir(args.input_metrics_dir)):
        metrics_files.append(os.path.join(args.input_metrics_dir, file_name))
        if args.list_paired:
            metrics_files.append(os.path.join(args.input_metrics_dir, file_name))
output_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
| true
| true
|
f716cd5d7134a3961ffe080fd5955660a50ac9e2
| 4,759
|
py
|
Python
|
augmented_seq2seq/datasets/friends/data.py
|
jsedoc/nn_chatbot
|
7b4406687bad2efa14658cb5aa137065cd325073
|
[
"MIT"
] | 2
|
2017-07-22T15:34:02.000Z
|
2017-12-07T07:28:56.000Z
|
augmented_seq2seq/datasets/friends/data.py
|
jsedoc/nn_chatbot
|
7b4406687bad2efa14658cb5aa137065cd325073
|
[
"MIT"
] | null | null | null |
augmented_seq2seq/datasets/friends/data.py
|
jsedoc/nn_chatbot
|
7b4406687bad2efa14658cb5aa137065cd325073
|
[
"MIT"
] | 3
|
2017-06-09T10:30:22.000Z
|
2020-02-25T02:29:58.000Z
|
FILENAME = 'sequences_full.csv'
VOCAB_SIZE = None  # None => most_common() keeps every token (filtered later)
UNK = 'UNK'  # placeholder token for out-of-vocabulary words
# Penn Treebank POS tags, used as fallback vocabulary entries for words that
# are not in the learned vocabulary.
# FIX: the original dict listed 'PRP' twice; the second entry was almost
# certainly meant to be the possessive-pronoun tag 'PRP$' (the analogous
# 'WP$' is present), so the duplicate is corrected to 'PRP$' here.
POS_TAGS = {
    'CC' : '<CC>', 'CD' : '<CD>', 'DT' : '<DT>', 'EX' : '<EX>', 'FW' : '<FW>',
    'IN' : '<IN>', 'JJ' : '<JJ>', 'JJR' : '<JJR>', 'JJS' : '<JJS>', 'LS' : '<LS>',
    'MD' : '<MD>', 'NN' : '<NN>', 'NNS' : '<NNS>', 'NNP' : '<NNP>', 'NNPS' : '<NNPS>',
    'PDT' : '<PDT>', 'POS' : '<POS>', 'PRP' : '<PRP>', 'PRP$' : '<PRP$>', 'RB' : '<RB>',
    'RBR' : '<RBR>', 'RBS' : '<RBS>', 'RP' : '<RP>', 'SYM' : '<SYM>', 'TO' : '<TO>',
    'UH' : '<UH>', 'VB' : '<VB>', 'VBD' : '<VBD>', 'VBG' : '<VBG>', 'VBN' : '<VBN>',
    'VBP' : '<VBP>', 'VBZ' : '<VBZ>', 'WDT' : '<WDT>', 'WP' : '<WP>', 'WP$' : '<WP$>',
    'WRB' : '<WRB>'
}
# imports : in the order of usage
import itertools
import nltk
import random
import sys
import pickle
'''
read lines from file
return [list of lines]
'''
def read_lines(filename):
    """Read *filename*, repair Windows-1252 artifacts via fix_win_encode()
    and return its lines with the header line and trailing empty line
    dropped."""
    raw_text = open(filename).read()
    return fix_win_encode(raw_text).split('\n')[1:-1]
def fix_win_encode(text):
    """Strip/replace stray Windows-1252 bytes and markup tokens.

    \\x92 becomes an apostrophe, \\x97 a space; \\x91, \\x93, '_b_' and
    '*' are removed entirely.
    """
    substitutions = (
        ('\x92', "'"),
        ('\x97', ' '),
        ('\x91', ''),
        ('_b_', ''),
        ('*', ''),
        ('\x93', ''),
    )
    for old, new in substitutions:
        text = text.replace(old, new)
    return text
'''
split each row of form "query |respect| response"
to [ query, response, respect ]
'''
def split_row(lines):
    # Accumulators: queries, responses, and the integer "respect" label.
    q,r,respect = [], [], []
    for line in lines:
        line = line.split('|')
        # NOTE(review): for rows of the form "query|respect|response",
        # field 0 is appended to r and the last field to q -- the roles
        # look swapped relative to the docstring above.  Confirm this is
        # intentional before relying on the q/r naming downstream.
        r.append(split_and_tag(line[0]))
        q.append(split_and_tag(line[-1]))
        respect.append(int(line[1]))
    return q,r,respect
'''
split sentences into words and tags with nltk
replace foreign words and numbers
into <FW> and <CD> tags
'''
def split_and_tag(line):
    """Tokenize *line* with nltk and return its words, replacing any
    number ('CD') or foreign word ('FW') with its POS tag string."""
    tagged = nltk.pos_tag(nltk.word_tokenize(line.strip()))
    return [tag if tag in ('CD', 'FW') else word for word, tag in tagged]
'''
read list of words, create index to word,
word to index dictionaries
return tuple( vocab->(word, count), idx2w, w2idx )
'''
def index_(tokenized_sentences, vocab_size):
    """Build index<->word lookup tables from tokenized sentences.

    The vocabulary keeps the *vocab_size* most frequent words that occur
    more than once, preceded by the padding token '_', 'UNK' and every
    POS tag.

    Returns (index2word, word2index, freq_dist).
    """
    # frequency distribution over every token in every sentence
    freq_dist = nltk.FreqDist(itertools.chain.from_iterable(tokenized_sentences))
    # most-used words, dropping hapax legomena (count <= 1)
    frequent = [word for word, count in freq_dist.most_common(vocab_size) if count > 1]
    index2word = ['_'] + ['UNK'] + list(POS_TAGS.keys()) + frequent
    word2index = {word: index for index, word in enumerate(index2word)}
    return index2word, word2index, freq_dist
'''
There will be no zero padding!
'''
def encode(q, r, w2idx):
    """Encode parallel lists of query/response token sequences into index
    sequences using the *w2idx* lookup.  No zero padding is applied; the
    number of encoded pairs is determined by len(q)."""
    pair_count = len(q)
    idx_q = [encode_seq(q[i], w2idx) for i in range(pair_count)]
    idx_r = [encode_seq(r[i], w2idx) for i in range(pair_count)]
    return idx_q, idx_r
'''
replace words with indices in a sequence
replace with unknown if word not in lookup
return [list of indices]
'''
def encode_seq(seq, lookup):
    """Map each word in *seq* to its index in *lookup*.

    Words missing from the lookup fall back to the index of their POS tag;
    if the tag is also missing, the UNK index is used.
    """
    indices = []
    for word in seq:
        if word in lookup:
            indices.append(lookup[word])
            continue
        tag = nltk.pos_tag([word])[-1][-1]
        indices.append(lookup[tag] if tag in lookup else lookup[UNK])
    return indices
def process_data():
    """End-to-end preprocessing: read the dialogue CSV, tokenize/POS-tag
    the query-response pairs, build the vocabulary indices and pickle the
    encoded data ('data.pkl') plus the lookup metadata ('metadata.pkl')."""
    print('\n>> Read lines from file')
    lines = read_lines(filename=FILENAME)
    # change to lower case
    lines = [ line.lower() for line in lines ]
    print('>> [read_lines] {} lines;\nexamples\n{}'.
          format(len(lines), lines[121:125]))
    # split row into query, response and respect
    q, r, respect = split_row(lines)
    print('\n>> [split_row] \n{} {} {}'.
           format( q[121:125], r[121:125], respect[121:125]))
    #############
    # NL pipeline
    ####
    ##
    # [1] Spell Check
    #
    # [2] POS tagging
    # indexing -> idx2w, w2idx : en/ta
    print('\n >> Index words')
    # vocab_size=None keeps all words occurring more than once
    idx2w, w2idx, freq_dist = index_(q+r, vocab_size=None)
    idx_q, idx_r = encode(q, r, w2idx)
    data = {
        'q' : idx_q,
        'r' : idx_r,
        'respect' : respect
    }
    # let us now save the necessary dictionaries
    metadata = {
        'w2idx' : w2idx,
        'idx2w' : idx2w,
        'freq_dist' : freq_dist,
        'respect_size' : max(respect) + 1
    }
    # write to disk : data control dictionaries
    with open('metadata.pkl', 'wb') as f:
        pickle.dump(metadata, f)
    with open('data.pkl', 'wb') as f:
        pickle.dump(data, f)
def load_data(PATH=''):
    """Unpickle and return (data, metadata) previously written by
    process_data(); *PATH* is an optional directory prefix (must end with
    a separator if non-empty, since it is joined by concatenation)."""
    with open(PATH + 'metadata.pkl', 'rb') as meta_file:
        metadata = pickle.load(meta_file)
    with open(PATH + 'data.pkl', 'rb') as data_file:
        data = pickle.load(data_file)
    return data, metadata
if __name__ == '__main__':
    # Run the full preprocessing pipeline when executed as a script.
    process_data()
| 25.86413
| 595
| 0.548645
|
FILENAME = 'sequences_full.csv'
VOCAB_SIZE = None
UNK = 'UNK'
POS_TAGS = { 'CC' : '<CC>', 'CD' : '<CD>', 'DT' : '<DT>', 'EX' : '<EX>', 'FW' : '<FW>', 'IN' : '<IN>', 'JJ' : '<JJ>', 'JJR' : '<JJR>', 'JJS' : '<JJS>', 'LS' : '<LS>', 'MD' : '<MD>', 'NN' : '<NN>', 'NNS' : '<NNS>', 'NNP' : '<NNP>', 'NNPS' : '<NNPS>', 'PDT' : '<PDT>', 'POS' : '<POS>', 'PRP' : '<PRP>', 'PRP' : '<PRP>', 'RB' : '<RB>', 'RBR' : '<RBR>', 'RBS' : '<RBS>', 'RP' : '<RP>', 'SYM' : '<SYM>', 'TO' : '<TO>', 'UH' : '<UH>', 'VB' : '<VB>', 'VBD' : '<VBD>', 'VBG' : '<VBG>', 'VBN' : '<VBN>', 'VBP' : '<VBP>', 'VBZ' : '<VBZ>', 'WDT' : '<WDT>', 'WP' : '<WP>', 'WP$' : '<WP$>', 'WRB' : '<WRB>' }
import itertools
import nltk
import random
import sys
import pickle
def read_lines(filename):
return fix_win_encode(open(filename).read()).split('\n')[1:-1]
def fix_win_encode(text):
return text.replace('\x92', "'").replace('\x97', ' ').replace('\x91', '').replace('_b_','').replace('*','').replace('\x93','')
def split_row(lines):
q,r,respect = [], [], []
for line in lines:
line = line.split('|')
r.append(split_and_tag(line[0]))
q.append(split_and_tag(line[-1]))
respect.append(int(line[1]))
return q,r,respect
def split_and_tag(line):
wtags = nltk.pos_tag(nltk.word_tokenize(line.strip()))
words = []
for w,t in wtags:
if t == 'CD' or t == 'FW':
w = t
words.append(w)
return words
def index_(tokenized_sentences, vocab_size):
# get frequency distribution
freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))
# get vocabulary of 'vocab_size' most used words
vocab = freq_dist.most_common(vocab_size)
vocab = [ item for item in vocab if item[1] > 1 ]
# index2word
index2word = ['_'] + ['UNK'] + list(POS_TAGS.keys()) + [ x[0] for x in vocab ]
# word2index
word2index = dict([(w,i) for i,w in enumerate(index2word)] )
return index2word, word2index, freq_dist
def encode(q, r, w2idx):
# num of rows
data_len = len(q)
idx_q, idx_r = [], []
for i in range(data_len):
idx_q.append(encode_seq(q[i], w2idx))
idx_r.append(encode_seq(r[i], w2idx))
return idx_q, idx_r
def encode_seq(seq, lookup):
indices = []
for word in seq:
if word in lookup:
indices.append(lookup[word])
else:
tag = nltk.pos_tag([word])[-1][-1]
if tag in lookup:
indices.append(lookup[tag])
else:
indices.append(lookup[UNK])
return indices
def process_data():
print('\n>> Read lines from file')
lines = read_lines(filename=FILENAME)
# change to lower case
lines = [ line.lower() for line in lines ]
print('>> [read_lines] {} lines;\nexamples\n{}'.
format(len(lines), lines[121:125]))
# split row into query, response and respect
q, r, respect = split_row(lines)
print('\n>> [split_row] \n{} {} {}'.
format( q[121:125], r[121:125], respect[121:125]))
#############
# NL pipeline
####
##
# [1] Spell Check
#
# [2] POS tagging
# indexing -> idx2w, w2idx : en/ta
print('\n >> Index words')
idx2w, w2idx, freq_dist = index_(q+r, vocab_size=None)
idx_q, idx_r = encode(q, r, w2idx)
data = {
'q' : idx_q,
'r' : idx_r,
'respect' : respect
}
# let us now save the necessary dictionaries
metadata = {
'w2idx' : w2idx,
'idx2w' : idx2w,
'freq_dist' : freq_dist,
'respect_size' : max(respect) + 1
}
# write to disk : data control dictionaries
with open('metadata.pkl', 'wb') as f:
pickle.dump(metadata, f)
with open('data.pkl', 'wb') as f:
pickle.dump(data, f)
def load_data(PATH=''):
# read data control dictionaries
with open(PATH + 'metadata.pkl', 'rb') as f:
metadata = pickle.load(f)
with open(PATH + 'data.pkl', 'rb') as f:
data = pickle.load(f)
return data, metadata
if __name__ == '__main__':
process_data()
| true
| true
|
f716cd6b0558dfb762b9277089b16cb9576044b2
| 1,613
|
py
|
Python
|
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
|
6enno/FarmXero
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Xero Payroll AU
This is the Xero Payroll API for orgs in Australia region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class Timesheets(BaseModel):
    """Container model wrapping a list of Timesheet resources.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {"timesheets": "list[Timesheet]"}
    # Attribute name -> JSON key in the API payload.
    attribute_map = {"timesheets": "Timesheets"}

    def __init__(self, timesheets=None):  # noqa: E501
        """Timesheets - a model defined in OpenAPI"""  # noqa: E501
        self._timesheets = None
        self.discriminator = None
        if timesheets is not None:
            self.timesheets = timesheets

    @property
    def timesheets(self):
        """Gets the timesheets of this Timesheets.  # noqa: E501

        :return: The timesheets of this Timesheets.  # noqa: E501
        :rtype: list[Timesheet]
        """
        return self._timesheets

    @timesheets.setter
    def timesheets(self, timesheets):
        """Sets the timesheets of this Timesheets.

        :param timesheets: The timesheets of this Timesheets.  # noqa: E501
        :type: list[Timesheet]
        """
        self._timesheets = timesheets
| 24.815385
| 76
| 0.619343
|
import re
from xero_python.models import BaseModel
class Timesheets(BaseModel):
openapi_types = {"timesheets": "list[Timesheet]"}
attribute_map = {"timesheets": "Timesheets"}
def __init__(self, timesheets=None):
self._timesheets = None
self.discriminator = None
if timesheets is not None:
self.timesheets = timesheets
@property
def timesheets(self):
return self._timesheets
@timesheets.setter
def timesheets(self, timesheets):
self._timesheets = timesheets
| true
| true
|
f716ce82d68d18fd209d91f8e1db052c5725c571
| 6,938
|
py
|
Python
|
assignment2/cs231n/optim.py
|
Abhijeet8901/CS231n
|
c8e715028b453899d5069cdb34faf3fc2959c270
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/optim.py
|
Abhijeet8901/CS231n
|
c8e715028b453899d5069cdb34faf3fc2959c270
|
[
"MIT"
] | null | null | null |
assignment2/cs231n/optim.py
|
Abhijeet8901/CS231n
|
c8e715028b453899d5069cdb34faf3fc2959c270
|
[
"MIT"
] | null | null | null |
import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
    """
    Performs vanilla stochastic gradient descent.

    config format:
    - learning_rate: Scalar learning rate (default 1e-2).
    """
    config = config if config is not None else {}
    lr = config.setdefault("learning_rate", 1e-2)
    # In-place step down the gradient; w is mutated and returned.
    w -= lr * dw
    return w, config
def sgd_momentum(w, dw, config=None):
    """
    Performs stochastic gradient descent with momentum.

    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    """
    if config is None:
        config = {}
    config.setdefault("learning_rate", 1e-2)
    config.setdefault("momentum", 0.9)
    v = config.get("velocity", np.zeros_like(w))

    # Momentum update: the velocity accumulates an exponentially decaying
    # sum of past gradients and the weights move along the velocity.
    # (Cleaned up: the assignment scaffolding comments and a stray `pass`
    # were removed; the update itself is unchanged.)
    v = config["momentum"] * v - config["learning_rate"] * dw
    next_w = w + v

    config["velocity"] = v
    return next_w, config
def rmsprop(w, dw, config=None):
    """
    Uses the RMSProp update rule, which uses a moving average of squared
    gradient values to set adaptive per-parameter learning rates.

    config format:
    - learning_rate: Scalar learning rate.
    - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
      gradient cache.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - cache: Moving average of second moments of gradients.
    """
    if config is None:
        config = {}
    config.setdefault("learning_rate", 1e-2)
    config.setdefault("decay_rate", 0.99)
    config.setdefault("epsilon", 1e-8)
    config.setdefault("cache", np.zeros_like(w))

    # Decaying average of squared gradients gives a per-parameter scale.
    # (Cleaned up: the assignment scaffolding comments and a stray `pass`
    # were removed; the update itself is unchanged, including the in-place
    # modification of w, which the module docstring explicitly permits.)
    cache = config["decay_rate"] * config["cache"] + (1 - config["decay_rate"]) * dw ** 2
    w += -config["learning_rate"] * dw / (np.sqrt(cache) + config["epsilon"])
    next_w = w

    config["cache"] = cache
    return next_w, config
def adam(w, dw, config=None):
    """
    Uses the Adam update rule, which incorporates moving averages of both the
    gradient and its square and a bias correction term.

    config format:
    - learning_rate: Scalar learning rate.
    - beta1: Decay rate for moving average of first moment of gradient.
    - beta2: Decay rate for moving average of second moment of gradient.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - m: Moving average of gradient.
    - v: Moving average of squared gradient.
    - t: Iteration number.
    """
    if config is None:
        config = {}
    config.setdefault("learning_rate", 1e-3)
    config.setdefault("beta1", 0.9)
    config.setdefault("beta2", 0.999)
    config.setdefault("epsilon", 1e-8)
    config.setdefault("m", np.zeros_like(w))
    config.setdefault("v", np.zeros_like(w))
    config.setdefault("t", 0)

    lr = config["learning_rate"]
    beta1, beta2, eps = config["beta1"], config["beta2"], config["epsilon"]

    # t is incremented before use so the bias corrections are well defined
    # (1 - beta**t would be 0 at t == 0).  (Cleaned up: the assignment
    # scaffolding comments and a stray `pass` were removed; the update
    # itself is unchanged, including the in-place modification of w.)
    t = config["t"] + 1
    m = beta1 * config["m"] + (1 - beta1) * dw
    v = beta2 * config["v"] + (1 - beta2) * dw ** 2
    m_hat = m / (1 - beta1 ** t)  # bias-corrected first moment
    v_hat = v / (1 - beta2 ** t)  # bias-corrected second moment
    w -= lr * m_hat / (np.sqrt(v_hat) + eps)

    config["m"], config["v"], config["t"] = m, v, t
    return w, config
| 38.544444
| 79
| 0.537763
|
import numpy as np
def sgd(w, dw, config=None):
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
w -= config["learning_rate"] * dw
return w, config
def sgd_momentum(w, dw, config=None):
if config is None:
config = {}
config.setdefault("learning_rate", 1e-2)
config.setdefault("momentum", 0.9)
v = config.get("velocity", np.zeros_like(w))
next_w=None
| true
| true
|
f716cea90be05811860d24af0a9d540d7d2e2e6c
| 4,451
|
py
|
Python
|
code/distributeHI.py
|
modichirag/21cmhod
|
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
|
[
"MIT"
] | null | null | null |
code/distributeHI.py
|
modichirag/21cmhod
|
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
|
[
"MIT"
] | null | null | null |
code/distributeHI.py
|
modichirag/21cmhod
|
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
|
[
"MIT"
] | null | null | null |
import numpy as np
import re, os
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower
from nbodykit import setup_logging
from mpi4py import MPI
import HImodels
# enable logging, we have some clue what's going on.
setup_logging('info')
# Get model as parameter.
import argparse
import sys  # FIX: sys.exit() is called below but sys was never imported

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', help='for small or big box', default='small')
parser.add_argument('-m', '--model', help='model name to use')
args = parser.parse_args()
if args.model is None:
    print('Specify a model name')
    sys.exit()
model = args.model  # e.g. 'ModelA'
boxsize = args.size

# Global, fixed paths and cosmology.
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]

# Parameters: box size, number of mesh cells, simulation path, ...
if boxsize == 'small':
    bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
elif boxsize == 'big':
    bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'
else:
    print('Box size not understood, should be "big" or "small"')
    sys.exit()

# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm

# Which model & configuration to use.
modeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC}
modedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'}
HImodel = modeldict[model]
modelname = model
mode = modedict[model]

ofolder = '../data/outputs/'
def distribution(aa, halocat, cencat, satcat, outfolder, mbins=None):
    '''Compute the fraction of HI in halos, centrals, satellites'''
    if rank == 0: print('Calculating distribution')
    if mbins is None: mbins = np.logspace(9, 15, 100)
    hmass = halocat['Mass'].compute()
    htotal, hsize, h1total = [], [], []
    # Per mass bin: total halo mass, halo count, and HI mass in
    # (halos, centrals, satellites); every sum is MPI-reduced over ranks.
    for ibin in range(mbins.size - 1):
        mask = (hmass >= mbins[ibin]) & (hmass < mbins[ibin + 1])
        htotal.append(comm.allreduce((hmass * mask).sum()))
        hsize.append(comm.allreduce(mask.sum()))
        binned_h1 = [comm.allreduce((cat.compute() * mask).sum())
                     for cat in (halocat['HImass'], cencat['HImass'], cencat['HIsat'])]
        h1total.append(binned_h1)
    if rank == 0:
        # Columns: mean halo mass, halo count, then mean HI mass per halo
        # for all halos / centrals / satellites; empty bins become zeros.
        tosave = np.zeros((len(hsize), 5))
        tosave[:, 1] = hsize
        tosave[:, 0] = htotal / (tosave[:, 1])
        tosave[:, 2:] = h1total / (tosave[:, 1].reshape(-1, 1))
        tosave[np.isnan(tosave)] = 0
        header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites'
        np.savetxt(outfolder + "HI_dist_{:6.4f}.txt".format(aa), tosave, fmt='%0.6e', header=header)
if __name__=="__main__":
    if rank==0: print('Starting')
    # Catalog suffix identifying the subhalo/central/satellite split run.
    suff='-m1_00p3mh-alpha-0p8-subvol'
    outfolder = ofolder + suff[1:]
    if bs == 1024: outfolder = outfolder + "-big"
    outfolder += "/%s/"%modelname
    if rank == 0: print(outfolder)
    #outfolder = ofolder + suff[1:] + "/%s/"%modelname
    try:
        os.makedirs(outfolder)
    except : pass
    # One pass per scale factor: load catalogs, assign HI, write the
    # binned HI distribution for that redshift.
    for aa in alist:
        if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1))
        halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
        # particle mass in units of 1e10 Msun/h (per the MassTable attr)
        mp = halocat.attrs['MassTable'][1]*1e10##
        halocat['Mass'] = halocat['Length'].compute() * mp
        cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
        satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
        #
        # Assign HI masses to halos/centrals/satellites, then roll the
        # satellite HI back up onto the host centrals ('HIsat').
        HImodelz = HImodel(aa)
        halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
        cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(),
                                            cencat.csize, cencat['Mass'].size, cencat.comm).local
        mbins = 10**np.arange(9, 15.1, 0.2)
        distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)
| 34.238462
| 106
| 0.625927
|
import numpy as np
import re, os
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower
from nbodykit import setup_logging
from mpi4py import MPI
import HImodels
setup_logging('info')
#Get model as parameter
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', help='for small or big box', default='small')
parser.add_argument('-m', '--model', help='model name to use')
args = parser.parse_args()
if args.model == None:
print('Specify a model name')
sys.exit()
#print(args, args.model)
model = args.model #'ModelD'
boxsize = args.size
#
#
#Global, fixed things
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]
#Parameters, box size, number of mesh cells, simulation, ...
if boxsize == 'small':
bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
elif boxsize == 'big':
bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'
else:
print('Box size not understood, should be "big" or "small"')
sys.exit()
# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm
modeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC}
modedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'}
HImodel = modeldict[model]
modelname = model
mode = modedict[model]
ofolder = '../data/outputs/'
def distribution(aa, halocat, cencat, satcat, outfolder, mbins=None):
if rank==0: print('Calculating distribution')
if mbins is None: mbins = np.logspace(9, 15, 100)
hmass = halocat['Mass'].compute()
htotal, hsize, h1total = [], [], []
for im in range(mbins.size-1):
mask = (hmass >= mbins[im]) & (hmass < mbins[im+1])
rankweight = (hmass*mask).sum()
htotal.append(comm.allreduce(rankweight))
rankweight = (mask).sum()
hsize.append(comm.allreduce(rankweight))
h1bin = []
for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]:
rankweight = (cat.compute()*mask).sum()
h1bin.append(comm.allreduce(rankweight))
h1total.append(h1bin)
if rank==0:
tosave = np.zeros((len(hsize), 5))
tosave[:, 1] = hsize
tosave[:, 0] = htotal / (tosave[:, 1])
tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1))
tosave[np.isnan(tosave)] = 0
header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites'
np.savetxt(outfolder + "HI_dist_{:6.4f}.txt".format(aa), tosave, fmt='%0.6e', header=header)
if __name__=="__main__":
if rank==0: print('Starting')
suff='-m1_00p3mh-alpha-0p8-subvol'
outfolder = ofolder + suff[1:]
if bs == 1024: outfolder = outfolder + "-big"
outfolder += "/%s/"%modelname
if rank == 0: print(outfolder)
try:
os.makedirs(outfolder)
except : pass
for aa in alist:
if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1))
halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
mp = halocat.attrs['MassTable'][1]*1e10 halocat['Mass'] = halocat['Length'].compute() * mp
cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
HImodelz = HImodel(aa)
halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(),
cencat.csize, cencat['Mass'].size, cencat.comm).local
mbins = 10**np.arange(9, 15.1, 0.2)
distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)
| true
| true
|
f716cec14ad98ba7140fbdbb91cb9e7cbc9274b0
| 11,751
|
py
|
Python
|
couplet_composer/project.py
|
anttikivi/couplet-composer
|
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
|
[
"MIT"
] | null | null | null |
couplet_composer/project.py
|
anttikivi/couplet-composer
|
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
|
[
"MIT"
] | 16
|
2020-10-29T17:31:47.000Z
|
2022-03-07T17:01:52.000Z
|
couplet_composer/project.py
|
anttikivi/couplet-composer
|
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Antti Kivi
# Licensed under the MIT License
"""A module that contains the class that represents the project
that the build script acts on.
"""
import importlib
import json
import logging
import os
from typing import Any, List
from .support.system import System
from .support import environment
from .binary_dependency import BinaryDependency
from .dependency import Dependency
class Project:
"""A class that that represents the project that the build
script acts on.
Attributes:
project_keys (list): The keys of the different subproject
under this project.
{key}_version (str): The version number of the subproject
which has the key '{key}'.
{key}_name (str): The name of the subproject which has
the key '{key}'.
gl_version (str): The target version number of OpenGL.
dependencies (list): A list containing the representation
objects of the dependencies of the project.
cmake_options (dict): A dictionary of CMake options to
pass to the build of the project.
"""
SHARED_VERSION_KEY = "shared_version"
SHARED_USAGE_VALUE = "shared"
VERSION_KEY = "version"
OPENGL_KEY = "opengl"
DEPENDENCIES_KEY = "dependencies"
NAME_KEY = "name"
COMMIT_KEY = "commit"
MODULE_KEY = "module"
MODULE_DEFAULT_VALUE = "default"
CLASS_KEY = "className"
FILES_KEY = "files"
TEST_ONLY_KEY = "testOnly"
BENCHMARK_ONLY_KEY = "benchmarkOnly"
ASSET_KEY = "asset"
REPOSITORY_KEY = "repository"
TAG_PREFIX_KEY = "tagPrefix"
CMAKE_OPTIONS_KEY = "cmakeOptions"
BINARY_KEY = "binary"
PLATFORMS_KEY = "platforms"
def __init__(
self,
source_root: str,
repo: str,
script_package: str,
platform: System
) -> None:
"""Initializes the project object.
Arguments:
source_root (str): The root directory of the
invocation in which the project and the build
files are.
repo (str): The name of the repository directory of
the project that is being built.
script_package (str): The name of the root Python
package of the build script.
platform (System): The platform that the build script
is invoked on.
"""
if not environment.is_path_source_root(path=source_root, repo=repo):
logging.critical(
"The root directory for the build script invocation is "
"invalid: %s",
source_root
)
raise ValueError
product_json = os.path.join(source_root, repo, "product.json")
dependency_data = None
try:
with open(product_json) as f:
json_data = json.load(f)
self.project_keys = list()
for key in json_data:
logging.debug(
"Checking if the key '%s' should be added to the "
"project keys",
key
)
if key != self.DEPENDENCIES_KEY \
and key != self.OPENGL_KEY \
and key != self.SHARED_VERSION_KEY \
and key != self.CMAKE_OPTIONS_KEY:
self.project_keys.append(key)
logging.debug(
"Added the key '%s' to the project keys",
key
)
for key in self.project_keys:
logging.debug("Setting the project values for %s", key)
setattr(
self,
"{}_version".format(key),
self._get_version_from_project_data(
data=json_data,
key=key
)
)
setattr(
self,
"{}_name".format(key),
json_data[key][self.NAME_KEY]
)
self.gl_version = json_data[self.OPENGL_KEY][self.VERSION_KEY]
dependency_data = self._get_from_project_data(
data=json_data,
key=self.DEPENDENCIES_KEY
)
self.cmake_options = json_data[self.CMAKE_OPTIONS_KEY] \
if self.CMAKE_OPTIONS_KEY in json_data else None
except OSError:
logging.critical(
"The project value file wasn't found: %s",
product_json
)
if not dependency_data:
raise ValueError
self.dependencies: List[Dependency] = list()
for key, value in dependency_data.items():
self.dependencies.append(self._create_dependency_object(
key=key,
data=value,
root_package=script_package,
platform=platform
))
def _get_from_project_data(self, data: object, key: str) -> Any:
"""Reads and resolves the given entry from the data got
from the project data JSON file.
Args:
data (Object): The data object read from the project
data JSON file.
key (str): The key for the data.
Returns:
The number, string, or object read from the project
data JSON file.
"""
if key not in data:
raise ValueError
return data[key]
def _get_version_from_project_data(self, data: object, key: str) -> str:
"""Reads and resolves the correct version from the data
got from the project data JSON file.
Args:
data (Object): The data object read from the project
data JSON file.
key (str): The key for the project part that the
version is resolved for.
Returns:
A 'str' that contains the resolved version number.
"""
shared = None if self.SHARED_VERSION_KEY not in data \
else data[self.SHARED_VERSION_KEY]
if key not in data:
raise ValueError
key_data = data[key]
if self.VERSION_KEY not in key_data:
if shared:
return shared
else:
raise ValueError
elif key_data[self.VERSION_KEY] == self.SHARED_USAGE_VALUE:
return shared
else:
return key_data[self.VERSION_KEY]
def _create_dependency_object(
    self,
    key: str,
    data: dict,
    root_package: str,
    platform: System
) -> Dependency:
    """Create the representation object of the given dependency.

    Resolves the correct module and class to use and constructs an
    instance of it from the dependency entry of the project data.

    Args:
        key (str): The simple identifier of the dependency.
        data (dict): The dependency data for the given key read
            from the project data file.
        root_package (str): The name of the root Python package of
            the build script.
        platform (System): The platform that the build script is
            invoked on.

    Returns:
        The constructed dependency object.
    """
    # Platform-specific file list, if one is declared for the
    # current platform.
    platform_files = None
    platform_entries = data.get(self.PLATFORMS_KEY)
    if platform_entries and platform.name in platform_entries:
        platform_files = platform_entries[platform.name].get(self.FILES_KEY)

    # The asset name may be a single string or a per-platform mapping.
    asset_entry = data.get(self.ASSET_KEY)
    if isinstance(asset_entry, dict):
        asset_name = asset_entry[platform.value]
    else:
        asset_name = asset_entry

    # Arguments common to every dependency constructor.
    kwargs = {
        "key": key,
        "name": data[self.NAME_KEY],
        "version": data[self.VERSION_KEY],
        "commit": data.get(self.COMMIT_KEY),
        "files": data.get(self.FILES_KEY),
        "platform_files": platform_files,
        "test_only": data.get(self.TEST_ONLY_KEY, False),
        "benchmark_only": data.get(self.BENCHMARK_ONLY_KEY, False),
        "asset_name": asset_name,
        "repository": data.get(self.REPOSITORY_KEY),
        "tag_prefix": data.get(self.TAG_PREFIX_KEY, Dependency.DEFAULT_TAG_PREFIX),
    }
    needs_binary = data.get(self.BINARY_KEY)
    if needs_binary:
        # CMake options are only forwarded to binary dependencies.
        kwargs["cmake_options"] = data.get(self.CMAKE_OPTIONS_KEY)

    module_name = data.get(self.MODULE_KEY, self.MODULE_DEFAULT_VALUE)
    if module_name == self.MODULE_DEFAULT_VALUE:
        # No custom module: use the built-in dependency classes.
        dependency_class = BinaryDependency if needs_binary else Dependency
    else:
        # A custom module requires an explicit class name.
        if self.CLASS_KEY not in data:
            raise ValueError  # TODO Add explanation or logging.
        module = importlib.import_module("{}.support.dependencies.{}".format(
            root_package,
            module_name
        ))
        dependency_class = getattr(module, data[self.CLASS_KEY])
    return dependency_class(**kwargs)
| 33.864553
| 79
| 0.537997
|
import importlib
import json
import logging
import os
from typing import Any, List
from .support.system import System
from .support import environment
from .binary_dependency import BinaryDependency
from .dependency import Dependency
class Project:
SHARED_VERSION_KEY = "shared_version"
SHARED_USAGE_VALUE = "shared"
VERSION_KEY = "version"
OPENGL_KEY = "opengl"
DEPENDENCIES_KEY = "dependencies"
NAME_KEY = "name"
COMMIT_KEY = "commit"
MODULE_KEY = "module"
MODULE_DEFAULT_VALUE = "default"
CLASS_KEY = "className"
FILES_KEY = "files"
TEST_ONLY_KEY = "testOnly"
BENCHMARK_ONLY_KEY = "benchmarkOnly"
ASSET_KEY = "asset"
REPOSITORY_KEY = "repository"
TAG_PREFIX_KEY = "tagPrefix"
CMAKE_OPTIONS_KEY = "cmakeOptions"
BINARY_KEY = "binary"
PLATFORMS_KEY = "platforms"
def __init__(
self,
source_root: str,
repo: str,
script_package: str,
platform: System
) -> None:
if not environment.is_path_source_root(path=source_root, repo=repo):
logging.critical(
"The root directory for the build script invocation is "
"invalid: %s",
source_root
)
raise ValueError
product_json = os.path.join(source_root, repo, "product.json")
dependency_data = None
try:
with open(product_json) as f:
json_data = json.load(f)
self.project_keys = list()
for key in json_data:
logging.debug(
"Checking if the key '%s' should be added to the "
"project keys",
key
)
if key != self.DEPENDENCIES_KEY \
and key != self.OPENGL_KEY \
and key != self.SHARED_VERSION_KEY \
and key != self.CMAKE_OPTIONS_KEY:
self.project_keys.append(key)
logging.debug(
"Added the key '%s' to the project keys",
key
)
for key in self.project_keys:
logging.debug("Setting the project values for %s", key)
setattr(
self,
"{}_version".format(key),
self._get_version_from_project_data(
data=json_data,
key=key
)
)
setattr(
self,
"{}_name".format(key),
json_data[key][self.NAME_KEY]
)
self.gl_version = json_data[self.OPENGL_KEY][self.VERSION_KEY]
dependency_data = self._get_from_project_data(
data=json_data,
key=self.DEPENDENCIES_KEY
)
self.cmake_options = json_data[self.CMAKE_OPTIONS_KEY] \
if self.CMAKE_OPTIONS_KEY in json_data else None
except OSError:
logging.critical(
"The project value file wasn't found: %s",
product_json
)
if not dependency_data:
raise ValueError
self.dependencies: List[Dependency] = list()
for key, value in dependency_data.items():
self.dependencies.append(self._create_dependency_object(
key=key,
data=value,
root_package=script_package,
platform=platform
))
def _get_from_project_data(self, data: object, key: str) -> Any:
if key not in data:
raise ValueError
return data[key]
def _get_version_from_project_data(self, data: object, key: str) -> str:
shared = None if self.SHARED_VERSION_KEY not in data \
else data[self.SHARED_VERSION_KEY]
if key not in data:
raise ValueError
key_data = data[key]
if self.VERSION_KEY not in key_data:
if shared:
return shared
else:
raise ValueError
elif key_data[self.VERSION_KEY] == self.SHARED_USAGE_VALUE:
return shared
else:
return key_data[self.VERSION_KEY]
def _create_dependency_object(
self,
key: str,
data: dict,
root_package: str,
platform: System
) -> Dependency:
commit = None if self.COMMIT_KEY not in data else data[self.COMMIT_KEY]
library_files = None if self.FILES_KEY not in data \
else data[self.FILES_KEY]
platform_files = None
if self.PLATFORMS_KEY in data \
and platform.name in data[self.PLATFORMS_KEY] \
and self.FILES_KEY in data[self.PLATFORMS_KEY][platform.name]:
platform_data = data[self.PLATFORMS_KEY][platform.name]
platform_files = platform_data[self.FILES_KEY]
test_only = False
if self.TEST_ONLY_KEY in data:
test_only = data[self.TEST_ONLY_KEY]
benchmark_only = False
if self.BENCHMARK_ONLY_KEY in data:
benchmark_only = data[self.BENCHMARK_ONLY_KEY]
asset_name = None
if self.ASSET_KEY in data:
if isinstance(data[self.ASSET_KEY], dict):
asset_name = data[self.ASSET_KEY][platform.value]
else:
asset_name = data[self.ASSET_KEY]
repository = data[self.REPOSITORY_KEY] if self.REPOSITORY_KEY in data \
else None
tag_prefix = data[self.TAG_PREFIX_KEY] if self.TAG_PREFIX_KEY in data \
else Dependency.DEFAULT_TAG_PREFIX
cmake_options = data[self.CMAKE_OPTIONS_KEY] \
if self.CMAKE_OPTIONS_KEY in data else None
needs_binary = data[self.BINARY_KEY] if self.BINARY_KEY in data \
else None
if self.MODULE_KEY not in data or \
data[self.MODULE_KEY] == self.MODULE_DEFAULT_VALUE:
if needs_binary:
return BinaryDependency(
key=key,
name=data[self.NAME_KEY],
version=data[self.VERSION_KEY],
commit=commit,
files=library_files,
platform_files=platform_files,
test_only=test_only,
benchmark_only=benchmark_only,
asset_name=asset_name,
repository=repository,
tag_prefix=tag_prefix,
cmake_options=cmake_options
)
else:
return Dependency(
key=key,
name=data[self.NAME_KEY],
version=data[self.VERSION_KEY],
commit=commit,
files=library_files,
platform_files=platform_files,
test_only=test_only,
benchmark_only=benchmark_only,
asset_name=asset_name,
repository=repository,
tag_prefix=tag_prefix
)
else:
if self.CLASS_KEY not in data:
raise ValueError # TODO Add explanation or logging.
package_name = "{}.support.dependencies.{}".format(
root_package,
data[self.MODULE_KEY]
)
module = importlib.import_module(package_name)
dependency_class = getattr(module, data[self.CLASS_KEY])
if needs_binary:
return dependency_class(
key=key,
name=data[self.NAME_KEY],
version=data[self.VERSION_KEY],
commit=commit,
files=library_files,
platform_files=platform_files,
test_only=test_only,
benchmark_only=benchmark_only,
asset_name=asset_name,
repository=repository,
tag_prefix=tag_prefix,
cmake_options=cmake_options
)
else:
return dependency_class(
key=key,
name=data[self.NAME_KEY],
version=data[self.VERSION_KEY],
commit=commit,
files=library_files,
platform_files=platform_files,
test_only=test_only,
benchmark_only=benchmark_only,
asset_name=asset_name,
repository=repository,
tag_prefix=tag_prefix
)
| true
| true
|
f716cf2c2f40b9fb26a797c10a0e167af41d51ea
| 1,354
|
py
|
Python
|
petpetgif/petpet.py
|
Gst0ne/pet-pet-gif
|
e219a859558df99424625e3dc51b287e5c7674ff
|
[
"MIT"
] | 1
|
2021-11-06T13:14:01.000Z
|
2021-11-06T13:14:01.000Z
|
petpetgif/petpet.py
|
Gst0ne/pet-pet-gif
|
e219a859558df99424625e3dc51b287e5c7674ff
|
[
"MIT"
] | null | null | null |
petpetgif/petpet.py
|
Gst0ne/pet-pet-gif
|
e219a859558df99424625e3dc51b287e5c7674ff
|
[
"MIT"
] | 1
|
2021-11-12T08:50:24.000Z
|
2021-11-12T08:50:24.000Z
|
from PIL import Image
from petpetgif.saveGif import save_transparent_gif
from pkg_resources import resource_stream
# Number of frames in the generated pet-pet animation.
frames = 10
# Output size (width, height) in pixels of every frame.
resolution = (128, 128)
# Frame delay constant. NOTE(review): currently unused -- make() passes a
# hard-coded 'durations=30' to save_transparent_gif; confirm which value
# is intended before wiring this in.
delay = 20
def make(source, dest):
    """Render the pet-pet animation of the given image into a GIF.

    :param source: A filename (string), pathlib.Path object or a file object.
        (This parameter corresponds and is passed to the PIL.Image.open() method.)
    :param dest: A filename (string), pathlib.Path object or a file object.
        (This parameter corresponds and is passed to the PIL.Image.save() method.)
    :return: None
    """
    res_x, res_y = resolution
    base = Image.open(source).convert('RGBA').resize(resolution)
    rendered = []
    for frame in range(frames):
        # The squeeze factor rises to the middle of the animation and
        # then falls again, producing the "patting" deformation.
        squeeze = frame if frame < frames / 2 else frames - frame
        width = 0.8 + squeeze * 0.02
        height = 0.8 - squeeze * 0.05
        offset_x = (1 - width) * 0.5 + 0.1
        offset_y = (1 - height) - 0.08
        scaled = base.resize((round(width * res_x), round(height * res_y)))
        canvas = Image.new('RGBA', size=resolution, color=(0, 0, 0, 0))
        canvas.paste(scaled, (round(offset_x * res_x), round(offset_y * res_y)))
        # Overlay the matching hand sprite bundled with the package.
        hand = Image.open(resource_stream(__name__, f"img/pet{frame}.gif"))
        hand = hand.convert('RGBA').resize(resolution)
        canvas.paste(hand, mask=hand)
        rendered.append(canvas)
    save_transparent_gif(rendered, durations=30, save_file=dest)
| 38.685714
| 162
| 0.639586
|
from PIL import Image
from petpetgif.saveGif import save_transparent_gif
from pkg_resources import resource_stream
frames = 10
resolution = (128, 128)
delay = 20
def make(source, dest):
images = []
base = Image.open(source).convert('RGBA').resize(resolution)
for i in range(frames):
squeeze = i if i < frames/2 else frames - i
width = 0.8 + squeeze * 0.02
height = 0.8 - squeeze * 0.05
offsetX = (1 - width) * 0.5 + 0.1
offsetY = (1 - height) - 0.08
canvas = Image.new('RGBA', size=resolution, color=(0, 0, 0, 0))
canvas.paste(base.resize((round(width * resolution[0]), round(height * resolution[1]))), (round(offsetX * resolution[0]), round(offsetY * resolution[1])))
pet = Image.open(resource_stream(__name__, f"img/pet{i}.gif")).convert('RGBA').resize(resolution)
canvas.paste(pet, mask=pet)
images.append(canvas)
save_transparent_gif(images, durations=30, save_file=dest)
| true
| true
|
f716cf6f63fdd848b405bbb16c421fdd80bde9ff
| 1,873
|
py
|
Python
|
remora/tests/test_tracker.py
|
Hugoch/remora-python
|
1bb19200135bb84ee5e6e28fe25057ed096c8e31
|
[
"MIT"
] | null | null | null |
remora/tests/test_tracker.py
|
Hugoch/remora-python
|
1bb19200135bb84ee5e6e28fe25057ed096c8e31
|
[
"MIT"
] | null | null | null |
remora/tests/test_tracker.py
|
Hugoch/remora-python
|
1bb19200135bb84ee5e6e28fe25057ed096c8e31
|
[
"MIT"
] | null | null | null |
import unittest
from remora.tracker import Tracker
from remora.collectors import AsyncCollector
import requests_mock
import json
class TestTracker(unittest.TestCase):
    """Tests for Tracker, exercised against a mocked collector endpoint."""

    # Local endpoint the AsyncCollector posts its payloads to.
    URL = 'http://127.0.0.1:31311'

    def test_send_payload(self):
        """A tracked start event reaches the collector with its metadata."""
        with requests_mock.mock() as mocker:
            request = mocker.put(self.URL)
            collector = AsyncCollector('http://127.0.0.1:31311')
            tracker = Tracker(collector, namespace='foo', app_id='bar')
            tracker.track_application_start()
            # Wait until the background worker has flushed the event.
            collector.queue.join()
            payload = json.loads(request.last_request.text)
            self.assertEqual(payload['name'], 'start')
            self.assertEqual(payload['app_id'], 'bar')
            self.assertEqual(payload['namespace'], 'foo')

    def test_send_payload_with_custom_fields(self):
        """Extra keyword arguments to Tracker are forwarded in the payload."""
        with requests_mock.mock() as mocker:
            request = mocker.put(self.URL)
            collector = AsyncCollector('http://127.0.0.1:31311')
            tracker = Tracker(collector, namespace='foo', app_id='bar', cpu_count=4, user_type='internal')
            tracker.track_application_start()
            collector.queue.join()
            payload = json.loads(request.last_request.text)
            self.assertEqual(payload['name'], 'start')
            self.assertEqual(payload['app_id'], 'bar')
            self.assertEqual(payload['namespace'], 'foo')
            self.assertEqual(payload['cpu_count'], 4)
            self.assertEqual(payload['user_type'], 'internal')

    def test_duration_decorator(self):
        """track_duration decorates a callable and reports a duration field."""
        with requests_mock.mock() as mocker:
            request = mocker.put(self.URL)

            def noop(arg):
                pass

            collector = AsyncCollector('http://127.0.0.1:31311')
            tracker = Tracker(collector, namespace='foo', app_id='bar')
            tracker.track_duration('a_duration')(noop)(1)
            collector.queue.join()
            payload = json.loads(request.last_request.text)
            self.assertIn('duration', payload)
| 39.020833
| 100
| 0.571276
|
import unittest
from remora.tracker import Tracker
from remora.collectors import AsyncCollector
import requests_mock
import json
class TestTracker(unittest.TestCase):
def test_send_payload(self):
url = 'http://127.0.0.1:31311'
with requests_mock.mock() as m:
req = m.put(url)
collector = AsyncCollector('http://127.0.0.1:31311')
t = Tracker(collector, namespace='foo', app_id='bar')
t.track_application_start()
collector.queue.join()
res = json.loads(req.last_request.text)
assert res['name'] == 'start'
assert res['app_id'] == 'bar'
assert res['namespace'] == 'foo'
def test_send_payload_with_custom_fields(self):
url = 'http://127.0.0.1:31311'
with requests_mock.mock() as m:
req = m.put(url)
collector = AsyncCollector('http://127.0.0.1:31311')
t = Tracker(collector, namespace='foo', app_id='bar', cpu_count=4, user_type='internal')
t.track_application_start()
collector.queue.join()
res = json.loads(req.last_request.text)
assert res['name'] == 'start'
assert res['app_id'] == 'bar'
assert res['namespace'] == 'foo'
assert res['cpu_count'] == 4
assert res['user_type'] == 'internal'
def test_duration_decorator(self):
url = 'http://127.0.0.1:31311'
with requests_mock.mock() as m:
req = m.put(url)
def test(arg):
pass
collector = AsyncCollector('http://127.0.0.1:31311')
t = Tracker(collector, namespace='foo', app_id='bar')
t.track_duration('a_duration')(test)(1)
collector.queue.join()
res = json.loads(req.last_request.text)
assert 'duration' in res
| true
| true
|
f716d0756983d74be295f8050487b4e269c28f44
| 3,562
|
py
|
Python
|
src/client/user_bonus.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 153
|
2021-02-06T13:41:11.000Z
|
2022-03-19T17:51:01.000Z
|
src/client/user_bonus.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 29
|
2021-01-15T12:54:37.000Z
|
2022-02-07T07:45:32.000Z
|
src/client/user_bonus.py
|
ZackPashkin/toloka-kit
|
8f650e5d8cdded1949ca633cf78f9b851ce839bb
|
[
"Apache-2.0"
] | 17
|
2021-01-29T15:20:04.000Z
|
2022-01-30T07:21:03.000Z
|
__all__ = [
'UserBonus',
'UserBonusCreateRequestParameters'
]
from attr.validators import optional, instance_of
import datetime
from decimal import Decimal
from typing import Any
from .primitives.base import BaseTolokaObject
from .primitives.parameter import Parameters
from ..util._codegen import attribute
class UserBonus(BaseTolokaObject):
    """Issuing a bonus to a specific performer.

    It is an addition to payment for completed tasks.

    Attributes:
        user_id: Performer ID to whom the bonus will be issued.
        amount: The bonus amount in dollars. Can be from 0.01 to 100 dollars per user per time.
        private_comment: Comments that are only visible to the requester.
        public_title: Message header for the user. You can provide a title in several languages
            (the message will come in the user's language).
        public_message: Message text for the user. You can provide text in several languages
            (the message will come in the user's language).
        without_message: Do not send a bonus message to the user. To award a bonus without a message, specify null
            for public_title and public_message and True for without_message.
        assignment_id: The answer to the task for which this bonus was issued.
        id: Internal ID of the issued bonus. Read only.
        created: Date the bonus was awarded, in UTC. Read only.

    Example:
        How to create a bonus with a message for a specific assignment.

        >>> new_bonus = toloka_client.create_user_bonus(
        >>>     UserBonus(
        >>>         user_id='1',
        >>>         amount='0.50',
        >>>         public_title='Perfect job!',
        >>>         public_message='You are the best performer EVER!',
        >>>         assignment_id='012345'
        >>>     )
        >>> )
        ...

        How to create a bonus with a message in several languages.

        >>> new_bonus = toloka_client.create_user_bonus(
        >>>     UserBonus(
        >>>         user_id='1',
        >>>         amount='0.10',
        >>>         public_title={
        >>>             'EN': 'Good Job!',
        >>>             'RU': 'Молодец!',
        >>>         },
        >>>         public_message={
        >>>             'EN': 'Ten tasks completed',
        >>>             'RU': 'Выполнено 10 заданий',
        >>>         },
        >>>     )
        >>> )
        ...
    """

    user_id: str
    amount: Decimal = attribute(validator=optional(instance_of(Decimal)))
    private_comment: str
    public_title: Any
    public_message: Any
    without_message: bool
    assignment_id: str

    # Read-only fields, assigned by the server.
    id: str = attribute(readonly=True)
    created: datetime.datetime = attribute(readonly=True)
class UserBonusCreateRequestParameters(Parameters):
    """Parameters for creating performer bonuses.

    Used in methods 'create_user_bonus', 'create_user_bonuses' and 'create_user_bonuses_async'
    of the class TolokaClient, to clarify the behavior when creating bonuses.

    Attributes:
        operation_id: Operation ID. If asynchronous creation is used, by this identifier you can later get
            results of creating bonuses.
        skip_invalid_items: Validation parameters of objects:
            * True - Award a bonus if the object with bonus information passed validation. Otherwise, skip the bonus.
            * False - Default behaviour. Stop the operation and don't award bonuses if at least one object didn't pass validation.
    """

    operation_id: str
    skip_invalid_items: bool
| 36.721649
| 130
| 0.630264
|
__all__ = [
'UserBonus',
'UserBonusCreateRequestParameters'
]
from attr.validators import optional, instance_of
import datetime
from decimal import Decimal
from typing import Any
from .primitives.base import BaseTolokaObject
from .primitives.parameter import Parameters
from ..util._codegen import attribute
class UserBonus(BaseTolokaObject):
user_id: str
amount: Decimal = attribute(validator=optional(instance_of(Decimal)))
private_comment: str
public_title: Any
public_message: Any
without_message: bool
assignment_id: str
id: str = attribute(readonly=True)
created: datetime.datetime = attribute(readonly=True)
class UserBonusCreateRequestParameters(Parameters):
operation_id: str
skip_invalid_items: bool
| true
| true
|
f716d0a144b04a18cf4e4f949f0d63ac16f60306
| 120,881
|
py
|
Python
|
src/sage/misc/sage_input.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/misc/sage_input.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/misc/sage_input.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 1
|
2020-07-24T12:20:37.000Z
|
2020-07-24T12:20:37.000Z
|
r"""
Sage Input Formatting
This module provides the function :func:`sage_input` that takes an
arbitrary sage value and produces a sequence of commands that, if typed
at the ``sage:`` prompt, will recreate the value. If this is not
implemented for a particular value, then an exception is raised instead.
This might be useful in understanding a part of Sage, or for debugging.
For instance, if you have a value produced in a complicated way in the
middle of a debugging session, you could use :func:`sage_input` to find
a simple way to produce the same value. We attempt to produce commands
that are readable and idiomatic.::
sage: sage_input(3)
3
sage: sage_input((polygen(RR) + RR(pi))^2, verify=True)
# Verified
R.<x> = RR[]
x^2 + 6.2831853071795862*x + 9.869604401089358
With ``verify=True``, :func:`sage_input` also verifies the results, by
calling :func:`~sage.misc.sage_eval.sage_eval` on the result and
verifying that it is equal to the input.::
sage: sage_input(GF(2)(1), verify=True)
# Verified
GF(2)(1)
We can generate code that works without the preparser, with
``preparse=False``; or we can generate code that will work whether or
not the preparser is enabled, with ``preparse=None``. Generating code
with ``preparse=False`` may be useful to see how to create a certain
value in a Python or Cython source file.::
sage: sage_input(5, verify=True)
# Verified
5
sage: sage_input(5, preparse=False)
ZZ(5)
sage: sage_input(5, preparse=None)
ZZ(5)
sage: sage_input(5r, verify=True)
# Verified
5r
sage: sage_input(5r, preparse=False)
5
sage: sage_input(5r, preparse=None)
int(5)
Adding :func:`sage_input` support to your own classes is
straightforward. You need to add a :func:`_sage_input_` method which
returns a :class:`SageInputExpression` (henceforth abbreviated as SIE)
which will reconstruct this instance of your class.
A ``_sage_input_`` method takes two parameters, conventionally named
``sib`` and ``coerced``. The first argument is a
:class:`SageInputBuilder`; it has methods to build SIEs. The second
argument, ``coerced``, is a boolean. This is only useful if your class
is a subclass of :class:`Element` (although it is always present). If
``coerced`` is ``False``, then your method must generate an expression
which will evaluate to a value of the correct type with the correct
parent. If ``coerced`` is ``True``, then your method may generate an
expression of a type that has a canonical coercion to your type; and if
``coerced`` is 2, then your method may generate an expression of a type
that has a conversion to your type.
Let's work through some examples. We'll build a sequence of functions
that would be acceptable as ``_sage_input_`` methods for the
:class:`~sage.rings.rational.Rational` class.
Here's the first and simplest version.::
sage: def qq_sage_input_v1(self, sib, coerced):
....: return sib(self.numerator())/sib(self.denominator())
We see that given a :class:`SageInputBuilder` ``sib``, you can construct
a SIE for a value ``v`` simply with ``sib(v)``, and you can construct a
SIE for a quotient with the division operator. Of course, the other
operators also work, and so do function calls, method calls, subscripts,
etc.
We'll test with the following code, which you don't need to understand.
(It produces a list of 8 results, showing the formatted versions of -5/7
and 3, with the preparser either enabled or disabled and either with or
without an automatic coercion to QQ.)::
sage: from sage.misc.sage_input import SageInputBuilder
sage: def test_qq_formatter(fmt):
....: results = []
....: for v in [-5/7, QQ(3)]:
....: for pp in [False, True]:
....: for coerced in [False, True]:
....: sib = SageInputBuilder(preparse=pp)
....: results.append(sib.result(fmt(v, sib, coerced)))
....: return results
sage: test_qq_formatter(qq_sage_input_v1)
[-ZZ(5)/ZZ(7), -ZZ(5)/ZZ(7), -5/7, -5/7, ZZ(3)/ZZ(1), ZZ(3)/ZZ(1), 3/1, 3/1]
Let's try for some shorter, perhaps nicer-looking output. We'll start
by getting rid of the ``ZZ`` in the denominators; even without the
preparser, ``-ZZ(5)/7 == -ZZ(5)/ZZ(7)``.::
sage: def qq_sage_input_v2(self, sib, coerced):
....: return sib(self.numerator())/sib.int(self.denominator())
The ``int`` method on :class:`SageInputBuilder` returns a SIE for an
integer that is always represented in the simple way, without coercions.
(So, depending on the preparser mode, it might read in as an
:class:`~sage.rings.integer.Integer`, an ``int``, or a ``long``.)::
sage: test_qq_formatter(qq_sage_input_v2)
[-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, ZZ(3)/1, ZZ(3)/1, 3/1, 3/1]
Next let's get rid of the divisions by 1. These are more complicated,
since if we're not careful we'll get results in \ZZ instead of \QQ.::
sage: def qq_sage_input_v3(self, sib, coerced):
....: if self.denominator() == 1:
....: if coerced:
....: return sib.int(self.numerator())
....: else:
....: return sib.name('QQ')(sib.int(self.numerator()))
....: return sib(self.numerator())/sib.int(self.denominator())
We see that the \method{name} method gives an SIE representing a \sage
constant or function.::
sage: test_qq_formatter(qq_sage_input_v3)
[-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, QQ(3), 3, QQ(3), 3]
This is the prettiest output we're going to get, but let's make one
further refinement. Other :class:`_sage_input_` methods, like the one
for polynomials, analyze the structure of SIEs; they work better (give
prettier output) if negations are at the outside. If the above code
were used for rationals, then ``sage_input(polygen(QQ) - 2/3)`` would
produce ``x + (-2/3)``; if we change to the following code, then we
would get ``x - 2/3`` instead.::
sage: def qq_sage_input_v4(self, sib, coerced):
....: num = self.numerator()
....: neg = (num < 0)
....: if neg: num = -num
....: if self.denominator() == 1:
....: if coerced:
....: v = sib.int(num)
....: else:
....: v = sib.name('QQ')(sib.int(num))
....: else:
....: v = sib(num)/sib.int(self.denominator())
....: if neg: v = -v
....: return v
sage: test_qq_formatter(qq_sage_input_v4)
[-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, QQ(3), 3, QQ(3), 3]
AUTHORS:
- Carl Witty (2008-04): new file
- Vincent Delecroix (2015-02): documentation formatting
"""
##########################################################################
#
# Copyright (C) 2008 Carl Witty <Carl.Witty@gmail.com>
# 2015 Vincent Delecroix <20100.delecroix@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#
##########################################################################
def sage_input(x, preparse=True, verify=False, allow_locals=False):
    r"""
    Return a sequence of commands that can be used to rebuild the object ``x``.

    INPUT:

    - ``x`` - the value we want to find an input form for

    - ``preparse`` - (default ``True``) Whether to generate code that requires
      the preparser.  With ``True``, generated code requires the preparser.
      With ``False``, generated code requires that the preparser not be used.
      With ``None``, generated code will work whether or not the preparser is
      used.

    - ``verify`` - (default ``False``) If ``True``, then the answer will be
      evaluated with :func:`sage_eval`, and an exception will be raised if the
      result is not equal to the original value.  (In fact, for ``verify=True``,
      :func:`sage_input` is effectively run three times, with ``preparse`` set
      to ``True``, ``False``, and ``None``, and all three results are checked.)
      This is particularly useful for doctests.

    - ``allow_locals`` - (default ``False``) If ``True``, then values that
      :func:`sage_input` cannot handle are returned in a dictionary, and the
      returned code assumes that this dictionary is passed as the ``locals``
      parameter of :func:`sage_eval`.  (Otherwise, if :func:`sage_input` cannot
      handle a value, an exception is raised.)

    EXAMPLES::

        sage: sage_input(GF(2)(1))
        GF(2)(1)
        sage: sage_input((GF(2)(0), GF(2)(1)), verify=True)
        # Verified
        GF_2 = GF(2)
        (GF_2(0), GF_2(1))

    When the preparser is enabled, we use the \sage generator syntax.::

        sage: K.<x> = GF(5)[]
        sage: sage_input(x^3 + 2*x, verify=True)
        # Verified
        R.<x> = GF(5)[]
        x^3 + 2*x
        sage: sage_input(x^3 + 2*x, preparse=False)
        R = GF(5)['x']
        x = R.gen()
        x**3 + 2*x

    The result of :func:`sage_input` is actually a pair of strings with a
    special ``__repr__`` method to print nicely.::

        sage: r = sage_input(RealField(20)(pi), verify=True)
        sage: r
        # Verified
        RealField(20)(3.1415939)
        sage: isinstance(r, tuple)
        True
        sage: len(r)
        2
        sage: tuple(r)
        ('# Verified\n', 'RealField(20)(3.1415939)')

    We cannot find an input form for a function.::

        sage: sage_input((3, lambda x: x))
        Traceback (most recent call last):
        ...
        ValueError: Can't convert <function <lambda> at 0x...> to sage_input form

    But we can have :func:`sage_input` continue anyway, and return an input form
    for the rest of the expression, with ``allow_locals=True``.::

        sage: r = sage_input((3, lambda x: x), verify=True, allow_locals=True)
        sage: r
        LOCALS:
          _sil1: <function <lambda> at 0x...>
        # Verified
        (3, _sil1)
        sage: tuple(r)
        ('# Verified\n', '(3, _sil1)', {'_sil1': <function <lambda> at 0x...>})
    """
    if not verify:
        # Fast path: build the answer once with the requested preparse mode.
        sib = SageInputBuilder(allow_locals=allow_locals, preparse=preparse)
        return sib.result(sib(x))
    # In verify mode, we actually compute and verify the answer with
    # all three settings of preparse.
    for pp in (True, False, None):
        sib = SageInputBuilder(allow_locals=allow_locals, preparse=pp)
        ans = sib.result(sib(x))
        # Raises if evaluating 'ans' does not reproduce 'x'.
        verify_si_answer(x, ans, pp)
        if pp == preparse:
            # Keep the answer matching the requested preparse mode, tagged
            # as verified; it is returned after all three modes pass.
            ans_l = list(ans)
            ans_l[0] = '# Verified\n' + ans_l[0]
            final_answer = SageInputAnswer(*ans_l)
    return final_answer
class SageInputBuilder:
r"""
An instance of this class is passed to ``_sage_input_`` methods.
It keeps track of the current state of the ``_sage_input_`` process,
and contains many utility methods for building :class:`SageInputExpression`
objects.
In normal use, instances of :class:`SageInputBuilder` are created
internally by :func:`sage_input`, but it may be useful to create
an instance directly for testing or doctesting.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
We can create a :class:`SageInputBuilder`, use it to create some
:class:`SageInputExpression` s, and get a result. (As mentioned
above, this is only useful for testing or doctesting; normally
you would just use :func:`sage_input`.)::
sage: sib = SageInputBuilder()
sage: sib.result((sib(3) + sib(4)) * (sib(5) + sib(6)))
(3 + 4)*(5 + 6)
"""
    def __init__(self, allow_locals=False, preparse=True):
        r"""
        Initialize an instance of :class:`SageInputBuilder`.
        In normal use, instances of :class:`SageInputBuilder` are created
        internally by :func:`sage_input`, but it may be useful to create
        an instance directly for testing or doctesting.
        INPUT:
        - ``allow_locals`` - (default ``False``) If true, then values
            that cannot be converted to input form will be stored in
            a dictionary, which must be passed as the ``locals``
            when evaluating the result.
        - ``preparse`` -- (default ``True``) If true, then the result
            will assume that the preparser is enabled.  If false, then
            the result will assume that the preparser is disabled.
            If ``None``, then the result will work whether or
            not the preparser is enabled.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: SageInputBuilder().preparse()
            True
            sage: SageInputBuilder(preparse=False).preparse()
            False
        """
        # Internal bookkeeping for the conversion and caching machinery.
        self._allow_locals = allow_locals
        self._preparse = preparse
        self._cached_types = set()    # types with at least one cached value
        self._cache = {}              # (parent(x), x) -> SIE (equality-keyed)
        self._id_cache = {}           # id(x) -> (x, SIE) (identity-keyed)
        self._parent_gens = {}        # parent -> list of generator SIEs
        self._next_local = 1          # counter for generated '_silN' names
        self._locals = {}             # name -> unconvertible value (allow_locals)
    def __call__(self, x, coerced=False):
        r"""
        Tries to convert an arbitrary value ``x`` into a
        :class:`SageInputExpression` (an SIE).
        We first check to see if an SIE has been cached for ``x``;
        if so, we return it.  If ``x`` is already an SIE, we return
        it unchanged.
        If ``x`` has a \method{_sage_input_} method, we call that
        method.
        Otherwise, if ``x`` is a value of some Python type that
        we know how to deal with, we convert it directly.
        Finally, for values we don't know how to convert, if
        ``self._allow_locals`` is true, we add it to a
        ``locals`` dictionary.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib(sib(3)))
            3
            sage: sib = SageInputBuilder()
            sage: sib.result(sib(GF(17)(5)))
            GF(17)(5)
        The argument ``coerced=True`` or ``coerced=2`` will get
        passed to the \method{_sage_input_} method of the argument.::
            sage: sib = SageInputBuilder()
            sage: sib.result(sib(GF(17)(5), True))
            5
            sage: sib.result(sib(RealField(200)(1.5), True))
            1.5000000000000000000000000000000000000000000000000000000000000
            sage: sib.result(sib(RealField(200)(1.5), 2))
            1.5
        Since :func:`sage_input` directly calls this method, all
        of the following are indirect doctests.::
            sage: sage_input(True)
            True
            sage: sage_input(-5r, verify=True)
            # Verified
            -5r
            sage: sage_input(7r, preparse=False, verify=True)
            # Verified
            7
            sage: sage_input(-11r, preparse=None, verify=True)
            # Verified
            -int(11)
            sage: sage_input(long(-5), verify=True)
            # Verified
            -long(5)
            sage: sage_input(long(-7), preparse=False, verify=True)
            # Verified
            -7L
            sage: sage_input(long(11), preparse=None, verify=True)
            # Verified
            long(11)
            sage: sage_input(long(2^70), verify=True)
            # Verified
            1180591620717411303424r
            sage: sage_input(-long(2^80), preparse=False, verify=True)
            # Verified
            -1208925819614629174706176
            sage: sage_input(long(2^75), preparse=None, verify=True)
            # Verified
            long(37778931862957161709568)
            sage: sage_input(float(-infinity), preparse=True, verify=True)
            # Verified
            -float(infinity)
            sage: sage_input(float(NaN), preparse=True, verify=True)
            # Verified
            float(NaN)
            sage: sage_input(float(-pi), preparse=True, verify=True)
            # Verified
            float(-RR(3.1415926535897931))
            sage: sage_input(float(42), preparse=True, verify=True)
            # Verified
            float(42)
            sage: sage_input("Hello, world\n", verify=True)
            # Verified
            'Hello, world\n'
            sage: sage_input("'", verify=True)
            # Verified
            "'"
            sage: sage_input('"', verify=True)
            # Verified
            '"'
            sage: sage_input(''' "'Hi,' she said." ''', verify=True)
            # Verified
            ' "\'Hi,\' she said." '
            sage: sage_input('Icky chars: \0\n\t\b\'\"\200\300\234', verify=True)
            # Verified
            'Icky chars: \x00\n\t\x08\'"\x80\xc0\x9c'
            sage: sage_input(u'unicode with spectral: \u1234\U00012345', verify=True)
            # Verified
            u'unicode with spectral: \u1234\U00012345'
            sage: sage_input((2, 3.5, 'Hi'), verify=True)
            # Verified
            (2, 3.5, 'Hi')
            sage: sage_input(lambda x: x)
            Traceback (most recent call last):
            ...
            ValueError: Can't convert <function <lambda> at 0x...> to sage_input form
            sage: sage_input(lambda x: x, allow_locals=True, verify=True)
            LOCALS:
            _sil1: <function <lambda> at 0x...>
            # Verified
            _sil1
        """
        # We want to look up x in our cache, to see if we've seen it before.
        # However, we don't want to assume that hashing x is always
        # efficient, so we only try the lookup if some value of the same
        # type as x has been cached.
        from sage.structure.all import parent
        if type(x) in self._cached_types:
            v = self._cache.get((parent(x), x))
            if v is not None: return v
        # The identity-keyed cache is always safe to consult (no hashing of
        # x is needed); the stored tuple is (x, sie), hence the [1].
        v = self._id_cache.get(id(x))
        if v is not None: return v[1]
        if isinstance(x, SageInputExpression):
            return x
        if hasattr(x, '_sage_input_'):
            return x._sage_input_(self, coerced)
        if x is None:
            return SIE_literal_stringrep(self, 'None')
        # bool must be tested before int: isinstance(True, int) holds, and
        # we want True/False printed by name rather than as 1/0.
        if isinstance(x, bool):
            return SIE_literal_stringrep(self, str(x))
        if isinstance(x, int) or \
           (isinstance(x, long) and isinstance(int(x), long)):
            # For longs that don't fit in an int, we just use the int
            # code; it will get extended to long automatically.
            if self._preparse == True:
                if x < 0:
                    return -SIE_literal_stringrep(self, str(-x) + 'r')
                else:
                    return SIE_literal_stringrep(self, str(x) + 'r')
            elif self._preparse == False:
                return self.int(x)
            else:
                tyname = 'int' if isinstance(x, int) else 'long'
                if x < 0:
                    return -self.name(tyname)(self.int(-x))
                else:
                    return self.name(tyname)(self.int(x))
        if isinstance(x, long):
            # This must be a long that does fit in an int, so we need either
            # long(x) or an 'L' suffix.
            # With the current preparser, 1Lr does not work.
            # 1rL does work; but that's just ugly, so I don't use it.
            if self._preparse == False:
                if x < 0:
                    return -SIE_literal_stringrep(self, str(-x) + 'L')
                else:
                    return SIE_literal_stringrep(self, str(x) + 'L')
            else:
                if x < 0:
                    return -self.name('long')(self.int(-x))
                else:
                    return self.name('long')(self.int(x))
        if isinstance(x, float):
            # floats could often have prettier output,
            # but I think they're rare enough in Sage that it's not
            # worth the effort.
            from sage.all import RR, ZZ, infinity
            if x == float(infinity):
                return self.name('float')(self.name('infinity'))
            # NaN is the only float value not equal to itself.
            if x != x:
                return self.name('float')(self.name('NaN'))
            if x == -float(infinity):
                return -self.name('float')(self.name('infinity'))
            if self._preparse == False and float(str(x)) == x:
                if x < 0:
                    return -SIE_literal_stringrep(self, str(-x))
                else:
                    return SIE_literal_stringrep(self, str(x))
            rrx = RR(x)
            # Integer-valued doubles below 2^53 are exactly representable,
            # so float(<integer literal>) round-trips and reads better.
            if rrx in ZZ and abs(rrx) < (1 << 53):
                return self.name('float')(self.int(ZZ(rrx)))
            return self.name('float')(RR(x))
        if isinstance(x, (str, unicode)):
            return SIE_literal_stringrep(self, repr(x))
        if isinstance(x, tuple):
            return SIE_tuple(self, [self(_) for _ in x], False)
        if isinstance(x, list):
            return SIE_tuple(self, [self(_) for _ in x], True)
        if isinstance(x, dict):
            return self.dict(x)
        if self._allow_locals:
            loc = self._next_local
            self._next_local += 1
            loc_name = '_sil%d' % loc
            self._locals[loc_name] = x
            return SIE_literal_stringrep(self, loc_name)
        else:
            raise ValueError("Can't convert {} to sage_input form".format(x))
    def preparse(self):
        r"""
        Checks the preparse status.
        It returns ``True`` if the preparser will be enabled, ``False`` if it
        will be disabled, and ``None`` if the result must work whether or not
        the preparser is enabled.
        For example, this is useful in the \method{_sage_input_}
        methods of :class:`~sage.rings.integer.Integer` and :class:`RealNumber`; but most
        \method{_sage_input_} methods will not need to examine this.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: SageInputBuilder().preparse()
            True
            sage: SageInputBuilder(preparse=False).preparse()
            False
        """
        # Tri-state flag set once in __init__: True / False / None.
        return self._preparse
def int(self, n):
r"""
Return a raw SIE from the integer ``n``
As it is raw, it may read back as a Sage Integer, a Python int or a
Python long, depending on its size and whether the preparser is enabled.
INPUT:
- ``n`` - a Sage Integer, a Python int or a Python long
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sib.result(sib.int(-3^50))
-717897987691852588770249
sage: sib = SageInputBuilder()
sage: sib.result(sib.int(long(2^65)))
36893488147419103232
sage: sib = SageInputBuilder()
sage: sib.result(sib.int(-42r))
-42
"""
if n < 0:
return -SIE_literal_stringrep(self, -n)
else:
return SIE_literal_stringrep(self, n)
    def float_str(self, n):
        r"""
        Given a string representing a floating-point number,
        produces a :class:`SageInputExpression` that formats as that
        string.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.float_str(repr(RR(e))))
            2.71828182845905
        """
        # The string is emitted verbatim; no validation is performed here.
        return SIE_literal_stringrep(self, n)
    def name(self, n):
        r"""
        Given a string representing a Python name,
        produces a :class:`SageInputExpression` for that name.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.name('pi') + sib.name('e'))
            pi + e
        """
        # A bare name is just an atomic literal in the output.
        return SIE_literal_stringrep(self, n)
def cache(self, x, sie, name):
r"""
INPUT:
- ``x`` - an arbitrary value
- ``sie`` - a :class:`SageInputExpression`
- ``name`` - a requested variable name
Enters ``x`` and ``sie`` in a cache, so that subsequent calls
``self(x)`` will directly return ``sie``. Also, marks the
requested name of this ``sie`` to be ``name``.
This should almost always be called as part of the
\method{_sage_input_} method of a parent. It may also be called
on values of an arbitrary type, which may be useful if the values
are both large and likely to be used multiple times in a single
expression.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie42 = sib(GF(101)(42))
sage: sib.cache(GF(101)(42), sie42, 'the_ultimate_answer')
sage: sib.result(sib(GF(101)(42)) + sib(GF(101)(42)))
the_ultimate_answer = GF(101)(42)
the_ultimate_answer + the_ultimate_answer
Note that we don't assign the result to a variable if the value
is only used once.::
sage: sib = SageInputBuilder()
sage: sie42 = sib(GF(101)(42))
sage: sib.cache(GF(101)(42), sie42, 'the_ultimate_answer')
sage: sib.result(sib(GF(101)(42)) + sib(GF(101)(43)))
GF_101 = GF(101)
GF_101(42) + GF_101(43)
"""
from sage.structure.all import parent
self._cached_types.add(type(x))
self._cache[(parent(x), x)] = sie
sie._sie_preferred_varname = name
    def id_cache(self, x, sie, name):
        r"""
        INPUT:
        - ``x`` - an arbitrary value
        - ``sie`` - a :class:`SageInputExpression`
        - ``name`` - a requested variable name
        Enters ``x`` and ``sie`` in a cache, so that subsequent calls
        ``self(x)`` will directly return ``sie``.  Also, marks the
        requested name of this ``sie`` to be ``name``.  Differs from
        the \method{cache} method in that the cache is keyed by
        ``id(x)`` instead of by ``x``.
        This may be called on values of an arbitrary type, which may
        be useful if the values are both large and likely to be used
        multiple times in a single expression; it should be preferred to
        \method{cache} if equality on the values is difficult or impossible
        to compute.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: x = polygen(ZZ)
            sage: sib = SageInputBuilder()
            sage: my_42 = 42*x
            sage: sie42 = sib(my_42)
            sage: sib.id_cache(my_42, sie42, 'the_ultimate_answer')
            sage: sib.result(sib(my_42) + sib(my_42))
            R.<x> = ZZ[]
            the_ultimate_answer = 42*x
            the_ultimate_answer + the_ultimate_answer
        Since id_cache keys off of object identity ("is"), the
        following does not trigger the cache.::
            sage: sib.result(sib(42*x) + sib(42*x))
            42*x + 42*x
        Note that we don't assign the result to a variable if the value
        is only used once.::
            sage: sib = SageInputBuilder()
            sage: my_42 = 42*x
            sage: sie42 = sib(my_42)
            sage: sib.id_cache(my_42, sie42, 'the_ultimate_answer')
            sage: sib.result(sib(my_42) + sib(43*x))
            R.<x> = ZZ[]
            42*x + 43*x
        """
        # If we just mapped id(x) -> sie, then it's possible that x could
        # be freed and another value allocated at the same position,
        # corrupting the cache.  But since we store x, that can't happen;
        # we don't even have to look at x when we read the cache.
        self._id_cache[id(x)] = (x, sie)
        sie._sie_preferred_varname = name
    def import_name(self, module, name, alt_name=None):
        r"""
        INPUT:
        - ``module``, ``name``, ``alt_name`` -- strings
        Creates an expression that will import a name from a module and
        then use that name.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: v1 = sib.import_name('sage.foo.bar', 'baz')
            sage: v2 = sib.import_name('sage.foo.bar', 'ZZ', 'not_the_real_ZZ')
            sage: sib.result(v1+v2)
            from sage.foo.bar import baz
            from sage.foo.bar import ZZ as not_the_real_ZZ
            baz + not_the_real_ZZ
        We adjust the names if there is a conflict.::
            sage: sib = SageInputBuilder()
            sage: v1 = sib.import_name('sage.foo', 'poly')
            sage: v2 = sib.import_name('sage.bar', 'poly')
            sage: sib.result(v1+v2)
            from sage.foo import poly as poly1
            from sage.bar import poly as poly2
            poly1 + poly2
        """
        # Name-conflict resolution happens later, at formatting time.
        return SIE_import_name(self, module, name, alt_name)
def assign(self, e, val):
r"""
Constructs a command that performs the assignment ``e=val``.
Can only be used as an argument to the ``command`` method.
INPUT:
- ``e``, ``val`` -- SageInputExpression
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: circular = sib([None])
sage: sib.command(circular, sib.assign(circular[0], circular))
sage: sib.result(circular)
si = [None]
si[0] = si
si
"""
e = self(e)
val = self(val)
return SIE_assign(self, e, val)
def command(self, v, cmd):
r"""
INPUT:
- ``v``, ``cmd`` -- SageInputExpression
Attaches a command to v, which will be executed before v is used.
Multiple commands will be executed in the order added.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: incr_list = sib([])
sage: sib.command(incr_list, incr_list.append(1))
sage: sib.command(incr_list, incr_list.extend([2, 3]))
sage: sib.result(incr_list)
si = []
si.append(1)
si.extend([2, 3])
si
"""
v = self(v)
cmd = self(cmd)
v._sie_commands.append(cmd)
def dict(self, entries):
r"""
Given a dictionary, or a list of (key, value) pairs,
produces a :class:`SageInputExpression` representing
the dictionary.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sib.result(sib.dict({1:1, 2:5/2, 3:100/3}))
{1:1, 2:5/2, 3:100/3}
sage: sib.result(sib.dict([('hello', 'sunshine'), ('goodbye', 'rain')]))
{'hello':'sunshine', 'goodbye':'rain'}
"""
if isinstance(entries, dict):
entries = list(entries.items())
entries = [(self(key),self(val)) for (key,val) in entries]
return SIE_dict(self, entries)
    def getattr(self, sie, attr):
        r"""
        Given a :class:`SageInputExpression` representing ``foo``
        and an attribute name bar, produce a :class:`SageInputExpression`
        representing ``foo.bar``.  Normally, you could just use
        attribute-access syntax, but that doesn't work if bar
        is some attribute that bypasses __getattr__ (such as if
        bar is '__getattr__' itself).
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.getattr(ZZ, '__getattr__')
            {getattr: {atomic:ZZ}.__getattr__}
            sage: sib.getattr(sib.name('foo'), '__new__')
            {getattr: {atomic:foo}.__new__}
        """
        # self(sie) also accepts raw values, not just SIEs.
        return SIE_getattr(self, self(sie), attr)
    def empty_subscript(self, parent):
        r"""
        Given a :class:`SageInputExpression` representing ``foo``,
        produces a :class:`SageInputExpression` representing ``foo[]``.
        Since this is not legal Python syntax, it is useful only for
        producing the \sage generator syntax for a polynomial ring.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.empty_subscript(sib(2) + sib(3)))
            (2 + 3)[]
        The following calls this method indirectly.::
            sage: sage_input(polygen(ZZ['y']))
            R.<x> = ZZ['y'][]
            x
        """
        # A subscript node with key None renders as empty brackets.
        return SIE_subscript(self, parent, None)
    def use_variable(self, sie, name):
        r"""
        Marks the :class:`SageInputExpression` ``sie`` to use a variable
        even if it is only referenced once.  (If ``sie`` is the final
        top-level expression, though, it will not use a variable.)
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: e = sib.name('MatrixSpace')(ZZ, 10, 10)
            sage: sib.use_variable(e, 'MS')
            sage: sib.result(e.zero_matrix())
            MS = MatrixSpace(ZZ, 10, 10)
            MS.zero_matrix()
        Without the call to use_variable, we get this instead::
            sage: sib = SageInputBuilder()
            sage: e = sib.name('MatrixSpace')(ZZ, 10, 10)
            sage: sib.result(e.zero_matrix())
            MatrixSpace(ZZ, 10, 10).zero_matrix()
        And even with the call to use_variable, we don't use a variable here::
            sage: sib = SageInputBuilder()
            sage: e = sib.name('MatrixSpace')(ZZ, 10, 10)
            sage: sib.use_variable(e, 'MS')
            sage: sib.result(e)
            MatrixSpace(ZZ, 10, 10)
        """
        # The request flag is honored later, during _sie_prepare.
        sie._sie_preferred_varname = name
        sie._sie_request_use_var = True
    def share(self, sie):
        r"""
        Mark the given expression as sharable, so that it will be replaced
        by a variable if it occurs multiple times in the expression.
        (Most non-single-token expressions are already sharable.)
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
        Without explicitly using .share(), string literals are not shared::
            sage: sib = SageInputBuilder()
            sage: e = sib('hello')
            sage: sib.result(sib((e, e)))
            ('hello', 'hello')
        See the difference if we use .share()::
            sage: sib = SageInputBuilder()
            sage: e = sib('hello')
            sage: sib.share(e)
            sage: sib.result(sib((e, e)))
            si = 'hello'
            (si, si)
        """
        # Overrides _sie_is_simple-based suppression of sharing.
        sie._sie_share = True
    def parent_with_gens(self, parent, sie, gen_names, name, gens_syntax=None):
        r"""
        This method is used for parents with generators, to manage the
        \sage preparser generator syntax (like ``K.<x> = QQ[]``).
        The \method{_sage_input_} method of a parent class with
        generators should construct a :class:`SageInputExpression` for
        the parent, and then call this method with the parent itself,
        the constructed SIE, a sequence containing the names of the
        generators, and (optionally) another SIE to use if the \sage
        generator syntax is used; typically this will be the same as
        the first SIE except omitting a ``names`` parameter.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: def test_setup(use_gens=True, preparse=True):
            ...       sib = SageInputBuilder(preparse=preparse)
            ...       gen_names=('foo', 'bar')
            ...       parent = "some parent"
            ...       normal_sie = sib.name('make_a_parent')(names=gen_names)
            ...       if use_gens:
            ...           gens_sie = sib.name('make_a_parent')()
            ...       else:
            ...           gens_sie = None
            ...       name = 'the_thing'
            ...       result = sib.parent_with_gens(parent, normal_sie,
            ...                                     gen_names, name,
            ...                                     gens_syntax=gens_sie)
            ...       return sib, result
            sage: sib, par_sie = test_setup()
            sage: sib.result(par_sie)
            make_a_parent(names=('foo', 'bar'))
            sage: sib, par_sie = test_setup()
            sage: sib.result(sib(3) * sib.gen("some parent", 0))
            the_thing.<foo,bar> = make_a_parent()
            3*foo
            sage: sib, par_sie = test_setup(preparse=False)
            sage: sib.result(par_sie)
            make_a_parent(names=('foo', 'bar'))
            sage: sib, par_sie = test_setup(preparse=False)
            sage: sib.result(sib(3) * sib.gen("some parent", 0))
            the_thing = make_a_parent(names=('foo', 'bar'))
            foo,bar = the_thing.gens()
            ZZ(3)*foo
            sage: sib, par_sie = test_setup(use_gens=False)
            sage: sib.result(par_sie)
            make_a_parent(names=('foo', 'bar'))
            sage: sib, par_sie = test_setup(use_gens=False)
            sage: sib.result(sib(3) * sib.gen("some parent", 0))
            the_thing = make_a_parent(names=('foo', 'bar'))
            foo,bar = the_thing.gens()
            3*foo
            sage: sib, par_sie = test_setup()
            sage: sib.result(par_sie - sib.gen("some parent", 1))
            the_thing.<foo,bar> = make_a_parent()
            the_thing - bar
        """
        # Wrap the parent SIE in a constructor node that knows how to emit
        # the generator syntax, cache it under the requested name, and
        # register one SIE_gen per generator for later lookup by gen().
        v = SIE_gens_constructor(self, sie, gen_names, gens_syntax=gens_syntax)
        self.cache(parent, v, name)
        gens = [SIE_gen(self, v, n) for n in gen_names]
        self._parent_gens[parent] = gens
        v._sie_gens = gens
        return v
def gen(self, parent, n=0):
r"""
Given a parent, returns a :class:`SageInputExpression` for
the `n`-th (default 0) generator of the parent.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sib.result(sib.gen(ZZ['y']))
R.<y> = ZZ[]
y
"""
if not parent in self._parent_gens:
self(parent)
if not parent in self._parent_gens:
raise ValueError("{} did not register generators for sage_input".format(parent))
gens = self._parent_gens[parent]
if n > len(gens):
raise ValueError("{} registered only {} generators for sage_input".format(parent, len(gens)))
return gens[n]
    def prod(self, factors, simplify=False):
        r"""
        Given a sequence, returns a :class:`SageInputExpression`
        for the product of the elements.
        With ``simplify=True``, performs some simplifications
        first.  If any element is formatted as a string ``'0'``,
        then that element is returned directly.  If any element is
        formatted as a string ``'1'``, then it is removed
        from the sequence (unless it is the only element in the sequence).
        And any negations are removed from the elements and moved to the
        outside of the product.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.prod([-1, 0, 1, -2]))
            -1*0*1*-2
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.prod([-1, 0, 1, 2], simplify=True))
            0
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.prod([-1, 2, -3, -4], simplify=True))
            -2*3*4
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.prod([-1, 1, -1, -1], simplify=True))
            -1
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.prod([1, 1, 1], simplify=True))
            1
        """
        # neg tracks the overall sign: each stripped negation toggles it.
        neg = False
        factors = [self(factor) for factor in factors]
        if simplify:
            i = 0
            while i < len(factors):
                factor = factors[i]
                # Peel off any (possibly nested) unary minuses.
                while isinstance(factor, SIE_unary) and factor._sie_op == '-':
                    neg = not neg
                    factor = factor._sie_operand
                factors[i] = factor
                if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '0':
                    # A zero factor absorbs the whole product (and its sign).
                    factors = [factor]
                    neg = False
                    break
                if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '1':
                    # Drop unit factors; do not advance i, since the next
                    # factor has shifted into position i.
                    factors[i:i+1] = []
                else:
                    i += 1
        if len(factors) == 0:
            factors.append(SIE_literal_stringrep(self, '1'))
        prod = factors[0]
        for factor in factors[1:]:
            prod = prod * factor
        if neg:
            prod = -prod
        return prod
    def sum(self, terms, simplify=False):
        r"""
        Given a sequence, returns a :class:`SageInputExpression`
        for the sum of the elements.
        With ``simplify=True``, performs some simplifications
        first.  If any element is formatted as a string ``'0'``,
        then it is removed from the sequence (unless it is the only
        element in the sequence); and any instances of ``a + -b``
        are changed to ``a - b``.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.sum([-1, 0, 1, 0, -1]))
            -1 + 0 + 1 + 0 + -1
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.sum([-1, 0, 1, 0, -1], simplify=True))
            -1 + 1 - 1
            sage: sib = SageInputBuilder()
            sage: sib.result(sib.sum([0, 0, 0], simplify=True))
            0
        """
        terms = [self(term) for term in terms]
        if simplify:
            i = 0
            while i < len(terms):
                term = terms[i]
                if isinstance(term, SIE_literal_stringrep) and term._sie_value == '0':
                    # Drop zero terms; do not advance i, since the next term
                    # has shifted into position i.
                    terms[i:i+1] = []
                else:
                    i += 1
        if len(terms) == 0:
            terms.append(SIE_literal_stringrep(self, '0'))
        sum = terms[0]
        for term in terms[1:]:
            negate = False
            # Strip (possibly nested) unary minuses so a + -b becomes a - b.
            while simplify and isinstance(term, SIE_unary) and term._sie_op == '-':
                negate = not negate
                term = term._sie_operand
            if negate:
                sum = sum - term
            else:
                sum = sum + term
        return sum
def result(self, e):
r"""
Given a :class:`SageInputExpression` constructed using ``self``,
returns a tuple of a list of commands and an expression
(and possibly a dictionary of local variables) suitable for
:func:`sage_eval`.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: r = sib.result(sib(6) * sib(7)); r
6*7
sage: tuple(r)
('', '6*7')
"""
sif = SageInputFormatter()
# Even if use_variable was called on e, don't automatically
# use a variable for it.
e._sie_request_use_var = False
e._sie_prepare(sif)
s = sif.format(e, 0)
locals = self._locals
if len(locals):
return SageInputAnswer(sif._commands, sif.format(e, 0), locals)
else:
return SageInputAnswer(sif._commands, sif.format(e, 0))
# Python's precedence levels.  Hand-transcribed from section 5.14 of
# the Python reference manual.  Values ascend in steps of 2 from the
# loosest-binding (lambda) to the tightest (atomic expressions); the
# gaps leave room for intermediate levels if ever needed.
_prec_lambda = 2
_prec_or = 4
_prec_and = 6
_prec_not = 8
_prec_membership = 10
_prec_identity = 12
_prec_comparison = 14
_prec_bitor = 16
_prec_bitxor = 18
_prec_bitand = 20
_prec_shift = 22
_prec_addsub = 24
_prec_muldiv = 26
_prec_negate = 28
_prec_bitnot = 30
_prec_exponent = 32
_prec_attribute = 34
_prec_subscript = 36
_prec_slicing = 38
_prec_funcall = 40
_prec_atomic = 42
class SageInputExpression(object):
r"""
Subclasses of this class represent expressions for :func:`sage_input`.
\sage classes should define a \method{_sage_input_} method, which
will return an instance of :class:`SageInputExpression`, created using
methods of :class:`SageInputBuilder`.
To the extent possible, operations on :class:`SageInputExpression` objects
construct a new :class:`SageInputExpression` representing that operation.
That is, if ``a`` is a :class:`SageInputExpression`, then ``a + b``
constructs a :class:`SageInputExpression` representing this sum.
This also works for attribute access, function calls, subscripts, etc.
Since arbitrary attribute accesses might be used to construct a new
attribute-access expression, all internal attributes and methods
have names that begin with ``_sie_`` to reduce the chance of
collisions.
It is expected that instances of this class will not be directly
created outside this module; instead, instances will be created
using methods of :class:`SageInputBuilder` and :class:`SageInputExpression`.
Values of type :class:`SageInputExpression` print in a fairly ugly
way, that reveals the internal structure of the expression tree.
"""
    def __init__(self, sib):
        r"""
        Initialize a :class:`SageInputExpression`.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib(3) # indirect doctest
            sage: sie
            {atomic:3}
            sage: sie._sie_builder is sib
            True
        """
        # State shared by all SIE node types; all attributes use the
        # _sie_ prefix so they do not collide with __getattr__-built
        # attribute-access expressions.
        self._sie_refcount = 0                 # references seen during _sie_prepare
        self._sie_builder = sib                # owning SageInputBuilder
        self._sie_context = None               # formatter of the current prepare pass
        self._sie_preferred_varname = None     # requested variable name, if any
        self._sie_varname = None               # variable name actually allocated
        self._sie_request_use_var = False      # set by SageInputBuilder.use_variable
        self._sie_use_var = False              # decided during _sie_prepare
        self._sie_requested_varname = False    # whether a name was registered
        self._sie_commands = []                # commands to emit before this node
    def _sie_is_simple(self):
        r"""
        Returns ``True`` if this :class:`SageInputExpression` is simple
        enough that duplicate uses are not worth caching.  Normally
        this will be true if the expression represents a single token.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.name('QQ')._sie_is_simple()
            True
            sage: sib(GF(2))._sie_is_simple()
            False
        """
        # Conservative default; simple node types override this.
        return False
    def _sie_referenced(self):
        r"""
        Returns a list of the immediate subexpressions of this
        :class:`SageInputExpression`.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: len(sib(GF(2))._sie_referenced())
            2
            sage: sib(5)._sie_referenced()
            []
        """
        # Leaf default; composite node types override this.
        return []
    def _sie_prepare(self, sif):
        r"""
        We traverse the entire expression DAG to prepare for printing.
        Here, we notice nodes with more than one parent, and mark them
        to replace with a variable (rather than generating the value
        multiple times).
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: pair = sib((GF(2), GF(2)))
            sage: single = sib(GF(2))
            sage: single._sie_refcount
            0
            sage: single._sie_use_var
            False
            sage: sib((GF(2), GF(2)))._sie_prepare(sif)
            sage: single._sie_refcount
            2
            sage: single._sie_use_var
            True
        """
        # Reset the refcount when entering a new formatting pass; the
        # formatter object itself serves as the pass identifier.
        if self._sie_context is not sif:
            self._sie_context = sif
            self._sie_refcount = 0
        self._sie_refcount += 1
        if self._sie_request_use_var:
            self._sie_require_varname(sif)
            self._sie_use_var = True
        if not self._sie_is_simple():
            # Exactly at the second reference: allocate a variable once.
            if self._sie_refcount == 2:
                self._sie_require_varname(sif)
                self._sie_use_var = True
        # Recurse only on the first visit, so shared subtrees are walked once.
        if self._sie_refcount == 1:
            for r in self._sie_referenced():
                r._sie_prepare(sif)
            for r in self._sie_commands:
                r._sie_prepare(sif)
    def _sie_require_varname(self, sif):
        r"""
        Mark this :class:`SageInputExpression` as requiring a variable name,
        and register it with a :class:`SageInputFormatter` (which will
        allocate a variable name at the end of the preparatory phase).
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib(3)
            sage: sie._sie_require_varname(sif)
            sage: sie._sie_requested_varname
            True
        """
        # Register at most once per expression.
        if not self._sie_requested_varname:
            sif.register_name(self._sie_preferred_varname)
            self._sie_requested_varname = True
            self._sie_generated = False
    def _sie_get_varname(self, sif):
        r"""
        Get the variable name that the :class:`SageInputFormatter` allocated
        for this :class:`SageInputExpression`.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib(3)
            sage: sie._sie_require_varname(sif)
            sage: sie._sie_get_varname(sif)
            'si'
        """
        # Lazily fetch and memoize the allocated name.
        if self._sie_varname is None:
            self._sie_varname = sif.get_name(self._sie_preferred_varname)
        return self._sie_varname
    def _sie_is_negation(self):
        r"""
        Test whether a :class:`SageInputExpression` is a negation.
        Despite the obscure name, this is intended to be a public method.
        See the documentation for \method{SIE_unary._sie_is_negation}
        for useful examples.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('foo')
            sage: sie._sie_is_negation()
            False
        """
        # Default for all node types; SIE_unary overrides for '-' nodes.
        return False
def __call__(self, *args, **kwargs):
r"""
Given a :class:`SageInputExpression`, build a new
:class:`SageInputExpression` representing a function call node
(with ``self`` as the function).
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: sie(4)
{call: {atomic:3}({atomic:4})}
"""
args = [self._sie_builder(_) for _ in args]
for k in kwargs:
kwargs[k] = self._sie_builder(kwargs[k])
return SIE_call(self._sie_builder, self, args, kwargs)
def __getitem__(self, key):
r"""
Given a :class:`SageInputExpression`, build a new
:class:`SageInputExpression` representing a subscript expression
(with ``self`` as the value being subscripted).
Currently, slices are not supported.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: sie[4]
{subscr: {atomic:3}[{atomic:4}]}
sage: sie[sib.name('x'), sib.name('y')]
{subscr: {atomic:3}[{tuple: ({atomic:x}, {atomic:y})}]}
"""
skey = self._sie_builder(key)
return SIE_subscript(self._sie_builder, self, skey)
    def __getattr__(self, attr):
        r"""
        Given a :class:`SageInputExpression`, build a new
        :class:`SageInputExpression` representing an attribute access.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('x')
            sage: sie.foo
            {getattr: {atomic:x}.foo}
            sage: sie.foo()
            {call: {getattr: {atomic:x}.foo}()}
        """
        # Only called for attributes NOT found normally, so real _sie_*
        # attributes are unaffected.
        return SIE_getattr(self._sie_builder, self, attr)
    def _rich_repr_(self, display_manager, **kwds):
        """
        Disable rich output.
        This is necessary because otherwise our :meth:`__getattr__`
        would be called.
        EXAMPLES::
            sage: from sage.repl.rich_output import get_display_manager
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('x')
            sage: sie._rich_repr_(get_display_manager()) is None
            True
        """
        # Returning None opts out of the rich-output protocol entirely.
        return None
    def __pow__(self, other):
        r"""
        Compute an expression tree for ``self ** other``.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib(3)
            sage: sie ^ 4
            {binop:** {atomic:3} {atomic:4}}
        """
        # Delegates to the generic binary-operator constructor.
        return self._sie_binop('**', other)
    def __mul__(self, other):
        r"""
        Compute an expression tree for ``self * other``.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib(3)
            sage: sie * 4
            {binop:* {atomic:3} {atomic:4}}
        """
        # Delegates to the generic binary-operator constructor.
        return self._sie_binop('*', other)
def __div__(self, other):
r"""
Compute an expression tree for ``self / other``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: sie / 4
{binop:/ {atomic:3} {atomic:4}}
"""
return self._sie_binop('/', other)
def __add__(self, other):
r"""
Compute an expression tree for ``self + other``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: sie + 4
{binop:+ {atomic:3} {atomic:4}}
"""
return self._sie_binop('+', other)
def __sub__(self, other):
r"""
Compute an expression tree for ``self - other``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: sie - 4
{binop:- {atomic:3} {atomic:4}}
"""
return self._sie_binop('-', other)
def _sie_binop(self, op, other):
r"""
Compute an expression tree for ``self OP other``,
where OP is a string representing a binary operator (such as
'+' or '**').
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: v = sib.name('x')._sie_binop('%', sib.name('y'))
sage: type(v)
<class 'sage.misc.sage_input.SIE_binary'>
sage: (v)._sie_op
'%'
sage: v
{binop:% {atomic:x} {atomic:y}}
"""
return SIE_binary(self._sie_builder, op, self, self._sie_builder(other))
def __neg__(self):
r"""
Compute an expression tree for ``-self``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: -sie
{unop:- {atomic:3}}
"""
return self._sie_unop('-')
def __invert__(self):
r"""
Compute an expression tree for ``~self``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: ~sie
{unop:~ {atomic:3}}
"""
return self._sie_unop('~')
def __abs__(self):
r"""
Compute an expression tree for ``abs(self)``.
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: abs(sie)
{call: {atomic:abs}({atomic:3})}
"""
return self._sie_builder.name('abs')(self)
def _sie_unop(self, op):
r"""
Compute an expression tree for ``OP self``,
where OP is a string representing a unary operator (such as
'-' or '~').
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder
sage: sib = SageInputBuilder()
sage: sie = sib(3)
sage: v = sie._sie_unop('~')
sage: type(v)
<class 'sage.misc.sage_input.SIE_unary'>
sage: (v)._sie_op
'~'
sage: v
{unop:~ {atomic:3}}
"""
return SIE_unary(self._sie_builder, op, self)
def _sie_format(self, sif):
r"""
Return the formatted string value of this expression, and the
precedence of the top-level operator in the expression.
EXAMPLES:
Actually, all of these are examples of the \method{_sie_format}
method on subclasses of :class:`SageInputExpression`;
:class:`SageInputExpression` itself is an abstract base class
(that cannot be instantiated).::
sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
sage: sib = SageInputBuilder()
sage: sif = SageInputFormatter()
sage: sie = sib(3)
sage: for v in (sie, sie+7, sie/5):
... v._sie_prepare(sif)
... v._sie_format(sif)
('3', 42)
('3 + 7', 24)
('3/5', 26)
sage: v = sib.assign(sib.name('foo').x, 3)
sage: v._sie_prepare(sif)
sage: v._sie_format(sif)
Traceback (most recent call last):
...
ValueError: Cannot format SIE_assign as expression
"""
raise NotImplementedError
def _sie_format_statement(self, sif):
r"""
Return the formatted string value of this expression, when
used as a statement.
On most :class:`SageInputExpression`s, this forwards directly
to the \method{_sie_format} method. However, on
:class:`SageInputExpression`s that actually represent
statements (such as :class:`SIE_assign`), this method
has an implementation and \method{_sie_format} raises
an error. (This is to prevent accidental use of
:class:`SIE_assign` as a value.)
EXAMPLES::
sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
sage: sib = SageInputBuilder()
sage: sif = SageInputFormatter()
sage: v = sib(3)
sage: v._sie_prepare(sif)
sage: v._sie_format_statement(sif)
'3'
sage: v = sib.assign(sib.name('foo').x, 3)
sage: v._sie_prepare(sif)
sage: v._sie_format_statement(sif)
'foo.x = 3'
"""
result, prec = self._sie_format(sif)
return result
class SIE_literal(SageInputExpression):
    r"""
    Abstract base class for ``literals``: values that consist of a
    single token.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder, SIE_literal
        sage: sib = SageInputBuilder()
        sage: sie = sib(3)
        sage: sie
        {atomic:3}
        sage: isinstance(sie, SIE_literal)
        True
    """

    def _sie_is_simple(self):
        r"""
        Declare that literals are too cheap to pull out into variables
        during ``common subexpression elimination``, even when they occur
        several times in an expression -- unless sharing was explicitly
        requested for this node.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib(3)
            sage: sie._sie_is_simple()
            True
            sage: sib.share(sie)
            sage: sie._sie_is_simple()
            False
            sage: sie._sie_share
            True
        """
        # Possible refinement: consider the formatted length, so that a
        # 50-digit integer appearing many times would get replaced.
        return not self._sie_share
class SIE_literal_stringrep(SIE_literal):
    r"""
    A leaf in a :func:`sage_input` expression tree, holding the string
    representation of a single token.  Used for integer, floating-point,
    and string literals, and for name expressions.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder, SIE_literal_stringrep
        sage: sib = SageInputBuilder()
        sage: isinstance(sib(3), SIE_literal_stringrep)
        True
        sage: isinstance(sib(3.14159, True), SIE_literal_stringrep)
        True
        sage: isinstance(sib.name('pi'), SIE_literal_stringrep)
        True
        sage: isinstance(sib(False), SIE_literal_stringrep)
        True
        sage: sib(False)
        {atomic:False}
    """

    def __init__(self, sib, n):
        r"""
        Initialize a :class:`SIE_literal_stringrep` value.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``n`` -- a string; the value to be printed for this expression

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib(3)
            {atomic:3}
            sage: sib(3)._sie_value
            '3'
        """
        super(SIE_literal_stringrep, self).__init__(sib)
        # Normalize to str so non-string tokens (e.g. ints) print directly.
        self._sie_value = str(n)
        self._sie_share = False

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_literal_stringrep`
        value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib(3)
            {atomic:3}
            sage: sib("\n")
            {atomic:'\n'}
        """
        return "{atomic:" + self._sie_value + "}"

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression, together
        with the indication that it is ``atomic`` (never needs
        parentheses).

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib(True)
            sage: sie._sie_prepare(sif)
            sage: sie._sie_format(sif)
            ('True', 42)
        """
        return (self._sie_value, _prec_atomic)
class SIE_call(SageInputExpression):
    r"""
    This class represents a function-call node in a :func:`sage_input`
    expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sie = sib.name('GF')
        sage: sie(49)
        {call: {atomic:GF}({atomic:49})}
    """

    def __init__(self, sib, func, args, kwargs):
        r"""
        Initialize an instance of :class:`SIE_call`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``func`` -- a :class:`SageInputExpression` representing a function

        - ``args`` -- a list of :class:`SageInputExpression`s representing the
          positional arguments

        - ``kwargs`` -- a dictionary mapping strings to
          :class:`SageInputExpression`s representing the keyword arguments

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib('RealField')(53, rnd='RNDZ')
        """
        super(SIE_call, self).__init__(sib)
        self._sie_func = func
        self._sie_args = args
        self._sie_kwargs = kwargs

    def __repr__(self):
        r"""
        Returns a string representing this :class:`SIE_call` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib('RealField')(53, rnd='RNDZ')
        """
        func = repr(self._sie_func)
        args = [repr(arg) for arg in self._sie_args]
        # Use .items() rather than the Python-2-only .iteritems(); sorting
        # keeps the keyword-argument order deterministic.
        kwargs = sorted(k + '=' + repr(v) for k, v in self._sie_kwargs.items())
        all_args = ', '.join(args + kwargs)
        return "{call: %s(%s)}" % (func, all_args)

    def _sie_referenced(self):
        r"""
        Returns a list of the immediate subexpressions of this :class:`SIE_call`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib('RealField')(53, rnd='RNDZ')
            sage: sie._sie_referenced()
            [{atomic:53}, {atomic:'RealField'}, {atomic:'RNDZ'}]
        """
        refs = self._sie_args[:]
        refs.append(self._sie_func)
        # .values() rather than the Python-2-only .itervalues().
        refs.extend(self._sie_kwargs.values())
        return refs

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression, and an indication
        that it is a function call.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib.name('RealField')(53, rnd='RNDZ')
            sage: sie._sie_prepare(sif)
            sage: sie._sie_format(sif)
            ("RealField(53, rnd='RNDZ')", 40)
        """
        func = sif.format(self._sie_func, _prec_attribute)
        args = [sif.format(arg, 0) for arg in self._sie_args]
        kwargs = sorted(k + '=' + sif.format(v, 0)
                        for k, v in self._sie_kwargs.items())
        all_args = ', '.join(args + kwargs)
        return ('%s(%s)' % (func, all_args), _prec_funcall)
class SIE_subscript(SageInputExpression):
    r"""
    A subscript node in a :func:`sage_input` expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sie = sib.name('QQ')['x,y']
        sage: sie
        {subscr: {atomic:QQ}[{atomic:'x,y'}]}
    """

    def __init__(self, sib, coll, key):
        r"""
        Initialize an instance of :class:`SIE_subscript`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``coll`` -- a :class:`SageInputExpression` representing a collection

        - ``key`` -- a :class:`SageInputExpression` representing the
          subscript/key, or ``None`` for an empty subscript.  An empty
          subscript is not legal Python syntax, but it is legal in the
          \sage preparser in examples like ``K.<x> = QQ[]``.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.name('QQ')['x']
            {subscr: {atomic:QQ}[{atomic:'x'}]}
            sage: sib.name('x')[1,2,3]
            {subscr: {atomic:x}[{tuple: ({atomic:1}, {atomic:2}, {atomic:3})}]}
            sage: sib.empty_subscript(sib.name('QQ'))
            {subscr: {atomic:QQ}[]}
        """
        super(SIE_subscript, self).__init__(sib)
        self._sie_coll = coll
        self._sie_key = key

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_subscript` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.name('ZZ')['x,y']
            {subscr: {atomic:ZZ}[{atomic:'x,y'}]}
        """
        key = '' if self._sie_key is None else repr(self._sie_key)
        return "{subscr: %s[%s]}" % (repr(self._sie_coll), key)

    def _sie_referenced(self):
        r"""
        Return a list of the immediate subexpressions of this
        :class:`SIE_subscript`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('GF')(5)['x,y']
            sage: sie._sie_referenced()
            [{call: {atomic:GF}({atomic:5})}, {atomic:'x,y'}]
        """
        if self._sie_key is None:
            # An empty subscript has no key subexpression.
            return [self._sie_coll]
        return [self._sie_coll, self._sie_key]

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression, and an
        indication that it is a subscript.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib.name('QQ')['x']
            sage: sie._sie_prepare(sif)
            sage: sie._sie_format(sif)
            ("QQ['x']", 36)
        """
        coll = sif.format(self._sie_coll, _prec_attribute)
        key = '' if self._sie_key is None else sif.format(self._sie_key, 0)
        return '%s[%s]' % (coll, key), _prec_subscript
class SIE_getattr(SageInputExpression):
    r"""
    This class represents a getattr node in a :func:`sage_input`
    expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sie = sib.name('CC').gen()
        sage: sie
        {call: {getattr: {atomic:CC}.gen}()}
    """

    def __init__(self, sib, obj, attr):
        r"""
        Initialize an instance of :class:`SIE_getattr`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``obj`` -- a :class:`SageInputExpression` representing an object

        - ``attr`` -- a string; the attribute name

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.name('QQbar').zeta(5)
            {call: {getattr: {atomic:QQbar}.zeta}({atomic:5})}
        """
        super(SIE_getattr, self).__init__(sib)
        self._sie_obj = obj
        self._sie_attr = attr

    def __repr__(self):
        r"""
        Returns a string representing this :class:`SIE_getattr` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.name('AA')(3).sqrt()
            {call: {getattr: {call: {atomic:AA}({atomic:3})}.sqrt}()}
        """
        obj = repr(self._sie_obj)
        return "{getattr: %s.%s}" % (obj, self._sie_attr)

    def _sie_referenced(self):
        r"""
        Returns a list of the immediate subexpressions of this
        :class:`SIE_getattr`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('CDF').gen
            sage: sie._sie_referenced()
            [{atomic:CDF}]
        """
        # Only the object is a subexpression; the attribute is a plain string.
        return [self._sie_obj]

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression, and an
        indication that it is an attribute reference.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib.name('AA').common_polynomial
            sage: sie._sie_prepare(sif)
            sage: sie._sie_format(sif)
            ('AA.common_polynomial', 34)
        """
        obj = sif.format(self._sie_obj, _prec_exponent)
        return '%s.%s' % (obj, self._sie_attr), _prec_attribute
class SIE_tuple(SageInputExpression):
    r"""
    A tuple or list node in a :func:`sage_input` expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sib((1, 'howdy'))
        {tuple: ({atomic:1}, {atomic:'howdy'})}
        sage: sib(["lists"])
        {list: ({atomic:'lists'})}
    """

    def __init__(self, sib, values, is_list):
        r"""
        Initialize an instance of :class:`SIE_tuple`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``values`` -- a list of :class:`SageInputExpression`s representing
          the elements

        - ``is_list`` -- ``True`` if this node represents a list,
          ``False`` for a tuple

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib((3.5, -2))
            {tuple: ({atomic:3.5}, {unop:- {atomic:2}})}
            sage: sib(["Hello", "world"])
            {list: ({atomic:'Hello'}, {atomic:'world'})}
        """
        super(SIE_tuple, self).__init__(sib)
        self._sie_values = values
        self._sie_is_list = is_list

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_tuple` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib((2,3,5))
            {tuple: ({atomic:2}, {atomic:3}, {atomic:5})}
            sage: sib(["Hello", "world"])
            {list: ({atomic:'Hello'}, {atomic:'world'})}
        """
        if self._sie_is_list:
            kind = "list"
        else:
            kind = "tuple"
        inner = ', '.join(repr(v) for v in self._sie_values)
        return "{%s: (%s)}" % (kind, inner)

    def _sie_referenced(self):
        r"""
        Return a list of the immediate subexpressions of this
        :class:`SIE_tuple`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib((ZZ, GF(5)))
            sage: sie._sie_referenced()
            [{atomic:ZZ}, {call: {atomic:GF}({atomic:5})}]
        """
        return self._sie_values

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this tuple or list, and an
        indication that it is atomic (never needs to be parenthesized).

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: for v in ((), (1,), (1,2), [], [1], [1,2]):
            ...       sie = sib(v)
            ...       sie._sie_prepare(sif)
            ...       sie._sie_format(sif)
            ('()', 42)
            ('(1,)', 42)
            ('(1, 2)', 42)
            ('[]', 42)
            ('[1]', 42)
            ('[1, 2]', 42)
        """
        formatted = [sif.format(val, 0) for val in self._sie_values]
        if self._sie_is_list:
            return '[%s]' % ', '.join(formatted), _prec_atomic
        if len(formatted) == 1:
            # A one-element tuple needs the trailing comma.
            return '(%s,)' % formatted[0], _prec_atomic
        return '(%s)' % ', '.join(formatted), _prec_atomic
class SIE_dict(SageInputExpression):
    r"""
    A dict node in a :func:`sage_input` expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sib.dict([('TeX', RR(pi)), ('Metafont', RR(e))])
        {dict: {{atomic:'TeX'}:{call: {atomic:RR}({atomic:3.1415926535897931})}, {atomic:'Metafont'}:{call: {atomic:RR}({atomic:2.7182818284590451})}}}
        sage: sib.dict({-40:-40, 0:32, 100:212})
        {dict: {{unop:- {atomic:40}}:{unop:- {atomic:40}}, {atomic:0}:{atomic:32}, {atomic:100}:{atomic:212}}}
    """

    def __init__(self, sib, entries):
        r"""
        Initialize an instance of :class:`SIE_dict`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``entries`` -- a list of (key, value) pairs of
          :class:`SageInputExpression`s

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.dict({'us':'good', 'them':'bad'})
            {dict: {{atomic:'them'}:{atomic:'bad'}, {atomic:'us'}:{atomic:'good'}}}
            sage: sib.dict([(10, 'PS2'), (12, 'PS2'), (13, 'PS3')])
            {dict: {{atomic:10}:{atomic:'PS2'}, {atomic:12}:{atomic:'PS2'}, {atomic:13}:{atomic:'PS3'}}}
        """
        super(SIE_dict, self).__init__(sib)
        self._sie_entries = entries

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_dict` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib.dict({'keaton':'general', 'chan':'master'})
            {dict: {{atomic:'keaton'}:{atomic:'general'}, {atomic:'chan'}:{atomic:'master'}}}
        """
        pairs = [repr(key) + ':' + repr(val)
                 for key, val in self._sie_entries]
        return "{dict: {%s}}" % ', '.join(pairs)

    def _sie_referenced(self):
        r"""
        Return a list of the immediate subexpressions of this
        :class:`SIE_dict`: all keys, followed by all values.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.dict({1:'beguilement', 2:'legacy', 3:'passage'})
            sage: sie._sie_referenced()
            [{atomic:1}, {atomic:2}, {atomic:3}, {atomic:'beguilement'}, {atomic:'legacy'}, {atomic:'passage'}]
        """
        keys = [entry[0] for entry in self._sie_entries]
        values = [entry[1] for entry in self._sie_entries]
        return keys + values

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this dict, and an
        indication that it is atomic (never needs to be parenthesized).

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: sie = sib.dict({'carnivores':1, 'thinking':2, 'triumph':3})
            sage: sie._sie_prepare(sif)
            sage: sie._sie_format(sif)
            ("{'carnivores':1, 'thinking':2, 'triumph':3}", 42)
        """
        pairs = [sif.format(k, 0) + ':' + sif.format(v, 0)
                 for k, v in self._sie_entries]
        return "{%s}" % ', '.join(pairs), _prec_atomic
class SIE_binary(SageInputExpression):
    r"""
    An arithmetic expression with a binary operator and its two
    arguments, in a :func:`sage_input` expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: sib(3)+5
        {binop:+ {atomic:3} {atomic:5}}
    """

    def __init__(self, sib, op, lhs, rhs):
        r"""
        Initialize an instance of :class:`SIE_binary`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``op`` -- a string representing a binary operator, such as '*' or '%'

        - ``lhs`` -- a :class:`SageInputExpression`

        - ``rhs`` -- a :class:`SageInputExpression`

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib(3)*5
            {binop:* {atomic:3} {atomic:5}}
        """
        super(SIE_binary, self).__init__(sib)
        self._sie_op = op
        self._sie_operands = (lhs, rhs)

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_binary` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sib(7)/9
            {binop:/ {atomic:7} {atomic:9}}
        """
        lhs, rhs = self._sie_operands
        return "{binop:%s %s %s}" % (self._sie_op, repr(lhs), repr(rhs))

    def _sie_referenced(self):
        r"""
        Return a tuple of the immediate subexpressions of this
        :class:`SIE_binary`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = sib.name('x') + 5
            sage: sie._sie_referenced()
            ({atomic:x}, {atomic:5})
        """
        return self._sie_operands

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression,
        and the precedence of the top-level operator in the expression.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: x = sib.name('x')
            sage: y = sib.name('y')
            sage: for v in (x+y, x*y, x**y):
            ...       v._sie_prepare(sif)
            ...       v._sie_format(sif)
            ('x + y', 24)
            ('x*y', 26)
            ('x^y', 32)

        Note that the printing for $x^y$ varies depending on whether the
        preparser is enabled.::

            sage: sibnp = SageInputBuilder(preparse=False)
            sage: sif = SageInputFormatter()
            sage: v = x**y
            sage: v._sie_prepare(sif)
            sage: v._sie_format(sif)
            ('x^y', 32)

        TESTS::

            sage: x = sib.name('x')
            sage: y = sib.name('y')
            sage: z = sib.name('z')
            sage: sib.result((x+y)+z)
            x + y + z
            sage: sib.result(x+(y+z))
            x + (y + z)
            sage: sib.result((x*y)*z)
            x*y*z
            sage: sib.result(x*(y*z))
            x*(y*z)
            sage: sib.result(x+(y*z))
            x + y*z
            sage: sib.result((x+y)*z)
            (x + y)*z
            sage: sib.result((x^y)^z)
            (x^y)^z
            sage: sib.result(x^(y^z))
            x^y^z
        """
        left, right = self._sie_operands
        op = self._sie_op
        if op == '**':
            # Exponentiation is right-associative: format the left operand
            # at one level tighter so (x^y)^z keeps its parentheses.
            base = sif.format(left, _prec_exponent + 1)
            expt = sif.format(right, _prec_exponent)
            symbol = '^' if self._sie_builder.preparse() else '**'
            return '%s%s%s' % (base, symbol, expt), _prec_exponent
        # For the remaining operators, map to (printed form, precedence).
        table = {'*': ('*', _prec_muldiv),
                 '/': ('/', _prec_muldiv),
                 '+': (' + ', _prec_addsub),
                 '-': (' - ', _prec_addsub)}
        if op not in table:
            raise ValueError('Unhandled op {} in SIE_binary'.format(op))
        fop, prec = table[op]
        # Left-associative: the right operand is formatted one level tighter.
        lhs = sif.format(left, prec)
        rhs = sif.format(right, prec + 1)
        return '%s%s%s' % (lhs, fop, rhs), prec
class SIE_unary(SageInputExpression):
    r"""
    An arithmetic expression with a unary operator and its argument, in
    a :func:`sage_input` expression tree.

    EXAMPLES::

        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: -sib(256)
        {unop:- {atomic:256}}
    """

    def __init__(self, sib, op, operand):
        r"""
        Initialize an instance of :class:`SIE_unary`.

        INPUT:

        - ``sib`` -- a :class:`SageInputBuilder`

        - ``op`` -- a string representing a unary operator, such as '-'

        - ``operand`` -- a :class:`SageInputExpression`

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: -sib(3)
            {unop:- {atomic:3}}
        """
        super(SIE_unary, self).__init__(sib)
        self._sie_op = op
        self._sie_operand = operand

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_unary` value.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: -sib(15)
            {unop:- {atomic:15}}
        """
        return "{unop:%s %s}" % (self._sie_op, repr(self._sie_operand))

    def _sie_referenced(self):
        r"""
        Return a list of the immediate subexpressions of this
        :class:`SIE_unary`.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: sie = -sib.name('x')
            sage: sie._sie_referenced()
            [{atomic:x}]
        """
        return [self._sie_operand]

    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this expression,
        and the precedence of the top-level operator in the expression.

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: x = sib.name('x')
            sage: v = -x
            sage: v._sie_prepare(sif)
            sage: v._sie_format(sif)
            ('-x', 28)
            sage: v = ~x
            sage: v._sie_prepare(sif)
            sage: v._sie_format(sif)
            ('~x', 30)

        TESTS::

            sage: x = sib.name('x')
            sage: y = sib.name('y')
            sage: sib.result((-x)+y)
            -x + y
            sage: sib.result(x+(-y))
            x + -y
            sage: sib.result(-(x+y))
            -(x + y)
            sage: sib.result(-(-x))
            --x
            sage: sib.result(x-(-y))
            x - -y

        We assume that -(x*y) is always equal to (-x)*y.  Using this
        assumption, we print -(x*y) as -x*y, which parses as (-x)*y.::

            sage: sib.result(-(x*y))
            -x*y
            sage: sib.result((-x)*y)
            -x*y
            sage: sib.result(x*(-y))
            x*-y
        """
        op = self._sie_op
        if op == '-':
            # Format the operand at multiplicative precedence so that
            # -(a*b) prints as -a*b (which parses as (-a)*b -- assumed
            # equal), while the result itself has negation precedence.
            operand_prec = _prec_muldiv
            result_prec = _prec_negate
        elif op == '~':
            operand_prec = _prec_bitnot
            result_prec = _prec_bitnot
        else:
            raise ValueError('Unhandled op {} in SIE_unary'.format(op))
        formatted = op + sif.format(self._sie_operand, operand_prec)
        return formatted, result_prec

    def _sie_is_negation(self):
        r"""
        Test whether a :class:`SageInputExpression` is a negation.

        Despite the obscure name, this is intended to be a public method.
        It is used in the \method{_sage_input_} method for
        :class:`ComplexNumber`, so that ``sage_input(CC(-3))`` will
        produce ``-CC(3)`` instead of ``CC(-3)``.  (This is preferred
        so that you get ``x - CC(3)`` instead of ``x + CC(-3)``.)

        EXAMPLES::

            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: x = sib.name('x')
            sage: v = -x
            sage: def mk_CC(b):
            ...       if b._sie_is_negation():
            ...           return -sib.name('CC')(b._sie_operand)
            ...       else:
            ...           return sib.name('CC')(b)
            sage: mk_CC(x)
            {call: {atomic:CC}({atomic:x})}
            sage: mk_CC(v)
            {unop:- {call: {atomic:CC}({atomic:x})}}
        """
        return self._sie_op == '-'
class SIE_gens_constructor(SageInputExpression):
    r"""
    This class represents an expression that can create a \sage parent
    with named generators, optionally using the \sage preparser
    generators syntax (like ``K.<x> = QQ[]``).
    EXAMPLES::
        sage: from sage.misc.sage_input import SageInputBuilder
        sage: sib = SageInputBuilder()
        sage: qq = sib.name('QQ')
        sage: sib.parent_with_gens("some parent", qq['x'],
        ... ('x',), 'QQx',
        ... gens_syntax=sib.empty_subscript(qq))
        {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)}
    """
    def __init__(self, sib, constr, gen_names, gens_syntax=None):
        r"""
        Initialize an instance of :class:`SIE_gens_constructor`.
        INPUT:
        - ``sib`` - a :class:`SageInputBuilder`
        - ``constr`` - a :class:`SageInputExpression` for constructing this
          parent ``normally``
        - ``gen_names`` - a tuple of generator names
        - ``gens_syntax`` -- an optional :class:`SageInputExpression` for
          constructing this parent using the \sage preparser generators syntax
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: qq = sib.name('QQ')
            sage: sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)}
        """
        super(SIE_gens_constructor, self).__init__(sib)
        # Expression that constructs the parent without preparser syntax.
        self._sie_constr = constr
        # Tuple of generator name strings, e.g. ('x', 'y').
        self._sie_gen_names = gen_names
        self._sie_gens = None # will be overwritten from .parent_with_gens()
        # Expression for the preparser's "K.<x> = ..." form, if available.
        self._sie_gens_constr = gens_syntax
        # Becomes True once any generator of this parent is referenced.
        self._sie_assign_gens = False
        # Guards _sie_add_command against emitting the commands twice.
        self._sie_generated = False
    def __repr__(self):
        r"""
        Returns a string representing this :class:`SIE_gens_constructor` value.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: qq = sib.name('QQ')
            sage: sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)}
        """
        return "{constr_parent: %s with gens: %s}" % (repr(self._sie_constr), self._sie_gen_names)
    def _sie_referenced(self):
        r"""
        Returns a list of the immediate subexpressions of this
        :class:`SIE_gens_constructor`.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder
            sage: sib = SageInputBuilder()
            sage: qq = sib.name('QQ')
            sage: gc = sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            sage: gc._sie_referenced()
            [{subscr: {atomic:QQ}[{atomic:'x'}]}]
        """
        # This is used to determine if some expressions should be replaced
        # by variables (if the expression has more than one parent in
        # the expression DAG). We assume that all expressions in
        # self._sie_gens_constr also occur in self._sie_constr.
        return [self._sie_constr]
    def _sie_gens_referenced(self, sif):
        r"""
        Mark that at least one of the generators in this
        :class:`SIE_gens_constructor` is used. (This means we will actually
        construct all of the generators.)
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: qq = sib.name('QQ')
            sage: gc = sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            sage: gc._sie_assign_gens
            False
            sage: gc._sie_gens_referenced(sif)
            sage: gc._sie_assign_gens
            True
        """
        self._sie_assign_gens = True
        # Both the parent and each generator will need stable variable
        # names in the generated commands.
        self._sie_require_varname(sif)
        for gen in self._sie_gens:
            gen._sie_require_varname(sif)
    def _sie_add_command(self, sif):
        r"""
        Build commands to construct this parent and (if necessary)
        its associated generators.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: qq = sib.name('QQ')
            sage: gc = sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            sage: gc._sie_gens_referenced(sif)
            sage: gc._sie_prepare(sif)
            sage: gc._sie_add_command(sif)
            sage: sif._commands
            'QQx.<x> = QQ[]\n'
        TESTS:
        There are several tricky cases here.
        We prefer the \sage preparser generators syntax::
            sage: sage_input(polygen(ZZ))
            R.<x> = ZZ[]
            x
        But of course we can't use that without the preparser::
            sage: sage_input(polygen(ZZ), preparse=False)
            R = ZZ['x']
            x = R.gen()
            x
        We also can't use the preparser syntax if there is a conflict
        between generator names. For example, this works::
            sage: sage_input((polygen(ZZ), polygen(GF(17), 'y')))
            R1.<x> = ZZ[]
            R2.<y> = GF(17)[]
            (x, y)
        but this can't use the preparser syntax.::
            sage: sage_input((polygen(ZZ), polygen(GF(17))))
            R1 = ZZ['x']
            x1 = R1.gen()
            R2 = GF(17)['x']
            x2 = R2.gen()
            (x1, x2)
        If we never use the generators, then we don't bother with the
        preparser syntax.::
            sage: sage_input((ZZ['x'], ZZ['x'], GF(17)['y']))
            R = ZZ['x']
            (R, R, GF(17)['y'])
        """
        if not self._sie_generated:
            # Use the preparser form (R.<x> = ZZ[]) only when the preparser
            # is on, a preparser-syntax expression was supplied, and every
            # generator was able to claim its preferred variable name.
            if self._sie_builder.preparse() and \
                    self._sie_gens_constr is not None and \
                    all(g._sie_got_preferred(sif) for g in self._sie_gens):
                s, _ = self._sie_gens_constr._sie_format(sif)
                sif._commands += '%s.<%s> = %s\n' % (self._sie_get_varname(sif), ','.join(self._sie_gen_names), s)
            else:
                # Fall back to a plain assignment, plus explicit
                # .gen()/.gens() assignments if the generators are used.
                s, _ = self._sie_constr._sie_format(sif)
                sif._commands += '%s = %s\n' % (self._sie_get_varname(sif), s)
                if self._sie_assign_gens:
                    if len(self._sie_gens) == 1:
                        sif._commands += '%s = %s.gen()\n' % (self._sie_gens[0]._sie_get_varname(sif), self._sie_get_varname(sif))
                    else:
                        sif._commands += '%s = %s.gens()\n' % (','.join([g._sie_get_varname(sif) for g in self._sie_gens]), self._sie_get_varname(sif))
            # Never emit the construction commands more than once.
            self._sie_generated = True
    def _sie_format(self, sif):
        r"""
        Return the formatted string value of this parent-construction
        expression, and its precedence.
        As a side effect, if the generators of this parent are used,
        this adds commands to assign the generators to names.
        EXAMPLES::
            sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter
            sage: sib = SageInputBuilder()
            sage: sif = SageInputFormatter()
            sage: qq = sib.name('QQ')
            sage: gc = sib.parent_with_gens("some parent", qq['x'],
            ... ('x',), 'QQx',
            ... gens_syntax=sib.empty_subscript(qq))
            sage: gc._sie_gens_referenced(sif)
            sage: gc._sie_prepare(sif)
            sage: gc._sie_format(sif)
            ('QQx', 42)
            sage: sif._commands
            'QQx.<x> = QQ[]\n'
        """
        if self._sie_assign_gens:
            # Generators are used: emit the construction commands (a side
            # effect on sif) and refer to the parent by its variable name.
            self._sie_add_command(sif)
            return self._sie_get_varname(sif), _prec_atomic
        # Otherwise format the plain construction expression inline.
        return self._sie_constr._sie_format(sif)
class SIE_gen(SageInputExpression):
    r"""
    A :class:`SageInputExpression` standing for a named generator of a
    parent with named generators.
    """

    def __init__(self, sib, parent, name):
        r"""
        Initialize a :class:`SIE_gen`.

        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``parent`` - the :class:`SIE_gens_constructor` that owns this
          generator

        - ``name`` - the preferred variable name for this generator
        """
        super(SIE_gen, self).__init__(sib)
        self._sie_parent = parent
        self._sie_preferred_varname = name

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_gen` value.
        """
        return "{gen:%s %s}" % (self._sie_preferred_varname, repr(self._sie_parent))

    def _sie_is_simple(self):
        r"""
        Report that a generator formats as a single token, so it is
        never replaced by a variable of its own.
        """
        return True

    def _sie_prepare(self, sif):
        r"""
        Prepare this expression; additionally tell the owning parent
        that its generator names must be assigned.
        """
        super(SIE_gen, self)._sie_prepare(sif)
        self._sie_parent._sie_gens_referenced(sif)

    def _sie_format(self, sif):
        r"""
        Return the variable name of this generator with atomic
        precedence.

        As a side effect, ensure that the parent's assignment command
        (which binds the generator names to variables) has been emitted.
        """
        self._sie_parent._sie_add_command(sif)
        return self._sie_get_varname(sif), _prec_atomic

    def _sie_got_preferred(self, sif):
        r"""
        Return whether the :class:`SageInputFormatter` assigned this
        generator its preferred name (i.e. there was no name conflict).
        """
        return self._sie_get_varname(sif) == self._sie_preferred_varname
class SIE_import_name(SageInputExpression):
    r"""
    A :class:`SageInputExpression` standing for a name imported from a
    module; formatting it emits the corresponding
    ``from ... import ...`` command.
    """

    def __init__(self, sib, module, name, alt_name=None):
        r"""
        Initialize a :class:`SIE_import_name`.

        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``module`` - the module name to import from

        - ``name`` - the object name to import

        - ``alt_name`` - an alternate local name, or None (the default)
          to use ``name`` itself
        """
        super(SIE_import_name, self).__init__(sib)
        self._sie_formatted = False
        self._sie_module_name = module
        self._sie_object_name = name
        self._sie_preferred_varname = name if alt_name is None else alt_name

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_import_name`
        value.
        """
        as_clause = ''
        if self._sie_object_name != self._sie_preferred_varname:
            as_clause = ' as %s' % self._sie_preferred_varname
        return "{import:%s/%s%s}" % (self._sie_module_name,
                                     self._sie_object_name, as_clause)

    def _sie_is_simple(self):
        r"""
        Report that an imported name formats as a single token.
        """
        return True

    def _sie_prepare(self, sif):
        r"""
        Prepare this expression; additionally reserve a variable name,
        since the import always binds one.
        """
        super(SIE_import_name, self)._sie_prepare(sif)
        self._sie_require_varname(sif)

    def _sie_format(self, sif):
        r"""
        Return the local name of this import with atomic precedence,
        emitting the ``from ... import`` command the first time only.
        """
        name = self._sie_get_varname(sif)
        if not self._sie_formatted:
            # Only run the import command once.
            self._sie_formatted = True
            rename = '' if name == self._sie_object_name else ' as ' + name
            sif._commands += 'from %s import %s%s\n' % (self._sie_module_name,
                                                        self._sie_object_name,
                                                        rename)
        return name, _prec_atomic
class SIE_assign(SageInputExpression):
    r"""
    A :class:`SageInputExpression` standing for an assignment command
    (a statement, not an expression).
    """

    def __init__(self, sib, lhs, rhs):
        r"""
        Initialize a :class:`SIE_assign`.

        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``lhs`` - the left-hand side of the assignment

        - ``rhs`` - the right-hand side of the assignment
        """
        super(SIE_assign, self).__init__(sib)
        self._sie_lhs = lhs
        self._sie_rhs = rhs

    def __repr__(self):
        r"""
        Return a string representing this :class:`SIE_assign` command.
        """
        return "{assign: %s %s}" % (repr(self._sie_lhs), repr(self._sie_rhs))

    def _sie_referenced(self):
        r"""
        Return the two sides of the assignment as the immediate
        subexpressions.
        """
        return [self._sie_lhs, self._sie_rhs]

    def _sie_format(self, sif):
        r"""
        Always raise: an assignment is a statement and can never be
        formatted as an expression.
        """
        raise ValueError("Cannot format SIE_assign as expression")

    def _sie_format_statement(self, sif):
        r"""
        Return this assignment formatted as a statement,
        ``lhs = rhs``.
        """
        lhs = sif.format(self._sie_lhs, 0)
        rhs = sif.format(self._sie_rhs, 0)
        return '%s = %s' % (lhs, rhs)
class SageInputFormatter:
    r"""
    Keeps track of variable names and accumulates the sequence of
    generated commands during the :func:`sage_input` formatting
    process.
    """

    def __init__(self):
        r"""
        Initialize with no commands, no registered names and no
        duplicate names.
        """
        self._commands = ''
        self._names = set()
        self._dup_names = {}

    def format(self, e, prec):
        r"""
        Format a Sage input expression ``e`` into a string at
        precedence level ``prec``.

        If ``e`` is to be replaced by a variable, emit the assignment
        command (once) and return the variable name; otherwise format
        the expression via its ``_sie_format`` method, parenthesizing
        if its precedence is lower than ``prec``.  Any commands
        attached to ``e`` are then emitted as statements.
        """
        if e._sie_use_var:
            if not e._sie_generated:
                s, _ = e._sie_format(self)
                # _sie_format may recurse back into format(); only emit
                # the assignment if that recursion did not already do so.
                if not e._sie_generated:
                    self._commands += '%s = %s\n' % (e._sie_get_varname(self), s)
                    e._sie_generated = True
            formatted = e._sie_get_varname(self)
        else:
            s, iprec = e._sie_format(self)
            if iprec < prec:
                s = '(' + s + ')'
            formatted = s
        pending = e._sie_commands
        e._sie_commands = []
        for cmd in pending:
            self._commands += cmd._sie_format_statement(self) + '\n'
        return formatted

    def register_name(self, name):
        r"""
        Register that some value would like to use the given name
        (``None`` is treated as ``'si'``).

        The first request records the name; any further request for the
        same name marks it as a duplicate, so that numbered variants
        will be handed out instead.
        """
        if name is None:
            name = 'si'
        if name in self._names:
            self._dup_names[name] = 0
        else:
            self._names.add(name)

    def get_name(self, name):
        r"""
        Return the actual name to use for a requested name (``None``
        is treated as ``'si'``).

        Uniquely-requested names are returned unchanged; names that
        were registered more than once get an increasing numeric
        suffix.
        """
        if name is None:
            name = 'si'
        if name not in self._dup_names:
            return name
        serial = self._dup_names[name] + 1
        self._dup_names[name] = serial
        return name + str(serial)
def verify_same(a, b):
    r"""
    Verify that two Sage values are the same: equal values whose
    parents are equal (for non-Elements, whose types are identical).

    Return ``None`` on success; raise an :class:`AssertionError`
    otherwise.
    """
    from sage.structure.element import is_Element
    if is_Element(a):
        assert(a.parent() == b.parent())
    else:
        assert(type(a) is type(b))
    if isinstance(a, float):
        # Python floats follow IEEE semantics where NaN != NaN, so we
        # must treat "both are NaN" as a match explicitly.
        if a == a:
            # a is not NaN; if b is NaN this comparison fails, as desired.
            assert a == b
        else:
            # a is NaN, so b must be NaN too.
            assert not (b == b)
        return
    from sage.rings.real_mpfi import is_RealIntervalFieldElement
    from sage.rings.complex_interval import is_ComplexIntervalFieldElement
    if is_RealIntervalFieldElement(a) or is_ComplexIntervalFieldElement(a):
        assert(cmp(a, b) == 0), "Expected %s == %s" % (a, b)
    else:
        assert(a == b), "Expected %s == %s" % (a, b)
def verify_si_answer(x, answer, preparse):
    r"""
    Verify that evaluating ``answer`` yields a value equal to ``x``
    (same parent/type, as checked by :func:`verify_same`).

    INPUT:

    - ``x`` - an arbitrary Sage value

    - ``answer`` - a string, or a :class:`SageInputAnswer`

    - ``preparse`` -- ``True``, ``False``, or ``None``; if ``None``,
      the answer is checked both with and without the preparser

    Return ``None`` on success; raise an exception on failure.
    """
    from sage.misc.sage_eval import sage_eval
    if preparse is None:
        modes = (True, False)
    else:
        modes = (preparse,)
    for mode in modes:
        verify_same(x, sage_eval(answer, preparse=mode))
class SageInputAnswer(tuple):
    r"""
    The result of :func:`sage_input`: a tuple of
    ``(commands, expression)`` or ``(commands, expression, locals)``
    that acts like a tuple when passed to :func:`sage_eval` but prints
    as a ready-to-paste sequence of commands.
    """

    def __new__(cls, cmds, expr, locals=None):
        r"""
        Construct the underlying 2-tuple, or 3-tuple when a non-empty
        ``locals`` dict is supplied.
        """
        contents = (cmds, expr, locals) if locals else (cmds, expr)
        return tuple.__new__(cls, contents)

    def __repr__(self):
        r"""
        Print the commands followed by the expression; when a locals
        dict is present, it is listed first under a ``LOCALS:`` header.
        """
        if len(self) == 2:
            return self[0] + self[1]
        locals = self[2]
        locals_text = ''.join(' %s: %r\n' % (k, v) for k, v in locals.iteritems())
        return 'LOCALS:\n' + locals_text + self[0] + self[1]
| 33.624757
| 151
| 0.56051
|
But since we store x, that can't happen;
self._id_cache[id(x)] = (x, sie)
sie._sie_preferred_varname = name
    def import_name(self, module, name, alt_name=None):
        r"""
        Return a :class:`SIE_import_name` referring to ``name`` from
        ``module``, optionally bound locally as ``alt_name``;
        formatting it emits the corresponding import command.
        """
        return SIE_import_name(self, module, name, alt_name)
    def assign(self, e, val):
        r"""
        Construct a :class:`SIE_assign` command assigning ``val`` to
        ``e``; both arguments are first coerced to Sage input
        expressions.
        """
        e = self(e)
        val = self(val)
        return SIE_assign(self, e, val)
    def command(self, v, cmd):
        r"""
        Attach the command ``cmd`` to the expression ``v``, so that it
        is emitted as a statement after ``v`` is formatted.
        """
        v = self(v)
        cmd = self(cmd)
        v._sie_commands.append(cmd)
    def dict(self, entries):
        r"""
        Construct a :class:`SIE_dict` from ``entries``, which may be a
        dict or a list of (key, value) pairs; keys and values are
        coerced to Sage input expressions.
        """
        if isinstance(entries, dict):
            entries = list(entries.items())
        entries = [(self(key),self(val)) for (key,val) in entries]
        return SIE_dict(self, entries)
    def getattr(self, sie, attr):
        r"""
        Return a :class:`SIE_getattr` for the attribute ``attr`` of the
        expression ``sie``.
        """
        return SIE_getattr(self, self(sie), attr)
    def empty_subscript(self, parent):
        r"""
        Return an expression that formats as ``parent[]`` (a subscript
        with no key), as used by the preparser generator syntax such as
        ``QQ[]``.
        """
        return SIE_subscript(self, parent, None)
    def use_variable(self, sie, name):
        r"""
        Request that ``sie`` be formatted via an assignment to a
        variable preferably called ``name`` (the formatter may pick a
        different name on conflicts).
        """
        sie._sie_preferred_varname = name
        sie._sie_request_use_var = True
    def share(self, sie):
        r"""
        Mark ``sie`` as shared, so that repeated uses of it are
        replaced by a common variable rather than repeated literals.
        """
        sie._sie_share = True
    def parent_with_gens(self, parent, sie, gen_names, name, gens_syntax=None):
        r"""
        Cache ``parent`` under the preferred variable name ``name`` as
        a :class:`SIE_gens_constructor` wrapping ``sie``, and register
        its generators called ``gen_names``; ``gens_syntax`` optionally
        gives the preparser construction form (e.g. ``QQ[]`` for
        ``QQx.<x> = QQ[]``).
        """
        v = SIE_gens_constructor(self, sie, gen_names, gens_syntax=gens_syntax)
        self.cache(parent, v, name)
        gens = [SIE_gen(self, v, n) for n in gen_names]
        self._parent_gens[parent] = gens
        v._sie_gens = gens
        return v
def gen(self, parent, n=0):
if not parent in self._parent_gens:
self(parent)
if not parent in self._parent_gens:
raise ValueError("{} did not register generators for sage_input".format(parent))
gens = self._parent_gens[parent]
if n > len(gens):
raise ValueError("{} registered only {} generators for sage_input".format(parent, len(gens)))
return gens[n]
    def prod(self, factors, simplify=False):
        r"""
        Return a Sage input expression for the product of ``factors``.

        INPUT:

        - ``factors`` - an iterable of values coercible to Sage input
          expressions

        - ``simplify`` - boolean (default False); if True, strip unary
          minus signs off the factors (tracking the overall sign), drop
          factors equal to 1, and collapse the whole product to 0 if
          any factor is 0

        An empty product formats as 1; any accumulated sign is applied
        as a final negation.
        """
        neg = False
        factors = [self(factor) for factor in factors]
        if simplify:
            i = 0
            while i < len(factors):
                factor = factors[i]
                # Peel off nested unary minuses, remembering the parity.
                while isinstance(factor, SIE_unary) and factor._sie_op == '-':
                    neg = not neg
                    factor = factor._sie_operand
                factors[i] = factor
                if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '0':
                    # 0 absorbs the whole product (and any pending sign).
                    factors = [factor]
                    neg = False
                    break
                if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '1':
                    # Multiplicative identity: drop this factor.
                    factors[i:i+1] = []
                else:
                    i += 1
        if len(factors) == 0:
            factors.append(SIE_literal_stringrep(self, '1'))
        prod = factors[0]
        for factor in factors[1:]:
            prod = prod * factor
        if neg:
            prod = -prod
        return prod
    def sum(self, terms, simplify=False):
        r"""
        Return a Sage input expression for the sum of ``terms``.

        INPUT:

        - ``terms`` - an iterable of values coercible to Sage input
          expressions

        - ``simplify`` - boolean (default False); if True, drop terms
          equal to 0 and render the addition of a negated term as a
          subtraction

        An empty sum formats as 0.
        """
        terms = [self(term) for term in terms]
        if simplify:
            i = 0
            while i < len(terms):
                term = terms[i]
                if isinstance(term, SIE_literal_stringrep) and term._sie_value == '0':
                    # Additive identity: drop this term.
                    terms[i:i+1] = []
                else:
                    i += 1
        if len(terms) == 0:
            terms.append(SIE_literal_stringrep(self, '0'))
        sum = terms[0]
        for term in terms[1:]:
            negate = False
            # Peel unary minuses so "a + (-b)" renders as "a - b".
            while simplify and isinstance(term, SIE_unary) and term._sie_op == '-':
                negate = not negate
                term = term._sie_operand
            if negate:
                sum = sum - term
            else:
                sum = sum + term
        return sum
def result(self, e):
sif = SageInputFormatter()
# Even if use_variable was called on e, don't automatically
e._sie_request_use_var = False
e._sie_prepare(sif)
s = sif.format(e, 0)
locals = self._locals
if len(locals):
return SageInputAnswer(sif._commands, sif.format(e, 0), locals)
else:
return SageInputAnswer(sif._commands, sif.format(e, 0))
# Precedence levels for formatted expressions, from loosest to tightest
# binding, modeled on the operator-precedence table in
# the Python reference manual.
_prec_lambda = 2
_prec_or = 4
_prec_and = 6
_prec_not = 8
_prec_membership = 10
_prec_identity = 12
_prec_comparison = 14
_prec_bitor = 16
_prec_bitxor = 18
_prec_bitand = 20
_prec_shift = 22
_prec_addsub = 24
_prec_muldiv = 26
_prec_negate = 28
_prec_bitnot = 30
_prec_exponent = 32
_prec_attribute = 34
_prec_subscript = 36
_prec_slicing = 38
_prec_funcall = 40
_prec_atomic = 42
class SageInputExpression(object):
    r"""
    Base class for the nodes of a :func:`sage_input` expression tree.

    Operator overloads build new expression nodes, so arithmetic on
    these objects constructs formattable syntax rather than computing
    values.
    """
    def __init__(self, sib):
        r"""
        Initialize bookkeeping state for the expression.

        INPUT:

        - ``sib`` - the :class:`SageInputBuilder` that owns this node
        """
        self._sie_refcount = 0                  # uses seen during _sie_prepare
        self._sie_builder = sib
        self._sie_context = None                # formatter of the current pass
        self._sie_preferred_varname = None
        self._sie_varname = None                # name actually assigned
        self._sie_request_use_var = False       # set by use_variable()
        self._sie_use_var = False               # format via a variable
        self._sie_requested_varname = False
        self._sie_commands = []                 # statements attached via command()
    def _sie_is_simple(self):
        r"""
        Whether this expression is a single token that never needs to be
        replaced by a variable; False by default.
        """
        return False
    def _sie_referenced(self):
        r"""
        The immediate subexpressions of this node; none by default.
        """
        return []
    def _sie_prepare(self, sif):
        r"""
        First formatting pass: count references and decide which nodes
        must be emitted as variable assignments.
        """
        if self._sie_context is not sif:
            # A new formatter starts a fresh pass; reset the counter.
            self._sie_context = sif
            self._sie_refcount = 0
        self._sie_refcount += 1
        if self._sie_request_use_var:
            self._sie_require_varname(sif)
            self._sie_use_var = True
        if not self._sie_is_simple():
            # A non-simple node seen twice is shared: use a variable.
            if self._sie_refcount == 2:
                self._sie_require_varname(sif)
                self._sie_use_var = True
        if self._sie_refcount == 1:
            # Only descend on the first visit, to avoid re-counting.
            for r in self._sie_referenced():
                r._sie_prepare(sif)
            for r in self._sie_commands:
                r._sie_prepare(sif)
    def _sie_require_varname(self, sif):
        r"""
        Register this node's preferred variable name with the formatter
        (at most once) and mark its assignment as not yet generated.
        """
        if not self._sie_requested_varname:
            sif.register_name(self._sie_preferred_varname)
            self._sie_requested_varname = True
            self._sie_generated = False
    def _sie_get_varname(self, sif):
        r"""
        Return (fetching lazily from the formatter) the variable name
        assigned to this node.
        """
        if self._sie_varname is None:
            self._sie_varname = sif.get_name(self._sie_preferred_varname)
        return self._sie_varname
    def _sie_is_negation(self):
        r"""
        Whether this expression is a unary negation; False by default.
        """
        return False
    def __call__(self, *args, **kwargs):
        r"""
        Build a :class:`SIE_call` expression; arguments are coerced to
        Sage input expressions.
        """
        args = [self._sie_builder(_) for _ in args]
        for k in kwargs:
            kwargs[k] = self._sie_builder(kwargs[k])
        return SIE_call(self._sie_builder, self, args, kwargs)
    def __getitem__(self, key):
        r"""
        Build a :class:`SIE_subscript` expression for ``self[key]``.
        """
        skey = self._sie_builder(key)
        return SIE_subscript(self._sie_builder, self, skey)
    def __getattr__(self, attr):
        r"""
        Build a :class:`SIE_getattr` expression for ``self.attr``.
        """
        return SIE_getattr(self._sie_builder, self, attr)
    def _rich_repr_(self, display_manager, **kwds):
        r"""
        Disable rich output for expression nodes.
        """
        return None
    def __pow__(self, other):
        r"""
        Build an exponentiation expression.
        """
        return self._sie_binop('**', other)
    def __mul__(self, other):
        r"""
        Build a multiplication expression.
        """
        return self._sie_binop('*', other)
    def __div__(self, other):
        r"""
        Build a division expression (Python 2 ``/``).
        """
        return self._sie_binop('/', other)
    def __add__(self, other):
        r"""
        Build an addition expression.
        """
        return self._sie_binop('+', other)
    def __sub__(self, other):
        r"""
        Build a subtraction expression.
        """
        return self._sie_binop('-', other)
    def _sie_binop(self, op, other):
        r"""
        Build a :class:`SIE_binary` node for operator ``op``, coercing
        the right operand.
        """
        return SIE_binary(self._sie_builder, op, self, self._sie_builder(other))
    def __neg__(self):
        r"""
        Build a unary negation expression.
        """
        return self._sie_unop('-')
    def __invert__(self):
        r"""
        Build a bitwise-inversion expression.
        """
        return self._sie_unop('~')
    def __abs__(self):
        r"""
        Build an ``abs(...)`` call expression.
        """
        return self._sie_builder.name('abs')(self)
    def _sie_unop(self, op):
        r"""
        Build a :class:`SIE_unary` node for operator ``op``.
        """
        return SIE_unary(self._sie_builder, op, self)
    def _sie_format(self, sif):
        r"""
        Return (string, precedence) for this node; must be overridden
        by subclasses.
        """
        raise NotImplementedError
    def _sie_format_statement(self, sif):
        r"""
        Format this node as a statement by formatting it as an
        expression and discarding the precedence.
        """
        result, prec = self._sie_format(sif)
        return result
class SIE_literal(SageInputExpression):
    r"""
    Abstract base class for expressions represented by a single literal
    token.
    """
    def _sie_is_simple(self):
        r"""
        A literal is simple (never replaced by a variable) unless it
        has been explicitly marked as shared.
        """
        # Perhaps this should actually look at the formatted length of self,
        # and sometimes return false? If some 50-digit integer occurs multiple
        # times in an expression, it might be better to do the replacement.
        return not self._sie_share
class SIE_literal_stringrep(SIE_literal):
    r"""
    A literal expression stored directly as its string representation.
    """
    def __init__(self, sib, n):
        r"""
        Store ``str(n)`` as the literal text; not shared by default.
        """
        super(SIE_literal_stringrep, self).__init__(sib)
        self._sie_value = str(n)
        self._sie_share = False
    def __repr__(self):
        r"""
        Return a string representing this literal.
        """
        return "{atomic:%s}" % self._sie_value
    def _sie_format(self, sif):
        r"""
        Return the literal text with atomic precedence.
        """
        return self._sie_value, _prec_atomic
class SIE_call(SageInputExpression):
    r"""
    A Sage input expression for a function call.
    """
    def __init__(self, sib, func, args, kwargs):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``func`` - the expression being called

        - ``args`` - a list of positional-argument expressions

        - ``kwargs`` - a dict of keyword-argument expressions
        """
        super(SIE_call, self).__init__(sib)
        self._sie_func = func
        self._sie_args = args
        self._sie_kwargs = kwargs
    def __repr__(self):
        r"""
        Return a string representing this call; keyword arguments are
        shown sorted by name.
        """
        func = repr(self._sie_func)
        args = [repr(arg) for arg in self._sie_args]
        kwargs = sorted(k + '=' + repr(v) for k, v in self._sie_kwargs.iteritems())
        all_args = ', '.join(args + kwargs)
        return "{call: %s(%s)}" % (func, all_args)
    def _sie_referenced(self):
        r"""
        Return the function and every argument expression.
        """
        refs = self._sie_args[:]
        refs.append(self._sie_func)
        refs.extend(self._sie_kwargs.itervalues())
        return refs
    def _sie_format(self, sif):
        r"""
        Format as ``func(arg, ..., k=v, ...)`` with keyword arguments
        sorted by name; returns function-call precedence.
        """
        func = sif.format(self._sie_func, _prec_attribute)
        args = [sif.format(arg, 0) for arg in self._sie_args]
        kwargs = sorted(k + '=' + sif.format(v, 0) for k, v in self._sie_kwargs.iteritems())
        all_args = ', '.join(args + kwargs)
        return ('%s(%s)' % (func, all_args), _prec_funcall)
class SIE_subscript(SageInputExpression):
    r"""
    A Sage input expression for a subscript, ``coll[key]``; a ``None``
    key formats as the empty subscript ``coll[]``.
    """
    def __init__(self, sib, coll, key):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``coll`` - the expression being subscripted

        - ``key`` - the key expression, or ``None`` for an empty
          subscript
        """
        super(SIE_subscript, self).__init__(sib)
        self._sie_coll = coll
        self._sie_key = key
    def __repr__(self):
        r"""
        Return a string representing this subscript.
        """
        coll = repr(self._sie_coll)
        if self._sie_key is None:
            key = ''
        else:
            key = repr(self._sie_key)
        return "{subscr: %s[%s]}" % (coll, key)
    def _sie_referenced(self):
        r"""
        Return the collection and (if present) the key expression.
        """
        refs = [self._sie_coll]
        if self._sie_key is not None:
            refs.append(self._sie_key)
        return refs
    def _sie_format(self, sif):
        r"""
        Format as ``coll[key]`` (or ``coll[]`` when the key is None)
        with subscript precedence.
        """
        coll = sif.format(self._sie_coll, _prec_attribute)
        if self._sie_key is None:
            key = ''
        else:
            key = sif.format(self._sie_key, 0)
        return '%s[%s]' % (coll, key), _prec_subscript
class SIE_getattr(SageInputExpression):
    r"""
    A Sage input expression for an attribute access, ``obj.attr``.
    """
    def __init__(self, sib, obj, attr):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``obj`` - the expression whose attribute is accessed

        - ``attr`` - the attribute name (a string)
        """
        super(SIE_getattr, self).__init__(sib)
        self._sie_obj = obj
        self._sie_attr = attr
    def __repr__(self):
        r"""
        Return a string representing this attribute access.
        """
        obj = repr(self._sie_obj)
        return "{getattr: %s.%s}" % (obj, self._sie_attr)
    def _sie_referenced(self):
        r"""
        Return the object expression as the only subexpression.
        """
        return [self._sie_obj]
    def _sie_format(self, sif):
        r"""
        Format as ``obj.attr`` with attribute precedence.
        """
        obj = sif.format(self._sie_obj, _prec_exponent)
        return '%s.%s' % (obj, self._sie_attr), _prec_attribute
class SIE_tuple(SageInputExpression):
    r"""
    A Sage input expression for a tuple or (when ``is_list`` is True) a
    list literal.
    """
    def __init__(self, sib, values, is_list):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``values`` - the element expressions

        - ``is_list`` - boolean; format as a list rather than a tuple
        """
        super(SIE_tuple, self).__init__(sib)
        self._sie_values = values
        self._sie_is_list = is_list
    def __repr__(self):
        r"""
        Return a string representing this tuple or list.
        """
        kind = "list" if self._sie_is_list else "tuple"
        return "{%s: (%s)}" % \
            (kind, ', '.join([repr(v) for v in self._sie_values]))
    def _sie_referenced(self):
        r"""
        Return the element expressions.
        """
        return self._sie_values
    def _sie_format(self, sif):
        r"""
        Format as ``[...]`` or ``(...)`` with atomic precedence; a
        one-element tuple gets the required trailing comma.
        """
        values = [sif.format(val, 0) for val in self._sie_values]
        if self._sie_is_list:
            return '[%s]' % ', '.join(values), _prec_atomic
        else:
            if len(values) == 1:
                return '(%s,)' % values[0], _prec_atomic
            else:
                return '(%s)' % ', '.join(values), _prec_atomic
class SIE_dict(SageInputExpression):
    r"""
    A Sage input expression for a dict literal.
    """
    def __init__(self, sib, entries):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``entries`` - a list of (key, value) expression pairs
        """
        super(SIE_dict, self).__init__(sib)
        self._sie_entries = entries
    def __repr__(self):
        r"""
        Return a string representing this dict.
        """
        return "{dict: {%s}}" % \
            ', '.join([repr(key) + ':' + repr(val)
                       for key,val in self._sie_entries])
    def _sie_referenced(self):
        r"""
        Return all key and value expressions.
        """
        return [k for k,v in self._sie_entries] + [v for k,v in self._sie_entries]
    def _sie_format(self, sif):
        r"""
        Format as ``{k:v, ...}`` with atomic precedence.
        """
        return "{%s}" %\
            ', '.join(sif.format(k, 0)+':'+sif.format(v, 0) for k,v in self._sie_entries), _prec_atomic
class SIE_binary(SageInputExpression):
    r"""
    A Sage input expression for a binary operator (``**``, ``*``,
    ``/``, ``+``, ``-``).
    """
    def __init__(self, sib, op, lhs, rhs):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``op`` - the operator string

        - ``lhs``, ``rhs`` - the operand expressions
        """
        super(SIE_binary, self).__init__(sib)
        self._sie_op = op
        self._sie_operands = (lhs, rhs)
    def __repr__(self):
        r"""
        Return a string representing this binary operation.
        """
        return "{binop:%s %s %s}" % (self._sie_op, repr(self._sie_operands[0]), repr(self._sie_operands[1]))
    def _sie_referenced(self):
        r"""
        Return the two operand expressions.
        """
        return self._sie_operands
    def _sie_format(self, sif):
        r"""
        Format the operation with the appropriate precedence.

        Exponentiation is right-associative and rendered as ``^`` when
        the preparser is in use, ``**`` otherwise; the remaining
        operators are left-associative (the right operand is formatted
        at ``prec+1``).
        """
        op = self._sie_op
        fop = op
        if op == '**':
            lhs = sif.format(self._sie_operands[0], _prec_exponent+1)
            rhs = sif.format(self._sie_operands[1], _prec_exponent)
            if self._sie_builder.preparse():
                return '%s^%s' % (lhs, rhs), _prec_exponent
            else:
                return '%s**%s' % (lhs, rhs), _prec_exponent
        if op == '*':
            prec = _prec_muldiv
        elif op == '/':
            prec = _prec_muldiv
        elif op == '+':
            fop = ' + '
            prec = _prec_addsub
        elif op == '-':
            fop = ' - '
            prec = _prec_addsub
        else:
            raise ValueError('Unhandled op {} in SIE_binary'.format(op))
        lhs = sif.format(self._sie_operands[0], prec)
        rhs = sif.format(self._sie_operands[1], prec+1)
        return '%s%s%s' % (lhs, fop, rhs), prec
class SIE_unary(SageInputExpression):
    r"""
    A Sage input expression for a unary operator (``-`` or ``~``).
    """
    def __init__(self, sib, op, operand):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``op`` - the operator string

        - ``operand`` - the operand expression
        """
        super(SIE_unary, self).__init__(sib)
        self._sie_op = op
        self._sie_operand = operand
    def __repr__(self):
        r"""
        Return a string representing this unary operation.
        """
        return "{unop:%s %s}" % (self._sie_op, repr(self._sie_operand))
    def _sie_referenced(self):
        r"""
        Return the operand expression as the only subexpression.
        """
        return [self._sie_operand]
    def _sie_format(self, sif):
        r"""
        Format the operation; for negation, the operand is formatted at
        a tighter precedence than the result claims, so nicer-looking
        output is produced.
        """
        op = self._sie_op
        fop = op
        rprec = None
        if op == '-':
            # We print -(a*b) as -a*b, even though that will parse as
            # (-a)*b.
            prec = _prec_muldiv
            rprec = _prec_negate
        elif op == '~':
            prec = _prec_bitnot
        else:
            raise ValueError('Unhandled op {} in SIE_unary'.format(op))
        if rprec is None: rprec = prec
        return '%s%s' % (fop, sif.format(self._sie_operand, prec)), rprec
    def _sie_is_negation(self):
        r"""
        Return whether this expression is a unary negation.
        """
        return self._sie_op == '-'
class SIE_gens_constructor(SageInputExpression):
    r"""
    A Sage input expression for the construction of a parent with named
    generators; knows how to emit the construction as a command that
    also binds the generator names (using the ``R.<x> = ...`` preparser
    syntax when possible).
    """
    def __init__(self, sib, constr, gen_names, gens_syntax=None):
        r"""
        INPUT:

        - ``sib`` - a :class:`SageInputBuilder`

        - ``constr`` - an expression constructing the parent

        - ``gen_names`` - a tuple of generator names

        - ``gens_syntax`` - optionally, an expression for the preparser
          construction form (e.g. ``QQ[]`` for ``QQx.<x> = QQ[]``)
        """
        super(SIE_gens_constructor, self).__init__(sib)
        self._sie_constr = constr
        self._sie_gen_names = gen_names
        self._sie_gens = None # will be overwritten from .parent_with_gens()
        self._sie_gens_constr = gens_syntax
        self._sie_assign_gens = False
        self._sie_generated = False
    def __repr__(self):
        r"""
        Return a string representing this parent construction.
        """
        return "{constr_parent: %s with gens: %s}" % (repr(self._sie_constr), self._sie_gen_names)
    def _sie_referenced(self):
        r"""
        Return the construction expression as the only subexpression.
        """
        # This is used to determine if some expressions should be replaced
        # by variables (if the expression has more than one parent in
        # the expression DAG). We assume that all expressions in
        # self._sie_gens_constr also occur in self._sie_constr.
        return [self._sie_constr]
    def _sie_gens_referenced(self, sif):
        r"""
        Record that the generators are used, so that the parent must be
        emitted as an assignment binding both the parent name and every
        generator name.
        """
        self._sie_assign_gens = True
        self._sie_require_varname(sif)
        for gen in self._sie_gens:
            gen._sie_require_varname(sif)
    def _sie_add_command(self, sif):
        r"""
        Emit (once) the command constructing this parent.

        The preparser syntax ``name.<gens> = ...`` is used when the
        preparser is active, a generator-construction form is known and
        every generator got its preferred variable name; otherwise a
        plain assignment is emitted, followed by ``.gen()``/``.gens()``
        assignments if the generators are referenced.
        """
        if not self._sie_generated:
            if self._sie_builder.preparse() and \
                    self._sie_gens_constr is not None and \
                    all(g._sie_got_preferred(sif) for g in self._sie_gens):
                s, _ = self._sie_gens_constr._sie_format(sif)
                sif._commands += '%s.<%s> = %s\n' % (self._sie_get_varname(sif), ','.join(self._sie_gen_names), s)
            else:
                s, _ = self._sie_constr._sie_format(sif)
                sif._commands += '%s = %s\n' % (self._sie_get_varname(sif), s)
                if self._sie_assign_gens:
                    if len(self._sie_gens) == 1:
                        sif._commands += '%s = %s.gen()\n' % (self._sie_gens[0]._sie_get_varname(sif), self._sie_get_varname(sif))
                    else:
                        sif._commands += '%s = %s.gens()\n' % (','.join([g._sie_get_varname(sif) for g in self._sie_gens]), self._sie_get_varname(sif))
            self._sie_generated = True
    def _sie_format(self, sif):
        r"""
        Return the formatted value and precedence: the parent's variable
        name (emitting its command first) when the generators are used,
        otherwise the underlying construction expression.
        """
        if self._sie_assign_gens:
            self._sie_add_command(sif)
            return self._sie_get_varname(sif), _prec_atomic
        return self._sie_constr._sie_format(sif)
class SIE_gen(SageInputExpression):
    """SIE node referring to one generator of a :class:`SIE_gens_constructor`.

    Formatting a generator forces the parent's construction command (with
    generator assignments) to be emitted first.
    """
    def __init__(self, sib, parent, name):
        # ``parent``: the SIE_gens_constructor owning this generator;
        # ``name``: the generator's preferred variable name.
        super(SIE_gen, self).__init__(sib)
        self._sie_parent = parent
        self._sie_preferred_varname = name
    def __repr__(self):
        return "{gen:%s %s}" % (self._sie_preferred_varname, repr(self._sie_parent))
    def _sie_is_simple(self):
        # A generator reference is always atomic.
        return True
    def _sie_prepare(self, sif):
        super(SIE_gen, self)._sie_prepare(sif)
        # Mark the parent so that generator variables get assigned.
        self._sie_parent._sie_gens_referenced(sif)
    def _sie_format(self, sif):
        # Ensure the parent construction is emitted before the name is used.
        self._sie_parent._sie_add_command(sif)
        return self._sie_get_varname(sif), _prec_atomic
    def _sie_got_preferred(self, sif):
        """True iff this generator was actually given its preferred name."""
        return self._sie_get_varname(sif) == self._sie_preferred_varname
class SIE_import_name(SageInputExpression):
    """SIE node for a name obtained via ``from <module> import <name>``.

    The import statement is emitted the first time the node is formatted;
    an ``as`` clause is added when the assigned variable name differs from
    the imported name.
    """
    def __init__(self, sib, module, name, alt_name=None):
        # ``alt_name``: optional preferred local name; defaults to ``name``.
        super(SIE_import_name, self).__init__(sib)
        self._sie_formatted = False   # True after the import was emitted
        self._sie_module_name = module
        self._sie_object_name = name
        if alt_name is None:
            self._sie_preferred_varname = name
        else:
            self._sie_preferred_varname = alt_name
    def __repr__(self):
        return "{import:%s/%s%s}" % (self._sie_module_name, self._sie_object_name,
                                     "" if self._sie_object_name == self._sie_preferred_varname else " as %s" % self._sie_preferred_varname)
    def _sie_is_simple(self):
        # An imported name is atomic once bound.
        return True
    def _sie_prepare(self, sif):
        super(SIE_import_name, self)._sie_prepare(sif)
        # An import always needs a variable name to bind to.
        self._sie_require_varname(sif)
    def _sie_format(self, sif):
        name = self._sie_get_varname(sif)
        if self._sie_formatted:
            # Only run the import command once
            return name, _prec_atomic
        self._sie_formatted = True
        rename = ''
        if name != self._sie_object_name:
            rename = ' as ' + name
        sif._commands += 'from %s import %s%s\n' % (self._sie_module_name,
                                                    self._sie_object_name,
                                                    rename)
        return name, _prec_atomic
class SIE_assign(SageInputExpression):
    """SIE node for an assignment statement ``lhs = rhs``.

    An assignment is a statement, not an expression: attempting to format
    it as an expression raises ``ValueError``; use
    :meth:`_sie_format_statement` instead.
    """
    def __init__(self, sib, lhs, rhs):
        """Store the two sides of the assignment on builder ``sib``."""
        super(SIE_assign, self).__init__(sib)
        self._sie_lhs = lhs
        self._sie_rhs = rhs
    def __repr__(self):
        lhs_repr = repr(self._sie_lhs)
        rhs_repr = repr(self._sie_rhs)
        return "{assign: %s %s}" % (lhs_repr, rhs_repr)
    def _sie_referenced(self):
        # Both sides participate in the expression DAG.
        return [self._sie_lhs, self._sie_rhs]
    def _sie_format(self, sif):
        # Assignments have no expression form.
        raise ValueError("Cannot format SIE_assign as expression")
    def _sie_format_statement(self, sif):
        """Render the assignment as a full statement string."""
        lhs_text = sif.format(self._sie_lhs, 0)
        rhs_text = sif.format(self._sie_rhs, 0)
        return '%s = %s' % (lhs_text, rhs_text)
class SageInputFormatter:
    """Second pass of ``sage_input``: renders a prepared SIE tree as code.

    Generated statements accumulate in ``_commands``; variable names that
    have been handed out are tracked so that duplicates receive numeric
    suffixes (``si``, ``si1``, ``si2``, ...).
    """
    def __init__(self):
        self._commands = ''     # statements emitted so far (newline-terminated)
        self._names = set()     # base names registered at least once
        self._dup_names = {}    # base name -> last numeric suffix handed out
    def format(self, e, prec):
        """Format expression ``e`` for a context of precedence ``prec``.

        If ``e`` is marked to use a variable, emit (once) the command
        assigning it and return the variable name; otherwise return the
        expression text, parenthesized when its own precedence is below
        ``prec``.  Statements attached to ``e`` are flushed afterwards.
        """
        if e._sie_use_var:
            if not e._sie_generated:
                s, _ = e._sie_format(self)
                # In complicated situations, this can get called
                # recursively...
                if not e._sie_generated:
                    self._commands += '%s = %s\n' % (e._sie_get_varname(self), s)
                    e._sie_generated = True
            formatted = e._sie_get_varname(self)
        else:
            s, iprec = e._sie_format(self)
            if iprec < prec:
                s = '(' + s + ')'
            formatted = s
        # Flush any statements (e.g. SIE_assign) attached to this node.
        commands = e._sie_commands
        e._sie_commands = []
        for cmd in commands:
            s_cmd = cmd._sie_format_statement(self)
            self._commands += s_cmd + '\n'
        return formatted
    def register_name(self, name):
        """Note that ``name`` is wanted; a second registration marks it as
        a duplicate so :meth:`get_name` will add suffixes."""
        if name is None: name = 'si'
        if name in self._names:
            self._dup_names[name] = 0
        else:
            self._names.add(name)
    def get_name(self, name):
        """Return ``name``, suffixed with a counter if it was registered
        more than once."""
        if name is None: name = 'si'
        if name in self._dup_names:
            next = self._dup_names[name] + 1
            self._dup_names[name] = next
            return name + str(next)
        else:
            return name
def verify_same(a, b):
    """Assert that ``a`` and ``b`` are equal and live in the same parent
    (for Sage elements) or have the same type (otherwise).

    Used to verify that evaluating ``sage_input`` output reproduces the
    original object.  Raises ``AssertionError`` on any mismatch.
    """
    from sage.structure.element import is_Element
    if is_Element(a):
        assert(a.parent() == b.parent())
    else:
        assert(type(a) is type(b))
    if isinstance(a, float):
        # The IEEE floating-point standard recommends that NaN != NaN
        # Sage doesn't do this for RDF or RR, but Python does for floats.
        if not (a == a):
            assert not (b == b)
        else:
            assert a == b
        return
    from sage.rings.real_mpfi import is_RealIntervalFieldElement
    from sage.rings.complex_interval import is_ComplexIntervalFieldElement
    if is_RealIntervalFieldElement(a) or is_ComplexIntervalFieldElement(a):
        # Interval elements are compared with cmp() instead of ==;
        # NOTE(review): presumably because interval == has non-standard
        # semantics.  cmp() is Python 2 only -- confirm before porting.
        assert(cmp(a, b) == 0), "Expected %s == %s" % (a, b)
    else:
        assert(a == b), "Expected %s == %s" % (a, b)
def verify_si_answer(x, answer, preparse):
    """Check that evaluating ``answer`` reproduces ``x``.

    ``preparse`` may be True, False, or None; ``None`` means the answer
    must round-trip both with and without the Sage preparser.
    """
    from sage.misc.sage_eval import sage_eval
    if preparse is None:
        for mode in (True, False):
            verify_same(x, sage_eval(answer, preparse=mode))
    else:
        verify_same(x, sage_eval(answer, preparse=preparse))
class SageInputAnswer(tuple):
    """Return value of ``sage_input``: a ``(commands, expression)`` or
    ``(commands, expression, locals)`` tuple whose repr prints as
    runnable code.

    The third element is present only when a non-empty locals dictionary
    was supplied; ``repr`` then lists the locals before the code.
    """
    def __new__(cls, cmds, expr, locals=None):
        if locals:
            return tuple.__new__(cls, (cmds, expr, locals))
        else:
            return tuple.__new__(cls, (cmds, expr))
    def __repr__(self):
        if len(self) == 2:
            return self[0] + self[1]
        locals = self[2]
        # .items() instead of the Python 2-only .iteritems(); behaves the
        # same on Python 2 and also works on Python 3.
        locals_text = ''.join(' %s: %r\n' % (k, v) for k, v in locals.items())
        return 'LOCALS:\n' + locals_text + self[0] + self[1]
| true
| true
|
f716d301aba4afe90bc5d5c6ec197a440bdc19f2
| 3,047
|
py
|
Python
|
contrib/testgen/base58.py
|
Trackerming/bitcoin-sv
|
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
|
[
"MIT"
] | 35
|
2019-02-23T06:21:13.000Z
|
2021-11-15T11:35:13.000Z
|
contrib/testgen/base58.py
|
Chihuataneo/bitcoin-sv
|
d9b12a23dbf0d2afc5f488fa077d762b302ba873
|
[
"MIT"
] | 60
|
2019-02-25T18:17:03.000Z
|
2021-07-13T00:14:00.000Z
|
contrib/testgen/base58.py
|
Chihuataneo/bitcoin-sv
|
d9b12a23dbf0d2afc5f488fa077d762b302ba873
|
[
"MIT"
] | 24
|
2019-02-20T05:37:02.000Z
|
2021-10-29T18:42:10.000Z
|
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin stand-in for the PyCrypto-style ``SHA256.new`` interface,
    # backed by hashlib.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: iterating over ``bytes`` yields ints, so shadow the
    # ord/chr builtins (module-locally, on purpose) to let the
    # byte-twiddling code below run unchanged on both 2.x and 3.x.
    def ord(c):
        # bytes elements are already ints on Python 3
        return c
    def chr(n):
        # produce a single byte, not a 1-char text string
        return bytes((n,))
# Bitcoin's base58 alphabet: no 0, O, I, or l (visually ambiguous).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.

    Accepts ``bytes`` (whose iteration yields ints on Python 3) as well
    as 8-bit strings.  Leading zero bytes are compressed to leading '1'
    characters, as Bitcoin does.
    """
    # Interpret the byte string as one big-endian integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        if not isinstance(c, int):
            # Python 2 str / Python 3 str element: convert char -> byte value
            c = ord(c)
        long_value += (256**i) * c
    # Repeated division by 58 yields the base58 digits, least significant first.
    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s.
    # BUG FIX: also compare against the int 0 -- on Python 3 ``bytes``
    # input the elements are ints, so the old ``c == '\0'`` test never
    # matched and the pad count was always 0.
    nPad = 0
    for c in v:
        if c == 0 or c == '\0':
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result
def b58decode(v, length=None):
    """ decode v into a string of len bytes.

    Returns ``None`` when ``length`` is given and the decoded result is
    not exactly that many bytes.

    NOTE(review): characters outside the base58 alphabet are not
    rejected -- ``str.find`` returns -1 for them, silently corrupting
    the value.  Kept as-is because ``b58decode_chk`` relies on getting a
    value back and reporting bad input via checksum mismatch.
    """
    # Interpret the base58 digits as one big integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)
    # Emit the integer as big-endian bytes.  A bytearray avoids relying
    # on the module-level chr() shim and the quadratic prepend-concat of
    # the original implementation.
    digits = bytearray()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        digits.append(mod)
        long_value = div
    digits.append(long_value)
    digits.reverse()
    # Leading '1' characters decode to leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = bytes(bytearray(nPad)) + bytes(digits)
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return the 4-byte (32-bit) double-SHA256 checksum of ``v``."""
    inner = hashlib.sha256(v).digest()
    return hashlib.sha256(inner).digest()[0:4]
def b58encode_chk(v):
    """base58-encode ``v`` with its 32-bit checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58 string, verify and strip its 32-bit checksum.

    Returns the payload without the trailing 4 checksum bytes, or
    ``None`` when decoding fails or the checksum does not match.
    """
    decoded = b58decode(v)
    if decoded is None:
        return None
    payload, check = decoded[:-4], decoded[-4:]
    if check != checksum(payload):
        return None
    return payload
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address payload is exactly 21 bytes: version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Use ==, not 'is': identity comparison against an int literal only
    # worked by the CPython small-int caching accident and raises a
    # SyntaxWarning on Python >= 3.8.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    # round-trip with explicit expected length
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| 24.772358
| 97
| 0.624549
|
import hashlib
class SHA256:
new = hashlib.sha256
if str != bytes:
def ord(c):
return c
def chr(n):
return bytes((n,))
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
nPad = 0
for c in v:
if c == '\0':
nPad += 1
else:
break
return (__b58chars[0] * nPad) + result
def b58decode(v, length=None):
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
else:
break
result = chr(0) * nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
return b58encode(v + checksum(v))
def b58decode_chk(v):
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
addr = b58decode_chk(strAddress)
if addr is None or len(addr) != 21:
return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| true
| true
|
f716d420b906d891f1c046d6c7abb726027eaa2b
| 4,749
|
py
|
Python
|
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
|
jveverka/data-lab
|
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
|
[
"Apache-2.0"
] | null | null | null |
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
|
jveverka/data-lab
|
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
|
[
"Apache-2.0"
] | 6
|
2019-12-07T09:53:26.000Z
|
2020-05-21T19:52:27.000Z
|
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
|
jveverka/data-lab
|
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
|
[
"Apache-2.0"
] | null | null | null |
from absl import logging
import numpy as np
import tensorflow as tf
import cv2
# Sub-model names of the full YOLOv3 Keras graph, in the order their
# weights appear in a darknet .weights file.
YOLOV3_LAYER_LIST = [
    'yolo_darknet',
    'yolo_conv_0',
    'yolo_output_0',
    'yolo_conv_1',
    'yolo_output_1',
    'yolo_conv_2',
    'yolo_output_2',
]
# Same, for the yolov3-tiny variant (two output scales instead of three).
YOLOV3_TINY_LAYER_LIST = [
    'yolo_darknet',
    'yolo_conv_0',
    'yolo_output_0',
    'yolo_conv_1',
    'yolo_output_1',
]
def load_darknet_weights(model, weights_file, tiny=False):
    """Copy convolution/batch-norm weights from a darknet ``.weights``
    file into the named sub-models of ``model``.

    :param model: Keras model containing the YOLO sub-models by name.
    :param weights_file: path to the darknet binary weights file.
    :param tiny: load the yolov3-tiny layer layout instead of full yolov3.
    :raises AssertionError: if the file contains more data than consumed.
    """
    # ``with`` guarantees the file is closed even if the trailing
    # assertion (or a reshape) fails -- the original leaked the handle.
    with open(weights_file, 'rb') as wf:
        # File header: version numbers plus an images-seen counter.
        major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
        layers = YOLOV3_TINY_LAYER_LIST if tiny else YOLOV3_LAYER_LIST
        for layer_name in layers:
            sub_model = model.get_layer(layer_name)
            for i, layer in enumerate(sub_model.layers):
                if not layer.name.startswith('conv2d'):
                    continue
                # A conv layer directly followed by batch_norm has no bias.
                batch_norm = None
                if i + 1 < len(sub_model.layers) and \
                        sub_model.layers[i + 1].name.startswith('batch_norm'):
                    batch_norm = sub_model.layers[i + 1]
                logging.info("{}/{} {}".format(
                    sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))
                filters = layer.filters
                size = layer.kernel_size[0]
                in_dim = layer.input_shape[-1]
                if batch_norm is None:
                    conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
                else:
                    # darknet [beta, gamma, mean, variance]
                    bn_weights = np.fromfile(
                        wf, dtype=np.float32, count=4 * filters)
                    # tf [gamma, beta, mean, variance]
                    bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
                # darknet shape (out_dim, in_dim, height, width)
                conv_shape = (filters, in_dim, size, size)
                # np.prod: np.product is deprecated and removed in NumPy 2.0
                conv_weights = np.fromfile(
                    wf, dtype=np.float32, count=np.prod(conv_shape))
                # tf shape (height, width, in_dim, out_dim)
                conv_weights = conv_weights.reshape(
                    conv_shape).transpose([2, 3, 1, 0])
                if batch_norm is None:
                    layer.set_weights([conv_weights, conv_bias])
                else:
                    layer.set_weights([conv_weights])
                    batch_norm.set_weights(bn_weights)
        assert len(wf.read()) == 0, 'failed to read all data'
def broadcast_iou(box_1, box_2):
    """Pairwise intersection-over-union of two sets of boxes.

    :param box_1: tensor shaped (..., (x1, y1, x2, y2))
    :param box_2: tensor shaped (N, (x1, y1, x2, y2))
    :return: tensor shaped (..., N) of IoU values.
    """
    # box_1: (..., (x1, y1, x2, y2))
    # box_2: (N, (x1, y1, x2, y2))
    # broadcast boxes so each box_1 entry is paired with each box_2 entry
    box_1 = tf.expand_dims(box_1, -2)
    box_2 = tf.expand_dims(box_2, 0)
    # new_shape: (..., N, (x1, y1, x2, y2))
    new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
    box_1 = tf.broadcast_to(box_1, new_shape)
    box_2 = tf.broadcast_to(box_2, new_shape)
    # intersection width/height, clamped at 0 for disjoint boxes
    int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                       tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
    int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                       tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
    int_area = int_w * int_h
    box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
        (box_1[..., 3] - box_1[..., 1])
    box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
        (box_2[..., 3] - box_2[..., 1])
    # IoU = intersection / union
    return int_area / (box_1_area + box_2_area - int_area)
def draw_outputs(img, outputs, class_names):
    """Draw detection boxes with class name and score onto ``img``.

    :param img: image array (H, W, C); relative coords are scaled by (W, H).
    :param outputs: batched (boxes, objectness, classes, nums); batch 0 is drawn.
    :param class_names: list of class-name strings.
    :return: the annotated image.
    """
    boxes, objectness, classes, nums = outputs
    # strip the batch dimension -- only image 0 is drawn
    boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
    wh = np.flip(img.shape[0:2])  # (width, height) scale for relative coords
    for i in range(nums):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        img = cv2.putText(img, '{} {:.4f}'.format(
            class_names[int(classes[i])], objectness[i]),
            x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img
def draw_labels(x, y, class_names):
    """Draw ground-truth boxes and class names onto an image.

    :param x: eager image tensor; converted with ``.numpy()``.
    :param y: rows of ``(x1, y1, x2, y2, class_index)`` in relative coords.
    :param class_names: list of class-name strings.
    :return: the annotated image array.
    """
    img = x.numpy()
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])  # (width, height) scale for relative coords
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        # int() cast: indexing a Python list with a tensor/float element
        # fails; mirrors the cast already done in draw_outputs().
        img = cv2.putText(img, class_names[int(classes[i])],
                          x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                          1, (0, 0, 255), 2)
    return img
def freeze_all(model, frozen=True):
    """Recursively set ``trainable = not frozen`` on ``model`` and, when it
    is a Keras model, on every sub-layer as well."""
    model.trainable = not frozen
    if isinstance(model, tf.keras.Model):
        for sub_layer in model.layers:
            freeze_all(sub_layer, frozen)
| 35.177778
| 83
| 0.561592
|
from absl import logging
import numpy as np
import tensorflow as tf
import cv2
YOLOV3_LAYER_LIST = [
'yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
'yolo_conv_2',
'yolo_output_2',
]
YOLOV3_TINY_LAYER_LIST = [
'yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
]
def load_darknet_weights(model, weights_file, tiny=False):
wf = open(weights_file, 'rb')
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
if tiny:
layers = YOLOV3_TINY_LAYER_LIST
else:
layers = YOLOV3_LAYER_LIST
for layer_name in layers:
sub_model = model.get_layer(layer_name)
for i, layer in enumerate(sub_model.layers):
if not layer.name.startswith('conv2d'):
continue
batch_norm = None
if i + 1 < len(sub_model.layers) and \
sub_model.layers[i + 1].name.startswith('batch_norm'):
batch_norm = sub_model.layers[i + 1]
logging.info("{}/{} {}".format(
sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))
filters = layer.filters
size = layer.kernel_size[0]
in_dim = layer.input_shape[-1]
if batch_norm is None:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
else:
bn_weights = np.fromfile(
wf, dtype=np.float32, count=4 * filters)
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
conv_shape = (filters, in_dim, size, size)
conv_weights = np.fromfile(
wf, dtype=np.float32, count=np.product(conv_shape))
conv_weights = conv_weights.reshape(
conv_shape).transpose([2, 3, 1, 0])
if batch_norm is None:
layer.set_weights([conv_weights, conv_bias])
else:
layer.set_weights([conv_weights])
batch_norm.set_weights(bn_weights)
assert len(wf.read()) == 0, 'failed to read all data'
wf.close()
def broadcast_iou(box_1, box_2):
box_1 = tf.expand_dims(box_1, -2)
box_2 = tf.expand_dims(box_2, 0)
new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
box_1 = tf.broadcast_to(box_1, new_shape)
box_2 = tf.broadcast_to(box_2, new_shape)
int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
int_area = int_w * int_h
box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
(box_1[..., 3] - box_1[..., 1])
box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
(box_2[..., 3] - box_2[..., 1])
return int_area / (box_1_area + box_2_area - int_area)
def draw_outputs(img, outputs, class_names):
boxes, objectness, classes, nums = outputs
boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
wh = np.flip(img.shape[0:2])
for i in range(nums):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, '{} {:.4f}'.format(
class_names[int(classes[i])], objectness[i]),
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
return img
def draw_labels(x, y, class_names):
img = x.numpy()
boxes, classes = tf.split(y, (4, 1), axis=-1)
classes = classes[..., 0]
wh = np.flip(img.shape[0:2])
for i in range(len(boxes)):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, class_names[classes[i]],
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,
1, (0, 0, 255), 2)
return img
def freeze_all(model, frozen=True):
model.trainable = not frozen
if isinstance(model, tf.keras.Model):
for l in model.layers:
freeze_all(l, frozen)
| true
| true
|
f716d445c1bcac531c48494bd19770afcbb198fa
| 1,377
|
py
|
Python
|
dags/tuto.py
|
setuk/docker-airflow
|
8741ac32094893e1cd56b8bd411d240f60453eb7
|
[
"Apache-2.0"
] | null | null | null |
dags/tuto.py
|
setuk/docker-airflow
|
8741ac32094893e1cd56b8bd411d240f60453eb7
|
[
"Apache-2.0"
] | null | null | null |
dags/tuto.py
|
setuk/docker-airflow
|
8741ac32094893e1cd56b8bd411d240f60453eb7
|
[
"Apache-2.0"
] | null | null | null |
"""
Code that goes along with the Airflow located at:
http://airflow.readthedocs.org/en/latest/tutorial.html
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
# Default arguments inherited by every task in this DAG.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": datetime(2020, 11, 19),
    "email": ["airflow@airflow.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
    # Other commonly-used options, kept from the tutorial:
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# dag = DAG("tutorial", default_args=default_args, schedule_interval=timedelta(1))
# schedule_interval=None: the DAG only runs when triggered manually.
dag = DAG("tutorial", default_args=default_args, schedule_interval=None)
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = BashOperator(task_id="print_date", bash_command="date", dag=dag)
t2 = BashOperator(task_id="sleep", bash_command="sleep 5", retries=3, dag=dag)
# Jinja-templated bash command: {{ ds }} is the execution date,
# macros.ds_add shifts it by 7 days, params.my_param is supplied below.
templated_command = """
{% for i in range(5) %}
echo "{{ ds }}"
echo "{{ macros.ds_add(ds, 7)}}"
echo "{{ params.my_param }}"
{% endfor %}
"""
t3 = BashOperator(
    task_id="templated",
    bash_command=templated_command,
    params={"my_param": "Parameter I passed in"},
    dag=dag,
)
# t1 runs first; t2 and t3 both depend on it.
t2.set_upstream(t1)
t3.set_upstream(t1)
| 27.54
| 82
| 0.67175
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": datetime(2020, 11, 19),
"email": ["airflow@airflow.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
dag = DAG("tutorial", default_args=default_args, schedule_interval=None)
t1 = BashOperator(task_id="print_date", bash_command="date", dag=dag)
t2 = BashOperator(task_id="sleep", bash_command="sleep 5", retries=3, dag=dag)
templated_command = """
{% for i in range(5) %}
echo "{{ ds }}"
echo "{{ macros.ds_add(ds, 7)}}"
echo "{{ params.my_param }}"
{% endfor %}
"""
t3 = BashOperator(
task_id="templated",
bash_command=templated_command,
params={"my_param": "Parameter I passed in"},
dag=dag,
)
t2.set_upstream(t1)
t3.set_upstream(t1)
| true
| true
|
f716d497d29ec78b5afc1dd07eb0f92340ba179b
| 8,724
|
py
|
Python
|
benchmark/cloud/aws/kylin.py
|
ChenYi015/Raven
|
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
|
[
"Apache-2.0"
] | 1
|
2022-03-03T05:54:25.000Z
|
2022-03-03T05:54:25.000Z
|
benchmark/cloud/aws/kylin.py
|
ChenYi015/Raven
|
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
|
[
"Apache-2.0"
] | null | null | null |
benchmark/cloud/aws/kylin.py
|
ChenYi015/Raven
|
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
from typing import List
from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService
from benchmark.tools import get_random_id
logger = logging.getLogger()
class KylinMode:
    """Kylin deployment mode string constants."""
    ALL = 'all'
    JOB = 'job'
    QUERY = 'query'
class KylinMaster(Ec2Instance):
    """EC2 instance hosting the Kylin (and Spark) master, created from a
    CloudFormation template under ``$RAVEN_HOME/config/cloud/aws/kylin``.
    """
    def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
                 ec2_instance_type: str):
        # Read the CloudFormation template that defines the master stack.
        path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
                            'kylin-master-cloudformation-template.yaml')
        with open(path, encoding='utf-8') as file:
            template = file.read()
        super().__init__(
            name='KylinMaster',
            aws=aws,
            region=region,
            stack_name='Raven-Kylin-Master-Stack',
            template=template,
            ec2_key_name=ec2_key_name,
            ec2_instance_type=ec2_instance_type
        )
    @property
    def spark_master_url(self):
        # Published by the template as a CloudFormation stack output.
        return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl')
    def __str__(self):
        return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
    def launch(self):
        """Create the master's CloudFormation stack (with logging)."""
        logger.info('Kylin master is launching...')
        super().launch()
        logger.info('Kylin master has launched.')
    def terminate(self):
        """Delete the master's CloudFormation stack (with logging)."""
        logger.info('Kylin master is terminating...')
        super().terminate()
        logger.info('Kylin master has terminated.')
class KylinWorker(Ec2Instance):
    """EC2 instance hosting one Kylin worker; each worker gets its own
    CloudFormation stack named after its numeric ``worker_id``.
    """
    def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
                 ec2_instance_type: str, worker_id: int = 1):
        # Read the CloudFormation template that defines a worker stack.
        path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
                            'kylin-worker-cloudformation-template.yaml')
        with open(path, encoding='utf-8') as file:
            template = file.read()
        super().__init__(
            name='KylinWorker',
            aws=aws,
            region=region,
            stack_name=f'Raven-Kylin-Worker{worker_id}-Stack',
            template=template,
            ec2_key_name=ec2_key_name,
            ec2_instance_type=ec2_instance_type,
            KylinWorkerId=worker_id,
        )
        self._worker_id = worker_id
        # Set later (before launch) via the spark_master_private_ip setter.
        self._spark_master_private_ip = ''
    @property
    def worker_id(self):
        return self._worker_id
    @property
    def spark_master_private_ip(self):
        return self._spark_master_private_ip
    @spark_master_private_ip.setter
    def spark_master_private_ip(self, private_ip: str):
        # Also forwarded to the CloudFormation stack as a template parameter.
        self._spark_master_private_ip = private_ip
        self.kwargs['SparkMasterPrivateIp'] = private_ip
    def __str__(self):
        return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
    def launch(self):
        """Create this worker's CloudFormation stack (with logging)."""
        logger.info(f'Kylin worker {self._worker_id} is launching...')
        super().launch()
        logger.info(f'Kylin worker {self._worker_id} has launched.')
    def terminate(self):
        """Delete this worker's CloudFormation stack (with logging)."""
        logger.info(f'Kylin worker {self._worker_id} is terminating...')
        super().terminate()
        logger.info(f'Kylin worker {self._worker_id} has terminated.')
class KylinCluster:
    """A Kylin cluster on AWS: one master plus a scalable set of workers.

    Launch/terminate/agent/metric operations fan out to the workers on
    threads so the per-worker CloudFormation stacks are handled
    concurrently.
    """

    def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0,
                 worker_instance_type: str = 't2.small'):
        self._aws = aws
        self._master_instance_type = master_instance_type
        self._worker_instance_type = worker_instance_type
        self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type)
        self._workers: List[KylinWorker] = [
            KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
            for worker_id in range(0, worker_num)]
        # Random id separating the output directories of different clusters.
        self._cluster_id = get_random_id(16)

    @property
    def master(self):
        return self._master

    @property
    def workers(self):
        return self._workers

    def __str__(self):
        return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})'

    def launch(self):
        """Launch the master first (workers need its private IP), then all
        workers concurrently."""
        logger.info('Kylin cluster is launching...')
        self.master.launch()
        threads: List[threading.Thread] = []
        for worker in self.workers:
            worker.spark_master_private_ip = self.master.private_ip
            thread = threading.Thread(target=worker.launch)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logger.info('Kylin cluster has launched.')

    def terminate(self):
        """Terminate all workers concurrently, then the master."""
        logger.info('Kylin cluster is terminating...')
        threads: List[threading.Thread] = []
        for worker in self.workers:
            thread = threading.Thread(target=worker.terminate)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        self.master.terminate()
        logger.info('Kylin cluster has terminated.')

    def install_cloud_watch_agent(self):
        """Install the CloudWatch agent on the master and every worker,
        concurrently."""
        logger.debug('Kylin cluster is installing cloudwatch agent...')
        threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)]
        for worker in self.workers:
            threads.append(threading.Thread(target=worker.install_cloudwatch_agent))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        logger.debug('Kylin cluster has finished installing cloudwatch agent.')

    def collect_cluster_info(self, output_dir: str = None):
        """Dump master/worker descriptions to a timestamped JSON file.

        :param output_dir: target directory; defaults to
            ``$RAVEN_HOME/out/cluster/kylin-<cluster_id>``.
        """
        if not output_dir:
            output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
        os.makedirs(output_dir, exist_ok=True)
        info = {
            'Master': self.master.to_dict(),
            'Workers': [worker.to_dict() for worker in self.workers]
        }
        with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w',
                  encoding='utf-8') as file:
            json.dump(info, file, indent=2)

    def collect_metrics(self, output_dir: str = None):
        """Pull CloudWatch metrics from every node into ``output_dir``
        (same default location as :meth:`collect_cluster_info`)."""
        logger.debug('Kylin cluster is pulling metrics cloudwatch agent...')
        if not output_dir:
            output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
        os.makedirs(output_dir, exist_ok=True)
        threads: List[threading.Thread] = [
            threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})]
        for worker in self.workers:
            threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir}))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        logger.debug('Kylin cluster has finished pulling metrics cloudwatch agent...')

    def scale(self, worker_num: int):
        """Scale the cluster to exactly ``worker_num`` workers, terminating
        surplus workers or launching missing ones concurrently."""
        logger.info('Kylin cluster is scaling...')
        n = len(self.workers)
        threads: List[threading.Thread] = []
        if worker_num < n:
            for worker_id in range(worker_num, n):
                thread = threading.Thread(target=self.workers[worker_id].terminate)
                thread.start()
                threads.append(thread)
        elif worker_num > n:
            for worker_id in range(n, worker_num):
                worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
                worker.spark_master_private_ip = self.master.private_ip
                self.workers.append(worker)
                thread = threading.Thread(target=worker.launch)
                thread.start()
                threads.append(thread)
        for thread in threads:
            thread.join()
        if worker_num < n:
            # BUG FIX: the original kept terminated workers in self._workers,
            # so len(self.workers) no longer matched the real cluster size and
            # later scale()/collect calls would touch dead instances.
            del self._workers[worker_num:]
        logger.info('Kylin cluster has finished scaling.')
| 36.810127
| 118
| 0.634457
|
import json
import logging
import os
import threading
import time
from typing import List
from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService
from benchmark.tools import get_random_id
logger = logging.getLogger()
class KylinMode:
ALL = 'all'
JOB = 'job'
QUERY = 'query'
class KylinMaster(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-master-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinMaster',
aws=aws,
region=region,
stack_name='Raven-Kylin-Master-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type
)
@property
def spark_master_url(self):
return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl')
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info('Kylin master is launching...')
super().launch()
logger.info('Kylin master has launched.')
def terminate(self):
logger.info('Kylin master is terminating...')
super().terminate()
logger.info('Kylin master has terminated.')
class KylinWorker(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str, worker_id: int = 1):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-worker-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinWorker',
aws=aws,
region=region,
stack_name=f'Raven-Kylin-Worker{worker_id}-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type,
KylinWorkerId=worker_id,
)
self._worker_id = worker_id
self._spark_master_private_ip = ''
@property
def worker_id(self):
return self._worker_id
@property
def spark_master_private_ip(self):
return self._spark_master_private_ip
@spark_master_private_ip.setter
def spark_master_private_ip(self, private_ip: str):
self._spark_master_private_ip = private_ip
self.kwargs['SparkMasterPrivateIp'] = private_ip
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info(f'Kylin worker {self._worker_id} is launching...')
super().launch()
logger.info(f'Kylin worker {self._worker_id} has launched.')
def terminate(self):
logger.info(f'Kylin worker {self._worker_id} is terminating...')
super().terminate()
logger.info(f'Kylin worker {self._worker_id} has terminated.')
class KylinCluster:
    """A Kylin deployment consisting of one master node and a scalable pool of workers.

    All fan-out operations (launch/terminate/agent install/metrics) run one
    thread per node so the underlying EC2 calls proceed in parallel.
    """

    def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0,
                 worker_instance_type: str = 't2.small'):
        self._aws = aws
        self._master_instance_type = master_instance_type
        self._worker_instance_type = worker_instance_type
        self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type)
        self._workers: List[KylinWorker] = [
            KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
            for worker_id in range(worker_num)]
        # Random id used to name this cluster's output directories.
        self._cluster_id = get_random_id(16)

    @property
    def master(self):
        return self._master

    @property
    def workers(self):
        return self._workers

    def __str__(self):
        return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})'

    @staticmethod
    def _run_all(threads: List[threading.Thread]):
        # Start every thread, then wait for all of them to finish.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    def _ensure_output_dir(self, output_dir: str = None) -> str:
        # Default output location: $RAVEN_HOME/out/cluster/kylin-<cluster-id>.
        if not output_dir:
            output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
        os.makedirs(output_dir, exist_ok=True)
        return output_dir

    def launch(self):
        """Launch the master first (workers need its private IP), then all workers in parallel."""
        logger.info('Kylin cluster is launching...')
        self.master.launch()
        threads: List[threading.Thread] = []
        for worker in self.workers:
            worker.spark_master_private_ip = self.master.private_ip
            threads.append(threading.Thread(target=worker.launch))
        self._run_all(threads)
        logger.info('Kylin cluster has launched.')

    def terminate(self):
        """Terminate all workers in parallel, then the master."""
        logger.info('Kylin cluster is terminating...')
        self._run_all([threading.Thread(target=worker.terminate) for worker in self.workers])
        self.master.terminate()
        logger.info('Kylin cluster has terminated.')

    def install_cloud_watch_agent(self):
        """Install the CloudWatch agent on the master and every worker in parallel."""
        logger.debug('Kylin cluster is installing cloudwatch agent...')
        threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)]
        threads.extend(threading.Thread(target=worker.install_cloudwatch_agent) for worker in self.workers)
        self._run_all(threads)
        logger.debug('Kylin cluster has finished installing cloudwatch agent.')

    def collect_cluster_info(self, output_dir: str = None):
        """Dump a timestamped JSON snapshot describing the master and all workers."""
        output_dir = self._ensure_output_dir(output_dir)
        info = {
            'Master': self.master.to_dict(),
            'Workers': [worker.to_dict() for worker in self.workers]
        }
        with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w',
                  encoding='utf-8') as file:
            json.dump(info, file, indent=2)

    def collect_metrics(self, output_dir: str = None):
        """Pull CloudWatch metrics for every node in parallel into *output_dir*."""
        logger.debug('Kylin cluster is pulling metrics cloudwatch agent...')
        output_dir = self._ensure_output_dir(output_dir)
        threads: List[threading.Thread] = [
            threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})]
        threads.extend(
            threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir})
            for worker in self.workers)
        self._run_all(threads)
        logger.debug('Kylin cluster has finished pulling metrics cloudwatch agent...')

    def scale(self, worker_num: int):
        """Scale the worker pool to exactly *worker_num* workers.

        Bug fix: terminated workers are now removed from ``self.workers``, so a
        later scale()/launch()/terminate() no longer operates on dead nodes and
        ``len(self.workers)`` stays accurate after a scale-down.
        """
        logger.info('Kylin cluster is scaling...')
        current = len(self.workers)
        threads: List[threading.Thread] = []
        if worker_num < current:
            for worker in self.workers[worker_num:]:
                threads.append(threading.Thread(target=worker.terminate))
            self._run_all(threads)
            # Drop the terminated workers from the pool (the original kept them).
            del self._workers[worker_num:]
        elif worker_num > current:
            for worker_id in range(current, worker_num):
                worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type,
                                     worker_id=worker_id)
                worker.spark_master_private_ip = self.master.private_ip
                self.workers.append(worker)
                threads.append(threading.Thread(target=worker.launch))
            self._run_all(threads)
        logger.info('Kylin cluster has finished scaling.')
| true
| true
|
f716d4d8c9c6fc2d044b19e6dd1de0835c5c8e87
| 285
|
py
|
Python
|
Blog/sitemaps.py
|
myselfajp/MyFirstPage
|
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
|
[
"MIT"
] | null | null | null |
Blog/sitemaps.py
|
myselfajp/MyFirstPage
|
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
|
[
"MIT"
] | null | null | null |
Blog/sitemaps.py
|
myselfajp/MyFirstPage
|
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
|
[
"MIT"
] | null | null | null |
from django.contrib.sitemaps import Sitemap
from Blog.models import Post
class BlogSitemap(Sitemap):
    """Sitemap entry source for blog posts."""

    # Hints for search engines: expected change frequency and relative priority.
    changefreq = "weekly"
    priority = 0.5

    def items(self):
        # Only posts flagged as published (status=True) are listed.
        return Post.objects.filter(status=True)

    def lastmod(self, obj):
        # Last-modified timestamp reported per post.
        return obj.published_date
| 20.357143
| 47
| 0.687719
|
from django.contrib.sitemaps import Sitemap
from Blog.models import Post
class BlogSitemap(Sitemap):
    """Sitemap entry source for blog posts."""

    # Hints for search engines: expected change frequency and relative priority.
    changefreq = "weekly"
    priority = 0.5

    def items(self):
        # Only posts flagged as published (status=True) are listed.
        return Post.objects.filter(status=True)

    def lastmod(self, obj):
        # Last-modified timestamp reported per post.
        return obj.published_date
| true
| true
|
f716d5594aa167180b878069286e9c1308907fdf
| 9,503
|
py
|
Python
|
tests/infra/jsonrpc.py
|
rschust/CCF
|
2ad5f162cd73c645070f26461d8d053b45f63c3e
|
[
"Apache-2.0"
] | null | null | null |
tests/infra/jsonrpc.py
|
rschust/CCF
|
2ad5f162cd73c645070f26461d8d053b45f63c3e
|
[
"Apache-2.0"
] | null | null | null |
tests/infra/jsonrpc.py
|
rschust/CCF
|
2ad5f162cd73c645070f26461d8d053b45f63c3e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import socket
import ssl
import msgpack
import struct
import select
import contextlib
import json
import logging
import time
import os
from enum import IntEnum
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import asymmetric
from loguru import logger as LOG
# Values defined in node/rpc/jsonrpc.h
class ErrorCode(IntEnum):
    """JSON-RPC error codes returned by the service.

    The -326xx/-327xx values extend the standard JSON-RPC 2.0 protocol errors;
    -32000..-32099 is the JSON-RPC reserved server-error range.
    """

    # Standard JSON-RPC 2.0 protocol errors.
    PARSE_ERROR = -32700
    INVALID_REQUEST = -32600
    METHOD_NOT_FOUND = -32601
    INVALID_PARAMS = -32602
    INTERNAL_ERROR = -32603
    # Application extensions in the protocol range.
    NODE_NOT_FOUND = -32604
    INVALID_CLIENT_SIGNATURE = -32605
    INVALID_CALLER_ID = -32606
    CODE_ID_NOT_FOUND = -32607
    CODE_ID_RETIRED = -32608
    RPC_NOT_FORWARDED = -32609
    # Server-error range, mostly transaction outcomes.
    SERVER_ERROR_START = -32000
    TX_NOT_LEADER = -32001
    TX_REPLICATED = -32002
    TX_ROLLED_BACK = -32003
    TX_FAILED_TO_COMMIT = -32004
    TX_FAILED_TO_REPLICATE = -32005
    SCRIPT_ERROR = -32006
    INSUFFICIENT_RIGHTS = -32007
    DENIED = -32008
    TX_LEADER_UNKNOWN = -32009
    RPC_NOT_SIGNED = -32010
    SERVER_ERROR_END = -32099
def truncate(string, max_len=256):
    """Return *string* unchanged if it fits in *max_len*, else cut it and append "...".

    The truncated result is exactly *max_len* characters long.
    Bug fix: the original ignored *max_len* and always hard-coded 256.
    """
    if len(string) > max_len:
        return string[: max_len - 3] + "..."
    return string
class Request:
    """A single JSON-RPC request message."""

    def __init__(self, id, method, params, jsonrpc="2.0"):
        self.id = id
        self.method = method
        self.params = params
        self.jsonrpc = jsonrpc

    def to_dict(self):
        """Represent the request as a plain dictionary."""
        return dict(
            id=self.id,
            method=self.method,
            jsonrpc=self.jsonrpc,
            params=self.params,
        )

    def to_msgpack(self):
        """Serialise the request with msgpack (binary frames)."""
        return msgpack.packb(self.to_dict(), use_bin_type=True)

    def to_json(self):
        """Serialise the request as UTF-8 encoded JSON bytes."""
        return json.dumps(self.to_dict()).encode()
class Response:
    """A JSON-RPC response; can be populated from msgpack or JSON payloads."""

    def __init__(
        self,
        id,
        result=None,
        error=None,
        commit=None,
        term=None,
        global_commit=None,
        jsonrpc="2.0",
    ):
        self.id = id
        self.result = result
        self.error = error
        self.jsonrpc = jsonrpc
        self.commit = commit
        self.term = term
        self.global_commit = global_commit
        # Snapshot of the constructor parameter names (locals() at this point
        # contains exactly the parameters, since only attributes were assigned
        # above); used by _from_parsed to reject unknown response keys.
        self._attrs = set(locals()) - {"self"}

    def to_dict(self):
        """Serialise as a dict carrying either 'result' or 'error'."""
        d = {"id": self.id, "jsonrpc": self.jsonrpc}
        if self.result is not None:
            d["result"] = self.result
        else:
            d["error"] = self.error
        return d

    def _from_parsed(self, parsed):
        """Populate attributes from a parsed message, decoding byte keys."""
        def decode(sl, is_key=False):
            # msgpack may deliver keys as bytes; decode keys and recurse into
            # nested dicts/lists, leaving other values untouched.
            if is_key and hasattr(sl, "decode"):
                return sl.decode()
            if hasattr(sl, "items"):
                return {decode(k, is_key=True): decode(v) for k, v in sl.items()}
            elif isinstance(sl, list):
                return [decode(e) for e in sl]
            else:
                return sl
        parsed_s = {
            decode(attr, is_key=True): decode(value) for attr, value in parsed.items()
        }
        # Any key not matching a constructor parameter is an error.
        unexpected = parsed_s.keys() - self._attrs
        if unexpected:
            raise ValueError("Unexpected keys in response: {}".format(unexpected))
        for attr, value in parsed_s.items():
            setattr(self, attr, value)

    def from_msgpack(self, data):
        """Populate from a msgpack-encoded message."""
        parsed = msgpack.unpackb(data)
        self._from_parsed(parsed)

    def from_json(self, data):
        """Populate from UTF-8 JSON bytes."""
        parsed = json.loads(data.decode())
        self._from_parsed(parsed)
class FramedTLSClient:
    """TLS client for a simple framed protocol: each message is a little-endian
    uint32 length header followed by that many payload bytes."""

    def __init__(self, host, port, server_hostname, cert=None, key=None, cafile=None):
        self.host = host
        self.port = port
        self.server_hostname = server_hostname
        self.cert = cert
        self.key = key
        self.cafile = cafile
        self.context = None
        self.sock = None
        self.conn = None

    def connect(self):
        """Create the SSL context and open the TLS connection."""
        if self.cafile:
            self.context = ssl.create_default_context(cafile=self.cafile)
            # Auto detect EC curve to use based on server CA
            ca_bytes = open(self.cafile, "rb").read()
            ca_curve = (
                x509.load_pem_x509_certificate(ca_bytes, default_backend())
                .public_key()
                .curve
            )
            if isinstance(ca_curve, asymmetric.ec.SECP256K1):
                self.context.set_ecdh_curve("secp256k1")
        else:
            self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        if self.cert and self.key:
            self.context.load_cert_chain(certfile=self.cert, keyfile=self.key)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn = self.context.wrap_socket(
            self.sock, server_side=False, server_hostname=self.server_hostname
        )
        self.conn.connect((self.host, self.port))

    def send(self, msg):
        """Send one frame: 4-byte little-endian length header, then the payload."""
        frame = struct.pack("<I", len(msg)) + msg
        self.conn.sendall(frame)

    def _recv_exact(self, size):
        # recv() may legally return fewer bytes than requested; loop until the
        # full amount arrives, and raise instead of spinning if the peer closes.
        data = b""
        while len(data) < size:
            chunk = self.conn.recv(size - len(data))
            if not chunk:
                raise ConnectionError("connection closed while reading a frame")
            data += chunk
        return data

    def _read(self):
        # Bug fix: the original unpacked the header from a single recv(4),
        # which can return fewer than 4 bytes and crash struct.unpack, and its
        # payload loop spun forever on a closed connection.
        size, = struct.unpack("<I", self._recv_exact(4))
        return self._recv_exact(size)

    def read(self):
        """Poll for up to ~50s (5000 x 10ms) and return the next frame, or None on timeout."""
        for _ in range(5000):
            r, _, _ = select.select([self.conn], [], [], 0)
            if r:
                return self._read()
            else:
                time.sleep(0.01)

    def disconnect(self):
        self.conn.close()
class Stream:
    """Allocates sequential request ids and matches incoming responses to them."""

    def __init__(self, jsonrpc="2.0", format="msgpack"):
        self.jsonrpc = jsonrpc
        self.seqno = 0
        self.pending = {}
        self.format = format

    def request(self, method, params):
        """Build a Request carrying the next sequence number."""
        req = Request(self.seqno, method, params, self.jsonrpc)
        self.seqno += 1
        return req

    def response(self, id):
        """Pop and return the pending response for *id*, or None if not seen yet."""
        return self.pending.pop(id, None)

    def update(self, msg):
        """Parse a raw message and file it under its response id."""
        resp = Response(0)
        parse = getattr(resp, "from_" + self.format)
        parse(msg)
        self.pending[resp.id] = resp
class RPCLogger:
    """Logs requests and responses to the application log, truncated for readability."""

    def log_request(self, request, name, description):
        LOG.info(
            truncate(
                "{} #{} {} {}{}".format(
                    name, request.id, request.method, request.params, description
                )
            )
        )

    def log_response(self, response):
        LOG.debug(
            truncate(
                "#{} {}".format(
                    response.id,
                    # Drop private attributes (e.g. _attrs) from the dump.
                    {
                        k: v
                        for k, v in (response.__dict__ or {}).items()
                        if not k.startswith("_")
                    },
                )
            )
        )
class RPCFileLogger(RPCLogger):
    """Logger that appends requests/responses as pretty-printed JSON to a file."""

    def __init__(self, path):
        self.path = path

    def _append(self, header, payload):
        # Re-open in append mode on every call, matching the original behaviour.
        with open(self.path, "a") as f:
            f.write(header + os.linesep)
            json.dump(payload, f, indent=2)
            f.write(os.linesep)

    def log_request(self, request, name, description):
        self._append(">> Request:", request.to_dict())

    def log_response(self, response):
        self._append("<< Response:", response.to_dict())
class FramedTLSJSONRPCClient:
    """JSON-RPC client layered over FramedTLSClient, with pluggable loggers."""

    def __init__(
        self,
        host,
        port,
        server_hostname,
        cert=None,
        key=None,
        cafile=None,
        version="2.0",
        format="msgpack",
        description=None,
    ):
        self.client = FramedTLSClient(host, port, server_hostname, cert, key, cafile)
        self.stream = Stream(version, format=format)
        self.format = format
        self.name = "[{}:{}]".format(host, port)
        self.description = description
        # Tuple (not list) of loggers; extended via += by the client() helper.
        self.rpc_loggers = (RPCLogger(),)

    def connect(self):
        return self.client.connect()

    def disconnect(self):
        return self.client.disconnect()

    def request(self, method, params):
        """Serialise and send a request; returns the request id for pairing."""
        r = self.stream.request(method, params)
        # Dispatch to to_msgpack()/to_json() depending on the configured format.
        self.client.send(getattr(r, "to_{}".format(self.format))())
        description = ""
        if self.description:
            description = " ({})".format(self.description)
        for logger in self.rpc_loggers:
            logger.log_request(r, self.name, description)
        return r.id

    def tick(self):
        # Read one frame off the wire and file it under its response id.
        msg = self.client.read()
        self.stream.update(msg)

    def response(self, id):
        """Read the next frame and return the response for *id* (None if not pending)."""
        self.tick()
        r = self.stream.response(id)
        for logger in self.rpc_loggers:
            logger.log_response(r)
        return r

    def do(self, method, params, expected_result=None, expected_error_code=None):
        """Round-trip a call, optionally asserting on the result or error code."""
        id = self.request(method, params)
        r = self.response(id)
        if expected_result is not None:
            assert expected_result == r.result
        if expected_error_code is not None:
            assert expected_error_code.value == r.error["code"]
        return r

    def rpc(self, method, params):
        """Round-trip a call and return the Response."""
        id = self.request(method, params)
        return self.response(id)
@contextlib.contextmanager
def client(
    host,
    port,
    server_hostname="users",
    cert=None,
    key=None,
    cafile=None,
    version="2.0",
    format="msgpack",
    description=None,
    log_file=None,
):
    """Context manager yielding a connected FramedTLSJSONRPCClient.

    If *log_file* is given, requests/responses are additionally appended there.
    The connection is always closed on exit.
    """
    c = FramedTLSJSONRPCClient(
        host, port, server_hostname, cert, key, cafile, version, format, description
    )
    if log_file is not None:
        c.rpc_loggers += (RPCFileLogger(log_file),)
    c.connect()
    try:
        yield c
    finally:
        c.disconnect()
| 27.78655
| 86
| 0.573398
|
import socket
import ssl
import msgpack
import struct
import select
import contextlib
import json
import logging
import time
import os
from enum import IntEnum
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import asymmetric
from loguru import logger as LOG
class ErrorCode(IntEnum):
    """JSON-RPC error codes returned by the service.

    The -326xx/-327xx values extend the standard JSON-RPC 2.0 protocol errors;
    -32000..-32099 is the JSON-RPC reserved server-error range.
    """

    # Standard JSON-RPC 2.0 protocol errors.
    PARSE_ERROR = -32700
    INVALID_REQUEST = -32600
    METHOD_NOT_FOUND = -32601
    INVALID_PARAMS = -32602
    INTERNAL_ERROR = -32603
    # Application extensions in the protocol range.
    NODE_NOT_FOUND = -32604
    INVALID_CLIENT_SIGNATURE = -32605
    INVALID_CALLER_ID = -32606
    CODE_ID_NOT_FOUND = -32607
    CODE_ID_RETIRED = -32608
    RPC_NOT_FORWARDED = -32609
    # Server-error range, mostly transaction outcomes.
    SERVER_ERROR_START = -32000
    TX_NOT_LEADER = -32001
    TX_REPLICATED = -32002
    TX_ROLLED_BACK = -32003
    TX_FAILED_TO_COMMIT = -32004
    TX_FAILED_TO_REPLICATE = -32005
    SCRIPT_ERROR = -32006
    INSUFFICIENT_RIGHTS = -32007
    DENIED = -32008
    TX_LEADER_UNKNOWN = -32009
    RPC_NOT_SIGNED = -32010
    SERVER_ERROR_END = -32099
def truncate(string, max_len=256):
    """Return *string* unchanged if it fits in *max_len*, else cut it and append "...".

    The truncated result is exactly *max_len* characters long.
    Bug fix: the original ignored *max_len* and always hard-coded 256.
    """
    if len(string) > max_len:
        return string[: max_len - 3] + "..."
    return string
class Request:
    """A single JSON-RPC request message."""

    def __init__(self, id, method, params, jsonrpc="2.0"):
        self.id = id
        self.method = method
        self.params = params
        self.jsonrpc = jsonrpc

    def to_dict(self):
        """Represent the request as a plain dictionary."""
        return dict(
            id=self.id,
            method=self.method,
            jsonrpc=self.jsonrpc,
            params=self.params,
        )

    def to_msgpack(self):
        """Serialise the request with msgpack (binary frames)."""
        return msgpack.packb(self.to_dict(), use_bin_type=True)

    def to_json(self):
        """Serialise the request as UTF-8 encoded JSON bytes."""
        return json.dumps(self.to_dict()).encode()
class Response:
    """A JSON-RPC response; can be populated from msgpack or JSON payloads."""

    def __init__(
        self,
        id,
        result=None,
        error=None,
        commit=None,
        term=None,
        global_commit=None,
        jsonrpc="2.0",
    ):
        self.id = id
        self.result = result
        self.error = error
        self.jsonrpc = jsonrpc
        self.commit = commit
        self.term = term
        self.global_commit = global_commit
        # Snapshot of the constructor parameter names (locals() at this point
        # contains exactly the parameters, since only attributes were assigned
        # above); used by _from_parsed to reject unknown response keys.
        self._attrs = set(locals()) - {"self"}

    def to_dict(self):
        """Serialise as a dict carrying either 'result' or 'error'."""
        d = {"id": self.id, "jsonrpc": self.jsonrpc}
        if self.result is not None:
            d["result"] = self.result
        else:
            d["error"] = self.error
        return d

    def _from_parsed(self, parsed):
        """Populate attributes from a parsed message, decoding byte keys."""
        def decode(sl, is_key=False):
            # msgpack may deliver keys as bytes; decode keys and recurse into
            # nested dicts/lists, leaving other values untouched.
            if is_key and hasattr(sl, "decode"):
                return sl.decode()
            if hasattr(sl, "items"):
                return {decode(k, is_key=True): decode(v) for k, v in sl.items()}
            elif isinstance(sl, list):
                return [decode(e) for e in sl]
            else:
                return sl
        parsed_s = {
            decode(attr, is_key=True): decode(value) for attr, value in parsed.items()
        }
        # Any key not matching a constructor parameter is an error.
        unexpected = parsed_s.keys() - self._attrs
        if unexpected:
            raise ValueError("Unexpected keys in response: {}".format(unexpected))
        for attr, value in parsed_s.items():
            setattr(self, attr, value)

    def from_msgpack(self, data):
        """Populate from a msgpack-encoded message."""
        parsed = msgpack.unpackb(data)
        self._from_parsed(parsed)

    def from_json(self, data):
        """Populate from UTF-8 JSON bytes."""
        parsed = json.loads(data.decode())
        self._from_parsed(parsed)
class FramedTLSClient:
    """TLS client for a simple framed protocol: each message is a little-endian
    uint32 length header followed by that many payload bytes."""

    def __init__(self, host, port, server_hostname, cert=None, key=None, cafile=None):
        self.host = host
        self.port = port
        self.server_hostname = server_hostname
        self.cert = cert
        self.key = key
        self.cafile = cafile
        self.context = None
        self.sock = None
        self.conn = None

    def connect(self):
        """Create the SSL context and open the TLS connection."""
        if self.cafile:
            self.context = ssl.create_default_context(cafile=self.cafile)
            # Auto detect EC curve to use based on server CA
            ca_bytes = open(self.cafile, "rb").read()
            ca_curve = (
                x509.load_pem_x509_certificate(ca_bytes, default_backend())
                .public_key()
                .curve
            )
            if isinstance(ca_curve, asymmetric.ec.SECP256K1):
                self.context.set_ecdh_curve("secp256k1")
        else:
            self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        if self.cert and self.key:
            self.context.load_cert_chain(certfile=self.cert, keyfile=self.key)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn = self.context.wrap_socket(
            self.sock, server_side=False, server_hostname=self.server_hostname
        )
        self.conn.connect((self.host, self.port))

    def send(self, msg):
        """Send one frame: 4-byte little-endian length header, then the payload."""
        frame = struct.pack("<I", len(msg)) + msg
        self.conn.sendall(frame)

    def _recv_exact(self, size):
        # recv() may legally return fewer bytes than requested; loop until the
        # full amount arrives, and raise instead of spinning if the peer closes.
        data = b""
        while len(data) < size:
            chunk = self.conn.recv(size - len(data))
            if not chunk:
                raise ConnectionError("connection closed while reading a frame")
            data += chunk
        return data

    def _read(self):
        # Bug fix: the original unpacked the header from a single recv(4),
        # which can return fewer than 4 bytes and crash struct.unpack, and its
        # payload loop spun forever on a closed connection.
        size, = struct.unpack("<I", self._recv_exact(4))
        return self._recv_exact(size)

    def read(self):
        """Poll for up to ~50s (5000 x 10ms) and return the next frame, or None on timeout."""
        for _ in range(5000):
            r, _, _ = select.select([self.conn], [], [], 0)
            if r:
                return self._read()
            else:
                time.sleep(0.01)

    def disconnect(self):
        self.conn.close()
class Stream:
    """Allocates sequential request ids and matches incoming responses to them."""

    def __init__(self, jsonrpc="2.0", format="msgpack"):
        self.jsonrpc = jsonrpc
        self.seqno = 0
        self.pending = {}
        self.format = format

    def request(self, method, params):
        """Build a Request carrying the next sequence number."""
        req = Request(self.seqno, method, params, self.jsonrpc)
        self.seqno += 1
        return req

    def response(self, id):
        """Pop and return the pending response for *id*, or None if not seen yet."""
        return self.pending.pop(id, None)

    def update(self, msg):
        """Parse a raw message and file it under its response id."""
        resp = Response(0)
        parse = getattr(resp, "from_" + self.format)
        parse(msg)
        self.pending[resp.id] = resp
class RPCLogger:
    """Logs requests and responses to the application log, truncated for readability."""

    def log_request(self, request, name, description):
        LOG.info(
            truncate(
                "{} #{} {} {}{}".format(
                    name, request.id, request.method, request.params, description
                )
            )
        )

    def log_response(self, response):
        LOG.debug(
            truncate(
                "#{} {}".format(
                    response.id,
                    # Drop private attributes (e.g. _attrs) from the dump.
                    {
                        k: v
                        for k, v in (response.__dict__ or {}).items()
                        if not k.startswith("_")
                    },
                )
            )
        )
class RPCFileLogger(RPCLogger):
    """Logger that appends requests/responses as pretty-printed JSON to a file."""

    def __init__(self, path):
        self.path = path

    def _append(self, header, payload):
        # Re-open in append mode on every call, matching the original behaviour.
        with open(self.path, "a") as f:
            f.write(header + os.linesep)
            json.dump(payload, f, indent=2)
            f.write(os.linesep)

    def log_request(self, request, name, description):
        self._append(">> Request:", request.to_dict())

    def log_response(self, response):
        self._append("<< Response:", response.to_dict())
class FramedTLSJSONRPCClient:
    """JSON-RPC client layered over FramedTLSClient, with pluggable loggers."""

    def __init__(
        self,
        host,
        port,
        server_hostname,
        cert=None,
        key=None,
        cafile=None,
        version="2.0",
        format="msgpack",
        description=None,
    ):
        self.client = FramedTLSClient(host, port, server_hostname, cert, key, cafile)
        self.stream = Stream(version, format=format)
        self.format = format
        self.name = "[{}:{}]".format(host, port)
        self.description = description
        # Tuple (not list) of loggers; extended via += by the client() helper.
        self.rpc_loggers = (RPCLogger(),)

    def connect(self):
        return self.client.connect()

    def disconnect(self):
        return self.client.disconnect()

    def request(self, method, params):
        """Serialise and send a request; returns the request id for pairing."""
        r = self.stream.request(method, params)
        # Dispatch to to_msgpack()/to_json() depending on the configured format.
        self.client.send(getattr(r, "to_{}".format(self.format))())
        description = ""
        if self.description:
            description = " ({})".format(self.description)
        for logger in self.rpc_loggers:
            logger.log_request(r, self.name, description)
        return r.id

    def tick(self):
        # Read one frame off the wire and file it under its response id.
        msg = self.client.read()
        self.stream.update(msg)

    def response(self, id):
        """Read the next frame and return the response for *id* (None if not pending)."""
        self.tick()
        r = self.stream.response(id)
        for logger in self.rpc_loggers:
            logger.log_response(r)
        return r

    def do(self, method, params, expected_result=None, expected_error_code=None):
        """Round-trip a call, optionally asserting on the result or error code."""
        id = self.request(method, params)
        r = self.response(id)
        if expected_result is not None:
            assert expected_result == r.result
        if expected_error_code is not None:
            assert expected_error_code.value == r.error["code"]
        return r

    def rpc(self, method, params):
        """Round-trip a call and return the Response."""
        id = self.request(method, params)
        return self.response(id)
@contextlib.contextmanager
def client(
    host,
    port,
    server_hostname="users",
    cert=None,
    key=None,
    cafile=None,
    version="2.0",
    format="msgpack",
    description=None,
    log_file=None,
):
    """Context manager yielding a connected FramedTLSJSONRPCClient.

    If *log_file* is given, requests/responses are additionally appended there.
    The connection is always closed on exit.
    """
    c = FramedTLSJSONRPCClient(
        host, port, server_hostname, cert, key, cafile, version, format, description
    )
    if log_file is not None:
        c.rpc_loggers += (RPCFileLogger(log_file),)
    c.connect()
    try:
        yield c
    finally:
        c.disconnect()
| true
| true
|
f716d560b71358551851abec5503a1ab0331080f
| 8,345
|
py
|
Python
|
tests/providers/test_automotive.py
|
MarcelRobeer/faker
|
016ef66c6852ed7d5f198b54dc620bd784ce58c2
|
[
"MIT"
] | null | null | null |
tests/providers/test_automotive.py
|
MarcelRobeer/faker
|
016ef66c6852ed7d5f198b54dc620bd784ce58c2
|
[
"MIT"
] | null | null | null |
tests/providers/test_automotive.py
|
MarcelRobeer/faker
|
016ef66c6852ed7d5f198b54dc620bd784ce58c2
|
[
"MIT"
] | null | null | null |
import re
from typing import Pattern
from faker.providers.automotive.de_DE import Provider as DeDeAutomotiveProvider
from faker.providers.automotive.es_ES import Provider as EsEsAutomotiveProvider
from faker.providers.automotive.ro_RO import Provider as RoRoAutomotiveProvider
from faker.providers.automotive.ru_RU import Provider as RuRuAutomotiveProvider
from faker.providers.automotive.sk_SK import Provider as SkSkAutomotiveProvider
from faker.providers.automotive.tr_TR import Provider as TrTrAutomotiveProvider
class _SimpleAutomotiveTestMixin:
    """Use this test mixin for simple license plate validation"""

    def perform_extra_checks(self, license_plate, match):
        # Hook for subclasses needing locale-specific assertions; no-op by default.
        pass

    def test_license_plate(self, faker, num_samples):
        # Every generated plate must fully match the subclass's license_plate_pattern.
        for _ in range(num_samples):
            license_plate = faker.license_plate()
            match = self.license_plate_pattern.fullmatch(license_plate)
            assert match
            self.perform_extra_checks(license_plate, match)
class TestSkSk(_SimpleAutomotiveTestMixin):
    """Test sk_SK automotive provider methods"""

    # Two-letter prefix, three digits, two letters.
    license_plate_pattern: Pattern = re.compile(r'(?P<prefix>[A-Z]{2})\d{3}[A-Z]{2}')

    def perform_extra_checks(self, license_plate, match):
        # Prefix must be one of the provider's known prefixes.
        assert match.group('prefix') in SkSkAutomotiveProvider.license_plate_prefix
class TestPtBr(_SimpleAutomotiveTestMixin):
    """Test pt_BR automotive provider methods"""

    # Three letters, dash, four digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{4}')
class TestPtPt(_SimpleAutomotiveTestMixin):
    """Test pt_PT automotive provider methods"""

    # Any of four accepted digit/letter group layouts.
    license_plate_pattern: Pattern = re.compile(
        r'\d{2}-\d{2}-[A-Z]{2}|'
        r'\d{2}-[A-Z]{2}-\d{2}|'
        r'[A-Z]{2}-\d{2}-\d{2}|'
        r'[A-Z]{2}-\d{2}-[A-Z]{2}',
    )
class TestHeIl(_SimpleAutomotiveTestMixin):
    """Test he_IL automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(r'(\d{3}-\d{2}-\d{3})|(\d{2}-\d{3}-\d{2})')
class TestHuHu(_SimpleAutomotiveTestMixin):
    """Test hu_HU automotive provider methods"""

    # Three letters, dash, three digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{3}')
class TestDeDe(_SimpleAutomotiveTestMixin):
    """Test de_DE automotive provider methods"""

    # The prefix may contain the umlauts Ö (U+00D6) / Ü (U+00DC), hence re.UNICODE.
    license_plate_pattern: Pattern = re.compile(
        r'(?P<prefix>[A-Z\u00D6\u00DC]{1,3})-[A-Z]{1,2}-[1-9]{1,4}',
        re.UNICODE,
    )

    def perform_extra_checks(self, license_plate, match):
        # Prefix must be one of the provider's known prefixes.
        assert match.group('prefix') in DeDeAutomotiveProvider.license_plate_prefix
class TestSvSe(_SimpleAutomotiveTestMixin):
    """Test sv_SE automotive provider methods"""

    # Three letters, space, two digits, then a digit or a letter.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3} \d{2}[\dA-Z]')
class TestPlPl:
    """Test pl_PL automotive provider methods"""

    def test_license_plate(self, faker, num_samples):
        """Every plate must fully match one of the provider's regex formats.

        Fixes: snake_case test name (was ``test_License_plate``; pytest still
        collects either, nothing references the old name) and removal of the
        pointless ``r'{patterns}'.format(...)`` wrapper around the joined regex.
        """
        pattern: Pattern = re.compile('|'.join(faker.license_plate_regex_formats()))
        for _ in range(num_samples):
            plate = faker.license_plate()
            assert pattern.fullmatch(plate)
class TestEnPh(_SimpleAutomotiveTestMixin):
    """Test en_PH automotive provider methods"""

    # Motorcycles: 2 letters + 4-5 digits; automobiles: 3 letters + 3-4 digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}|[A-Z]{3}\d{3,4}')
    motorcycle_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}')
    automobile_pattern: Pattern = re.compile(r'[A-Z]{3}\d{3,4}')

    def test_motorcycle_plate(self, faker, num_samples):
        for _ in range(num_samples):
            assert self.motorcycle_pattern.match(faker.motorcycle_license_plate())

    def test_automobile_plate(self, faker, num_samples):
        for _ in range(num_samples):
            assert self.automobile_pattern.match(faker.automobile_license_plate())

    def test_protocol_plate(self, faker, num_samples):
        # Protocol plates are integers 1-17, excluding 15.
        for _ in range(num_samples):
            protocol_plate = faker.protocol_license_plate()
            assert int(protocol_plate) != 15 and 1 <= int(protocol_plate) <= 17
class TestFilPh(TestEnPh):
    """Test fil_PH automotive provider methods"""

    # Same checks as en_PH.
    pass
class TestTlPh(TestEnPh):
    """Test tl_PH automotive provider methods"""

    # Same checks as en_PH.
    pass
class TestRuRu(_SimpleAutomotiveTestMixin):
    """Test ru_RU automotive provider methods"""

    # {0} is interpolated with the provider's allowed plate letters.
    _plate_letters = ''.join(RuRuAutomotiveProvider.license_plate_letters)
    license_plate_pattern: Pattern = re.compile(
        r'(?:'
        r'(?P<private_plate_prefix>[{0}]\d\d\d[{0}][{0}])|'
        r'(?P<public_transport_plate_prefix>[{0}][{0}]\d\d\d)|'
        r'(?P<trailer_plate_prefix>[{0}][{0}]\d\d\d\d)|'
        r'(?P<police_plate_prefix>[{0}]\d\d\d\d)|'
        r'(?P<military_plate_prefix>\d\d\d\d[{0}][{0}])|'
        r'(?P<plate_number_special>00\dCD\d|00\dD\d\d\d|00\dT\d\d\d)'
        r') (?P<plate_suffix>.*)'.format(_plate_letters),
    )

    def perform_extra_checks(self, license_plate, match):
        # The region suffix must be one of the provider's known suffixes.
        plate_suffix = match.group('plate_suffix')
        assert plate_suffix in RuRuAutomotiveProvider.license_plate_suffix

    def test_vehicle_category(self, faker, num_samples):
        for _ in range(num_samples):
            vehicle_category = faker.vehicle_category()
            assert isinstance(vehicle_category, str)
            assert vehicle_category in RuRuAutomotiveProvider.vehicle_categories
class TestFrFr(_SimpleAutomotiveTestMixin):
    """Test fr_FR automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(r'\d{3}-[A-Z]{3}-\d{2}|[A-Z]{2}-\d{3}-[A-Z]{2}')
class TestNoNo(_SimpleAutomotiveTestMixin):
    """Test no_NO automotive provider methods"""

    # Two letters, space, five digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{2} \d{5}')
class TestEsEs:
    """Test es_ES automotive provider methods"""

    # Unified (new) format: four digits + three letters.
    new_format_pattern: Pattern = re.compile(r'\d{4}\s[A-Z]{3}')
    # Old format: province prefix + four digits + two letters.
    old_format_pattern: Pattern = re.compile(r'(?P<province_prefix>[A-Z]{1,2})\s\d{4}\s[A-Z]{2}')

    def test_plate_new_format(self, faker, num_samples):
        for _ in range(num_samples):
            plate = faker.license_plate_unified()
            assert isinstance(plate, str)
            assert self.new_format_pattern.match(plate)

    def test_plate_old_format(self, faker, num_samples):
        for _ in range(num_samples):
            plate = faker.license_plate_by_province()
            assert isinstance(plate, str)
            match = self.old_format_pattern.match(plate)
            assert match
            # Prefix must be one of the provider's known province prefixes.
            assert match.group('province_prefix') in EsEsAutomotiveProvider.province_prefix

    def test_plate_old_format_explicit_province_prefix(self, faker, num_samples):
        # An explicitly requested prefix must appear verbatim in the plate.
        for _ in range(num_samples):
            plate = faker.license_plate_by_province(province_prefix="CA")
            assert isinstance(plate, str)
            assert self.old_format_pattern.match(plate)
            assert plate[:2] == "CA"

    def test_plate_format(self, faker, num_samples):
        # The generic generator may emit either format.
        for _ in range(num_samples):
            plate = faker.license_plate()
            assert isinstance(plate, str)
            assert self.new_format_pattern.match(plate) or self.old_format_pattern.match(plate)
class TestThTh(_SimpleAutomotiveTestMixin):
    """Test th_TH automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(
        r'(\d [ก-ฮ]{2} \d{1,4})|'  # car
        r'([ก-ฮ]{2} \d{1,4})|'  # car
        r'([ก-ฮ]{3} \d{1,3})|'  # motorcycle
        r'(\d{2}-\d{4})',  # truck
    )
class TestTrTr(_SimpleAutomotiveTestMixin):
    """Test tr_TR automotive provider methods"""

    # City code, then 1-3 letters, then 2-5 digits (letter/digit counts trade off).
    license_plate_pattern: Pattern = re.compile(
        r'\d{2} [A-Z] \d{4}|'
        r'\d{2} [A-Z] \d{5}|'
        r'\d{2} [A-Z]{2} \d{3}|'
        r'\d{2} [A-Z]{2} \d{4}|'
        r'\d{2} [A-Z]{3} \d{2}|'
        r'\d{2} [A-Z]{3} \d{3}',
    )

    def perform_extra_checks(self, license_plate, match):
        # City codes run 01-81; letters must come from the provider's Turkish set.
        [city_code, letters, _] = license_plate.split(' ')
        assert int(city_code) in range(1, 82)
        assert all(letter in TrTrAutomotiveProvider.ascii_uppercase_turkish for letter in letters)
class TestRoRo(_SimpleAutomotiveTestMixin):
    """Test ro_RO automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(
        r'(?P<prefix>[A-Z]{1,2})-\d{2,3}-[A-Z]{3}')

    def perform_extra_checks(self, license_plate, match):
        # Prefix must be one of the provider's known prefixes.
        assert match.group('prefix') in RoRoAutomotiveProvider.license_plate_prefix
class TestElGr(_SimpleAutomotiveTestMixin):
    """Test el_GR automotive provider methods"""

    license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
| 37.59009
| 115
| 0.670461
|
import re
from typing import Pattern
from faker.providers.automotive.de_DE import Provider as DeDeAutomotiveProvider
from faker.providers.automotive.es_ES import Provider as EsEsAutomotiveProvider
from faker.providers.automotive.ro_RO import Provider as RoRoAutomotiveProvider
from faker.providers.automotive.ru_RU import Provider as RuRuAutomotiveProvider
from faker.providers.automotive.sk_SK import Provider as SkSkAutomotiveProvider
from faker.providers.automotive.tr_TR import Provider as TrTrAutomotiveProvider
class _SimpleAutomotiveTestMixin:
    """Use this test mixin for simple license plate validation"""

    def perform_extra_checks(self, license_plate, match):
        # Hook for subclasses needing locale-specific assertions; no-op by default.
        pass

    def test_license_plate(self, faker, num_samples):
        # Every generated plate must fully match the subclass's license_plate_pattern.
        for _ in range(num_samples):
            license_plate = faker.license_plate()
            match = self.license_plate_pattern.fullmatch(license_plate)
            assert match
            self.perform_extra_checks(license_plate, match)
class TestSkSk(_SimpleAutomotiveTestMixin):
    """Test sk_SK automotive provider methods"""

    # Two-letter prefix, three digits, two letters.
    license_plate_pattern: Pattern = re.compile(r'(?P<prefix>[A-Z]{2})\d{3}[A-Z]{2}')

    def perform_extra_checks(self, license_plate, match):
        # Prefix must be one of the provider's known prefixes.
        assert match.group('prefix') in SkSkAutomotiveProvider.license_plate_prefix
class TestPtBr(_SimpleAutomotiveTestMixin):
    """Test pt_BR automotive provider methods"""

    # Three letters, dash, four digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{4}')
class TestPtPt(_SimpleAutomotiveTestMixin):
    """Test pt_PT automotive provider methods"""

    # Any of four accepted digit/letter group layouts.
    license_plate_pattern: Pattern = re.compile(
        r'\d{2}-\d{2}-[A-Z]{2}|'
        r'\d{2}-[A-Z]{2}-\d{2}|'
        r'[A-Z]{2}-\d{2}-\d{2}|'
        r'[A-Z]{2}-\d{2}-[A-Z]{2}',
    )
class TestHeIl(_SimpleAutomotiveTestMixin):
    """Test he_IL automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(r'(\d{3}-\d{2}-\d{3})|(\d{2}-\d{3}-\d{2})')
class TestHuHu(_SimpleAutomotiveTestMixin):
    """Test hu_HU automotive provider methods"""

    # Three letters, dash, three digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{3}')
class TestDeDe(_SimpleAutomotiveTestMixin):
    """Test de_DE automotive provider methods"""

    # The prefix may contain the umlauts Ö (U+00D6) / Ü (U+00DC), hence re.UNICODE.
    license_plate_pattern: Pattern = re.compile(
        r'(?P<prefix>[A-Z\u00D6\u00DC]{1,3})-[A-Z]{1,2}-[1-9]{1,4}',
        re.UNICODE,
    )

    def perform_extra_checks(self, license_plate, match):
        # Prefix must be one of the provider's known prefixes.
        assert match.group('prefix') in DeDeAutomotiveProvider.license_plate_prefix
class TestSvSe(_SimpleAutomotiveTestMixin):
    """Test sv_SE automotive provider methods"""

    # Three letters, space, two digits, then a digit or a letter.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{3} \d{2}[\dA-Z]')
class TestPlPl:
    """Test pl_PL automotive provider methods"""

    def test_license_plate(self, faker, num_samples):
        """Every plate must fully match one of the provider's regex formats.

        Fixes: snake_case test name (was ``test_License_plate``; pytest still
        collects either, nothing references the old name) and removal of the
        pointless ``r'{patterns}'.format(...)`` wrapper around the joined regex.
        """
        pattern: Pattern = re.compile('|'.join(faker.license_plate_regex_formats()))
        for _ in range(num_samples):
            plate = faker.license_plate()
            assert pattern.fullmatch(plate)
class TestEnPh(_SimpleAutomotiveTestMixin):
    """Test en_PH automotive provider methods"""

    # Motorcycles: 2 letters + 4-5 digits; automobiles: 3 letters + 3-4 digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}|[A-Z]{3}\d{3,4}')
    motorcycle_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}')
    automobile_pattern: Pattern = re.compile(r'[A-Z]{3}\d{3,4}')

    def test_motorcycle_plate(self, faker, num_samples):
        for _ in range(num_samples):
            assert self.motorcycle_pattern.match(faker.motorcycle_license_plate())

    def test_automobile_plate(self, faker, num_samples):
        for _ in range(num_samples):
            assert self.automobile_pattern.match(faker.automobile_license_plate())

    def test_protocol_plate(self, faker, num_samples):
        # Protocol plates are integers 1-17, excluding 15.
        for _ in range(num_samples):
            protocol_plate = faker.protocol_license_plate()
            assert int(protocol_plate) != 15 and 1 <= int(protocol_plate) <= 17
class TestFilPh(TestEnPh):
    """Test fil_PH automotive provider methods"""

    # Same checks as en_PH.
    pass
class TestTlPh(TestEnPh):
    """Test tl_PH automotive provider methods"""

    # Same checks as en_PH.
    pass
class TestRuRu(_SimpleAutomotiveTestMixin):
    """Test ru_RU automotive provider methods"""

    # {0} is interpolated with the provider's allowed plate letters.
    _plate_letters = ''.join(RuRuAutomotiveProvider.license_plate_letters)
    license_plate_pattern: Pattern = re.compile(
        r'(?:'
        r'(?P<private_plate_prefix>[{0}]\d\d\d[{0}][{0}])|'
        r'(?P<public_transport_plate_prefix>[{0}][{0}]\d\d\d)|'
        r'(?P<trailer_plate_prefix>[{0}][{0}]\d\d\d\d)|'
        r'(?P<police_plate_prefix>[{0}]\d\d\d\d)|'
        r'(?P<military_plate_prefix>\d\d\d\d[{0}][{0}])|'
        r'(?P<plate_number_special>00\dCD\d|00\dD\d\d\d|00\dT\d\d\d)'
        r') (?P<plate_suffix>.*)'.format(_plate_letters),
    )

    def perform_extra_checks(self, license_plate, match):
        # The region suffix must be one of the provider's known suffixes.
        plate_suffix = match.group('plate_suffix')
        assert plate_suffix in RuRuAutomotiveProvider.license_plate_suffix

    def test_vehicle_category(self, faker, num_samples):
        for _ in range(num_samples):
            vehicle_category = faker.vehicle_category()
            assert isinstance(vehicle_category, str)
            assert vehicle_category in RuRuAutomotiveProvider.vehicle_categories
class TestFrFr(_SimpleAutomotiveTestMixin):
    """Test fr_FR automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(r'\d{3}-[A-Z]{3}-\d{2}|[A-Z]{2}-\d{3}-[A-Z]{2}')
class TestNoNo(_SimpleAutomotiveTestMixin):
    """Test no_NO automotive provider methods"""

    # Two letters, space, five digits.
    license_plate_pattern: Pattern = re.compile(r'[A-Z]{2} \d{5}')
class TestEsEs:
    """Test es_ES automotive provider methods"""

    # Unified (new) format: four digits + three letters.
    new_format_pattern: Pattern = re.compile(r'\d{4}\s[A-Z]{3}')
    # Old format: province prefix + four digits + two letters.
    old_format_pattern: Pattern = re.compile(r'(?P<province_prefix>[A-Z]{1,2})\s\d{4}\s[A-Z]{2}')

    def test_plate_new_format(self, faker, num_samples):
        for _ in range(num_samples):
            plate = faker.license_plate_unified()
            assert isinstance(plate, str)
            assert self.new_format_pattern.match(plate)

    def test_plate_old_format(self, faker, num_samples):
        for _ in range(num_samples):
            plate = faker.license_plate_by_province()
            assert isinstance(plate, str)
            match = self.old_format_pattern.match(plate)
            assert match
            # Prefix must be one of the provider's known province prefixes.
            assert match.group('province_prefix') in EsEsAutomotiveProvider.province_prefix

    def test_plate_old_format_explicit_province_prefix(self, faker, num_samples):
        # An explicitly requested prefix must appear verbatim in the plate.
        for _ in range(num_samples):
            plate = faker.license_plate_by_province(province_prefix="CA")
            assert isinstance(plate, str)
            assert self.old_format_pattern.match(plate)
            assert plate[:2] == "CA"

    def test_plate_format(self, faker, num_samples):
        # The generic generator may emit either format.
        for _ in range(num_samples):
            plate = faker.license_plate()
            assert isinstance(plate, str)
            assert self.new_format_pattern.match(plate) or self.old_format_pattern.match(plate)
class TestThTh(_SimpleAutomotiveTestMixin):
    """Test th_TH automotive provider methods"""

    license_plate_pattern: Pattern = re.compile(
        r'(\d [ก-ฮ]{2} \d{1,4})|'  # car
        r'([ก-ฮ]{2} \d{1,4})|'  # car
        r'([ก-ฮ]{3} \d{1,3})|'  # motorcycle
        r'(\d{2}-\d{4})',  # truck
    )
class TestTrTr(_SimpleAutomotiveTestMixin):
    """Test tr_TR automotive provider methods"""

    # City code, then 1-3 letters, then 2-5 digits (letter/digit counts trade off).
    license_plate_pattern: Pattern = re.compile(
        r'\d{2} [A-Z] \d{4}|'
        r'\d{2} [A-Z] \d{5}|'
        r'\d{2} [A-Z]{2} \d{3}|'
        r'\d{2} [A-Z]{2} \d{4}|'
        r'\d{2} [A-Z]{3} \d{2}|'
        r'\d{2} [A-Z]{3} \d{3}',
    )

    def perform_extra_checks(self, license_plate, match):
        # City codes run 01-81; letters must come from the provider's Turkish set.
        [city_code, letters, _] = license_plate.split(' ')
        assert int(city_code) in range(1, 82)
        assert all(letter in TrTrAutomotiveProvider.ascii_uppercase_turkish for letter in letters)
class TestRoRo(_SimpleAutomotiveTestMixin):
license_plate_pattern: Pattern = re.compile(
r'(?P<prefix>[A-Z]{1,2})-\d{2,3}-[A-Z]{3}')
def perform_extra_checks(self, license_plate, match):
assert match.group('prefix') in RoRoAutomotiveProvider.license_plate_prefix
class TestElGr(_SimpleAutomotiveTestMixin):
license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
| true
| true
|
f716d625081f9ebcae5efd23c12ecb9272c56c04
| 887
|
py
|
Python
|
qubo_nn/plots/gen_tsne_gen4.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | 9
|
2021-09-17T09:40:59.000Z
|
2022-03-29T13:41:25.000Z
|
qubo_nn/plots/gen_tsne_gen4.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | null | null | null |
qubo_nn/plots/gen_tsne_gen4.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | 4
|
2022-03-06T19:26:19.000Z
|
2022-03-29T13:41:37.000Z
|
import pickle
import numpy as np
from MulticoreTSNE import MulticoreTSNE as TSNE
from qubo_nn.data import LMDBDataLoader
from qubo_nn.config import Config
cfg_id = '27_gen4'
cfg = Config('../').get_cfg(cfg_id)
cfg["use_big"] = False
lmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../')
X = []
y = []
for i, data in enumerate(lmdb_loader.train_data_loader):
if i > 43: # 44 batches á 500 = 22k (from total of 440k), so 5%
break
X.extend(data[0].tolist())
y.extend(data[1].tolist())
X = np.array(X)
X = X.reshape(-1, 64**2)
print(X.shape)
for i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]:
tsne = TSNE(
n_jobs=10,
n_iter=5000,
perplexity=i,
# perplexity=500., # Best.
verbose=1
)
Y = tsne.fit_transform(X)
with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f:
pickle.dump((Y, y), f)
| 23.972973
| 68
| 0.622322
|
import pickle
import numpy as np
from MulticoreTSNE import MulticoreTSNE as TSNE
from qubo_nn.data import LMDBDataLoader
from qubo_nn.config import Config
cfg_id = '27_gen4'
cfg = Config('../').get_cfg(cfg_id)
cfg["use_big"] = False
lmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../')
X = []
y = []
for i, data in enumerate(lmdb_loader.train_data_loader):
if i > 43:
break
X.extend(data[0].tolist())
y.extend(data[1].tolist())
X = np.array(X)
X = X.reshape(-1, 64**2)
print(X.shape)
for i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]:
tsne = TSNE(
n_jobs=10,
n_iter=5000,
perplexity=i,
verbose=1
)
Y = tsne.fit_transform(X)
with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f:
pickle.dump((Y, y), f)
| true
| true
|
f716d781ce1344228dd00f5ca854221451fb21f6
| 1,862
|
py
|
Python
|
tests/extras/test_tooltips.py
|
Akuli/tkinder
|
c360fbfe086ca09cdd856a8636de05b24e1b7093
|
[
"MIT"
] | 23
|
2019-01-15T00:07:30.000Z
|
2022-01-18T06:19:18.000Z
|
tests/extras/test_tooltips.py
|
Akuli/tkinder
|
c360fbfe086ca09cdd856a8636de05b24e1b7093
|
[
"MIT"
] | 12
|
2019-01-13T19:51:52.000Z
|
2021-05-17T17:55:51.000Z
|
tests/extras/test_tooltips.py
|
Akuli/pythotk
|
c360fbfe086ca09cdd856a8636de05b24e1b7093
|
[
"MIT"
] | 7
|
2019-01-13T19:48:26.000Z
|
2021-04-21T13:30:21.000Z
|
import time
import types
import pytest
import teek
from teek.extras import tooltips
def run_event_loop(for_how_long):
# this is dumb
start = time.time()
while time.time() < start + for_how_long:
teek.update()
@pytest.mark.slow
def test_set_tooltip():
window = teek.Window()
assert not hasattr(window, '_tooltip_manager')
tooltips.set_tooltip(window, None)
assert not hasattr(window, '_tooltip_manager')
tooltips.set_tooltip(window, 'Boo')
assert window._tooltip_manager.text == 'Boo'
tooltips.set_tooltip(window, None)
assert window._tooltip_manager.text is None
tooltips.set_tooltip(window, 'lol')
assert window._tooltip_manager.text == 'lol'
N = types.SimpleNamespace # because pep8 line length
assert not window._tooltip_manager.got_mouse
window._tooltip_manager.enter(N(widget=window, rootx=123, rooty=456))
assert window._tooltip_manager.got_mouse
assert window._tooltip_manager.mousex == 123
assert window._tooltip_manager.mousey == 456
window._tooltip_manager.motion(N(rootx=789, rooty=101112))
assert window._tooltip_manager.got_mouse
assert window._tooltip_manager.mousex == 789
assert window._tooltip_manager.mousey == 101112
run_event_loop(1.1)
assert window._tooltip_manager.tipwindow is not None
assert window._tooltip_manager.got_mouse
window._tooltip_manager.leave(N(widget=window))
assert not window._tooltip_manager.got_mouse
assert window._tooltip_manager.tipwindow is None
# what happens if the window gets destroyed before it's supposed to show?
window._tooltip_manager.enter(N(widget=window, rootx=1, rooty=2))
window._tooltip_manager.leave(N(widget=window))
assert window._tooltip_manager.tipwindow is None
run_event_loop(1.1)
assert window._tooltip_manager.tipwindow is None
| 31.559322
| 77
| 0.749194
|
import time
import types
import pytest
import teek
from teek.extras import tooltips
def run_event_loop(for_how_long):
start = time.time()
while time.time() < start + for_how_long:
teek.update()
@pytest.mark.slow
def test_set_tooltip():
window = teek.Window()
assert not hasattr(window, '_tooltip_manager')
tooltips.set_tooltip(window, None)
assert not hasattr(window, '_tooltip_manager')
tooltips.set_tooltip(window, 'Boo')
assert window._tooltip_manager.text == 'Boo'
tooltips.set_tooltip(window, None)
assert window._tooltip_manager.text is None
tooltips.set_tooltip(window, 'lol')
assert window._tooltip_manager.text == 'lol'
N = types.SimpleNamespace
assert not window._tooltip_manager.got_mouse
window._tooltip_manager.enter(N(widget=window, rootx=123, rooty=456))
assert window._tooltip_manager.got_mouse
assert window._tooltip_manager.mousex == 123
assert window._tooltip_manager.mousey == 456
window._tooltip_manager.motion(N(rootx=789, rooty=101112))
assert window._tooltip_manager.got_mouse
assert window._tooltip_manager.mousex == 789
assert window._tooltip_manager.mousey == 101112
run_event_loop(1.1)
assert window._tooltip_manager.tipwindow is not None
assert window._tooltip_manager.got_mouse
window._tooltip_manager.leave(N(widget=window))
assert not window._tooltip_manager.got_mouse
assert window._tooltip_manager.tipwindow is None
window._tooltip_manager.enter(N(widget=window, rootx=1, rooty=2))
window._tooltip_manager.leave(N(widget=window))
assert window._tooltip_manager.tipwindow is None
run_event_loop(1.1)
assert window._tooltip_manager.tipwindow is None
| true
| true
|
f716d8668d7f3e71327a13ddba27d41f18e2ef20
| 49
|
py
|
Python
|
ecommerce/shipping.py
|
broach44/beginning-python
|
54fb51ce666e263e7a76c37bb39cb6df636886ca
|
[
"MIT"
] | null | null | null |
ecommerce/shipping.py
|
broach44/beginning-python
|
54fb51ce666e263e7a76c37bb39cb6df636886ca
|
[
"MIT"
] | null | null | null |
ecommerce/shipping.py
|
broach44/beginning-python
|
54fb51ce666e263e7a76c37bb39cb6df636886ca
|
[
"MIT"
] | null | null | null |
def calc_shipping():
print("calc shipping")
| 12.25
| 26
| 0.673469
|
def calc_shipping():
print("calc shipping")
| true
| true
|
f716d9590903bcc9299b90cc70855136916fd55d
| 1,613
|
py
|
Python
|
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
|
OSADP/TCA
|
25bc1c1db00393cc6b8c6764610bf381494dfcb9
|
[
"Apache-2.0"
] | 1
|
2021-05-22T00:06:09.000Z
|
2021-05-22T00:06:09.000Z
|
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
|
OSADP/TCA
|
25bc1c1db00393cc6b8c6764610bf381494dfcb9
|
[
"Apache-2.0"
] | null | null | null |
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
|
OSADP/TCA
|
25bc1c1db00393cc6b8c6764610bf381494dfcb9
|
[
"Apache-2.0"
] | null | null | null |
#standard
import unittest
import math
# from collections import OrderedDict
from random import uniform
#external
import pandas as pd
from scipy.spatial import KDTree
def Find_RSE_range(df, RSEs, minrange):
sub_df = df[['vehicle_ID', 'location_x', 'location_y']]
tree = KDTree(sub_df[['location_x', 'location_y']].values)
rse_points = list(RSEs.RSEListLocations.values())
locs_index = tree.query_ball_point(rse_points, r=minrange)
#link RSE back to vehicles
rse_vehicles = {}
for c, RSE in enumerate(RSEs.RSEListLocations.keys()):
if len(locs_index[c]) > 0:
vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist()
rse_vehicles[RSE] = vlist
else:
rse_vehicles[RSE] = []
return rse_vehicles
class BufferContentCheck(unittest.TestCase):
def setUp(self):
pass
def test_whole(self):
minrange = 4.00
num_vehicles = 10000
num_RSE = 30
# Vehicles_loc = {x:(uniform(0, 200), uniform(0, 200)) for x in range(num_vehicles)}
# df = pd.DataFrame({
# 'Vid' : ['V' + str(x) for x in Vehicles_loc.keys()],
# 'x' : [Vehicles_loc[x][0] for x in Vehicles_loc],
# 'y' : [Vehicles_loc[x][1] for x in Vehicles_loc],
# })
# df = df.set_index(['Vid'], drop=False)
# RSEs = OrderedDict({'RSE' + str(x):(uniform(0, 200), uniform(0, 200)) for x in range(num_RSE)})
# rse_info = Find_RSE_range(df, RSEs, minrange)
if __name__ == '__main__':
unittest.main()
| 26.442623
| 105
| 0.594544
|
import unittest
import math
from random import uniform
import pandas as pd
from scipy.spatial import KDTree
def Find_RSE_range(df, RSEs, minrange):
sub_df = df[['vehicle_ID', 'location_x', 'location_y']]
tree = KDTree(sub_df[['location_x', 'location_y']].values)
rse_points = list(RSEs.RSEListLocations.values())
locs_index = tree.query_ball_point(rse_points, r=minrange)
rse_vehicles = {}
for c, RSE in enumerate(RSEs.RSEListLocations.keys()):
if len(locs_index[c]) > 0:
vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist()
rse_vehicles[RSE] = vlist
else:
rse_vehicles[RSE] = []
return rse_vehicles
class BufferContentCheck(unittest.TestCase):
def setUp(self):
pass
def test_whole(self):
minrange = 4.00
num_vehicles = 10000
num_RSE = 30
if __name__ == '__main__':
unittest.main()
| true
| true
|
f716da328dd870f9e5d396ba489f4d3b821fa89f
| 720
|
py
|
Python
|
bin/make_halo_cnf_data.py
|
muntazirabidi/boss-sbi
|
fae016eb10b64153391499276d238ccdf660df88
|
[
"MIT"
] | 1
|
2022-03-15T18:13:02.000Z
|
2022-03-15T18:13:02.000Z
|
bin/make_halo_cnf_data.py
|
muntazirabidi/boss-sbi
|
fae016eb10b64153391499276d238ccdf660df88
|
[
"MIT"
] | 11
|
2020-12-16T18:26:31.000Z
|
2021-04-02T14:58:37.000Z
|
bin/make_halo_cnf_data.py
|
muntazirabidi/boss-sbi
|
fae016eb10b64153391499276d238ccdf660df88
|
[
"MIT"
] | 2
|
2021-03-29T17:33:54.000Z
|
2021-04-01T16:07:07.000Z
|
import os
import numpy as np
from simbig import halos as Halos
np.random.seed(918234)
theta_x_pairs = []
for i in range(1000):
# read in halo catalog
halos = Halos.Quijote_LHC_HR(i, z=0.5)
# impose random halo mass limit as a proxy for baryonic effect
Mlim = np.random.uniform(12.5, 13.0)
theta_cosmo = Halos.Quijote_LHC_cosmo(i)
# observable: I'm goign to use Nhalo as a proxy for some observable
Nhalos = np.sum(np.array(halos['Mass']) > Mlim)
# (parameter, data) pair
theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]])
theta_x_pairs.append(theta_x)
np.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))
| 28.8
| 103
| 0.6875
|
import os
import numpy as np
from simbig import halos as Halos
np.random.seed(918234)
theta_x_pairs = []
for i in range(1000):
halos = Halos.Quijote_LHC_HR(i, z=0.5)
Mlim = np.random.uniform(12.5, 13.0)
theta_cosmo = Halos.Quijote_LHC_cosmo(i)
Nhalos = np.sum(np.array(halos['Mass']) > Mlim)
# (parameter, data) pair
theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]])
theta_x_pairs.append(theta_x)
np.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))
| true
| true
|
f716da8fd6c32ba467ca558698e188b418c2559d
| 10,502
|
py
|
Python
|
scripts/build-ios.py
|
ArtronicsGame/mobile-sdk
|
492afb38fbf372d2e76534b8f92e433b7cfb69b5
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/build-ios.py
|
ArtronicsGame/mobile-sdk
|
492afb38fbf372d2e76534b8f92e433b7cfb69b5
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/build-ios.py
|
ArtronicsGame/mobile-sdk
|
492afb38fbf372d2e76534b8f92e433b7cfb69b5
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import re
import shutil
import argparse
import string
from build.sdk_build_utils import *
IOS_ARCHS = ['i386', 'x86_64', 'armv7', 'arm64']
def updateUmbrellaHeader(filename, args):
with open(filename, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1)
for i in range(0, len(lines)):
if re.search('^\s*#define\s+.*$', lines[i].rstrip('\n')):
break
lines = lines[:i+1] + ['\n'] + ['#define %s\n' % define for define in args.defines.split(';') if define] + lines[i+1:]
with open(filename, 'w') as f:
f.writelines(lines)
def updatePrivateHeader(filename, args):
with open(filename, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
match = re.search('^\s*#include\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#include <CartoMobileSDK/%s>\n' % match.group(1)
match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1)
with open(filename, 'w') as f:
f.writelines(lines)
def buildModuleMap(filename, publicHeaders, privateHeaders):
with open(filename, 'w') as f:
f.write('framework module CartoMobileSDK {\n')
f.write(' umbrella header "CartoMobileSDK.h"\n')
for header in publicHeaders:
f.write(' header "%s"\n' % header)
f.write(' export *\n')
f.write(' module * { export * }\n')
f.write(' explicit module Private {\n')
f.write(' requires cplusplus\n')
for header in privateHeaders:
f.write(' header "%s"\n' % header)
f.write(' }\n')
f.write('}\n')
def buildIOSLib(args, arch):
platform = 'OS' if arch.startswith('arm') else 'SIMULATOR'
version = getVersion(args.buildnumber) if args.configuration == 'Release' else 'Devel'
baseDir = getBaseDir()
buildDir = getBuildDir('ios', '%s-%s' % (platform, arch))
defines = ["-D%s" % define for define in args.defines.split(';') if define]
options = ["-D%s" % option for option in args.cmakeoptions.split(';') if option]
if not cmake(args, buildDir, options + [
'-G', 'Xcode',
'-DCMAKE_SYSTEM_NAME=iOS',
'-DWRAPPER_DIR=%s' % ('%s/generated/ios-objc/proxies' % baseDir),
'-DINCLUDE_OBJC:BOOL=ON',
'-DSINGLE_LIBRARY:BOOL=ON',
'-DENABLE_BITCODE:BOOL=%s' % ('OFF' if args.stripbitcode else 'ON'),
'-DSHARED_LIBRARY:BOOL=%s' % ('ON' if args.sharedlib else 'OFF'),
'-DCMAKE_OSX_ARCHITECTURES=%s' % arch,
'-DCMAKE_OSX_SYSROOT=iphone%s' % platform.lower(),
'-DCMAKE_OSX_DEPLOYMENT_TARGET=%s' % ('9.0' if args.metalangle else '7.0'),
'-DCMAKE_BUILD_TYPE=%s' % args.configuration,
"-DSDK_CPP_DEFINES=%s" % " ".join(defines),
"-DSDK_VERSION='%s'" % version,
"-DSDK_PLATFORM='iOS'",
'%s/scripts/build' % baseDir
]):
return False
return cmake(args, buildDir, [
'--build', '.',
'--config', args.configuration
])
def buildIOSFramework(args, archs):
shutil.rmtree(getDistDir('ios'), True)
platformArchs = [('OS' if arch.startswith('arm') else 'SIMULATOR', arch) for arch in archs]
baseDir = getBaseDir()
distDir = getDistDir('ios')
if args.sharedlib:
outputDir = '%s/CartoMobileSDK.framework' % distDir
else:
outputDir = '%s/CartoMobileSDK.framework/Versions/A' % distDir
makedirs(outputDir)
libFilePaths = []
for platform, arch in platformArchs:
libFilePath = "%s/%s-%s/libcarto_mobile_sdk.%s" % (getBuildDir('ios', '%s-%s' % (platform, arch)), args.configuration, 'iphoneos' if arch.startswith("arm") else 'iphonesimulator', 'dylib' if args.sharedlib else 'a')
if args.metalangle:
mergedLibFilePath = '%s_merged.%s' % tuple(libFilePath.rsplit('.', 1))
angleLibFilePath = "%s/libs-external/angle-metal/%s/libangle.a" % (baseDir, arch)
if not execute('libtool', baseDir,
'-o', mergedLibFilePath, libFilePath, angleLibFilePath
):
return False
libFilePath = mergedLibFilePath
libFilePaths.append(libFilePath)
if not execute('lipo', baseDir,
'-output', '%s/CartoMobileSDK' % outputDir,
'-create', *libFilePaths
):
return False
if args.sharedlib:
if not execute('install_name_tool', outputDir,
'-id', '@rpath/CartoMobileSDK.framework/CartoMobileSDK',
'CartoMobileSDK'
):
return False
if not copyfile('%s/scripts/ios/Info.plist' % baseDir, '%s/Info.plist' % outputDir):
return False
makedirs('%s/Headers' % outputDir)
if not args.sharedlib:
if not makesymlink('%s/CartoMobileSDK.framework/Versions' % distDir, 'A', 'Current'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Modules', 'Modules'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Headers', 'Headers'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/PrivateHeaders', 'PrivateHeaders'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/CartoMobileSDK', 'CartoMobileSDK'):
return False
publicHeaders = []
privateHeaders = []
headerDirTemplates = ['%s/all/native', '%s/ios/native', '%s/ios/objc', '%s/generated/ios-objc/proxies', '%s/libs-external/cglib']
if args.metalangle:
headerDirTemplates.append('%s/libs-external/angle-metal/include')
for headerDirTemplate in headerDirTemplates:
headerDir = headerDirTemplate % baseDir
if not os.path.exists(headerDir):
continue
currentDir = os.getcwd()
os.chdir(headerDir)
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.h'):
destDir = '%s/Headers/%s' % (outputDir, dirpath)
if headerDirTemplate.find('objc') == -1:
destDir = '%s/PrivateHeaders/%s' % (outputDir, dirpath)
privateHeaders.append(os.path.normpath(os.path.join(dirpath, filename)))
elif filename != 'CartoMobileSDK.h':
publicHeaders.append(os.path.normpath(os.path.join(dirpath, filename)))
if not (makedirs(destDir) and copyfile(os.path.join(dirpath, filename), '%s/%s' % (destDir, filename))):
os.chdir(currentDir)
return False
if filename == 'CartoMobileSDK.h':
updateUmbrellaHeader('%s/%s' % (destDir, filename), args)
else:
updatePrivateHeader('%s/%s' % (destDir, filename), args)
os.chdir(currentDir)
makedirs('%s/Modules' % outputDir)
buildModuleMap('%s/Modules/module.modulemap' % outputDir, publicHeaders, privateHeaders)
print("Output available in:\n%s" % distDir)
return True
def buildIOSCocoapod(args, buildpackage):
baseDir = getBaseDir()
distDir = getDistDir('ios')
version = args.buildversion
distName = 'sdk4-ios-%s.zip' % version
iosversion = '9.0' if args.metalangle else '7.0'
frameworks = (["IOSurface"] if args.metalangle else ["OpenGLES", "GLKit"]) + ["UIKit", "CoreGraphics", "CoreText", "CFNetwork", "Foundation", "CartoMobileSDK"]
with open('%s/scripts/ios-cocoapod/CartoMobileSDK.podspec.template' % baseDir, 'r') as f:
cocoapodFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'distDir': distDir, 'distName': distName, 'version': version, 'iosversion': iosversion, 'frameworks': ', '.join('"%s"' % framework for framework in frameworks) })
with open('%s/CartoMobileSDK.podspec' % distDir, 'w') as f:
f.write(cocoapodFile)
if buildpackage:
try:
os.remove('%s/%s' % (distDir, distName))
except:
pass
if not execute('zip', distDir, '-y', '-r', distName, 'CartoMobileSDK.framework'):
return False
print("Output available in:\n%s\n\nTo publish, use:\ncd %s\naws s3 cp %s s3://nutifront/sdk_snapshots/%s --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers\npod trunk push\n" % (distDir, distDir, distName, distName))
return True
parser = argparse.ArgumentParser()
parser.add_argument('--profile', dest='profile', default=getDefaultProfileId(), type=validProfile, help='Build profile')
parser.add_argument('--ios-arch', dest='iosarch', default=[], choices=IOS_ARCHS + ['all'], action='append', help='iOS target architectures')
parser.add_argument('--defines', dest='defines', default='', help='Defines for compilation')
parser.add_argument('--cmake', dest='cmake', default='cmake', help='CMake executable')
parser.add_argument('--cmake-options', dest='cmakeoptions', default='', help='CMake options')
parser.add_argument('--configuration', dest='configuration', default='Release', choices=['Release', 'RelWithDebInfo', 'Debug'], help='Configuration')
parser.add_argument('--build-number', dest='buildnumber', default='', help='Build sequence number, goes to version str')
parser.add_argument('--build-version', dest='buildversion', default='%s-devel' % SDK_VERSION, help='Build version, goes to distributions')
parser.add_argument('--build-cocoapod', dest='buildcocoapod', default=False, action='store_true', help='Build CocoaPod')
parser.add_argument('--build-cocoapod-package', dest='buildcocoapodpackage', default=False, action='store_true', help='Build CocoaPod')
parser.add_argument('--metalangle', dest='metalangle', default=False, action='store_true', help='Use MetalANGLE instead of Apple GL')
parser.add_argument('--strip-bitcode', dest='stripbitcode', default=False, action='store_true', help='Strip bitcode from the built framework')
parser.add_argument('--shared-framework', dest='sharedlib', default=False, action='store_true', help='Build shared framework instead of static')
args = parser.parse_args()
if 'all' in args.iosarch or args.iosarch == []:
args.iosarch = IOS_ARCHS
args.defines += ';' + getProfile(args.profile).get('defines', '')
if args.metalangle:
args.defines += ';' + '_CARTO_USE_METALANGLE'
args.cmakeoptions += ';' + getProfile(args.profile).get('cmake-options', '')
if not checkExecutable(args.cmake, '--help'):
print('Failed to find CMake executable. Use --cmake to specify its location')
sys.exit(-1)
for arch in args.iosarch:
if not buildIOSLib(args, arch):
sys.exit(-1)
if not buildIOSFramework(args, args.iosarch):
sys.exit(-1)
if args.buildcocoapod or args.buildcocoapodpackage:
if not buildIOSCocoapod(args, args.buildcocoapodpackage):
sys.exit(-1)
| 45.463203
| 245
| 0.668444
|
import os
import sys
import re
import shutil
import argparse
import string
from build.sdk_build_utils import *
IOS_ARCHS = ['i386', 'x86_64', 'armv7', 'arm64']
def updateUmbrellaHeader(filename, args):
with open(filename, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1)
for i in range(0, len(lines)):
if re.search('^\s*#define\s+.*$', lines[i].rstrip('\n')):
break
lines = lines[:i+1] + ['\n'] + ['#define %s\n' % define for define in args.defines.split(';') if define] + lines[i+1:]
with open(filename, 'w') as f:
f.writelines(lines)
def updatePrivateHeader(filename, args):
with open(filename, 'r') as f:
lines = f.readlines()
for i in range(0, len(lines)):
match = re.search('^\s*#include\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#include <CartoMobileSDK/%s>\n' % match.group(1)
match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n'))
if match:
lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1)
with open(filename, 'w') as f:
f.writelines(lines)
def buildModuleMap(filename, publicHeaders, privateHeaders):
with open(filename, 'w') as f:
f.write('framework module CartoMobileSDK {\n')
f.write(' umbrella header "CartoMobileSDK.h"\n')
for header in publicHeaders:
f.write(' header "%s"\n' % header)
f.write(' export *\n')
f.write(' module * { export * }\n')
f.write(' explicit module Private {\n')
f.write(' requires cplusplus\n')
for header in privateHeaders:
f.write(' header "%s"\n' % header)
f.write(' }\n')
f.write('}\n')
def buildIOSLib(args, arch):
platform = 'OS' if arch.startswith('arm') else 'SIMULATOR'
version = getVersion(args.buildnumber) if args.configuration == 'Release' else 'Devel'
baseDir = getBaseDir()
buildDir = getBuildDir('ios', '%s-%s' % (platform, arch))
defines = ["-D%s" % define for define in args.defines.split(';') if define]
options = ["-D%s" % option for option in args.cmakeoptions.split(';') if option]
if not cmake(args, buildDir, options + [
'-G', 'Xcode',
'-DCMAKE_SYSTEM_NAME=iOS',
'-DWRAPPER_DIR=%s' % ('%s/generated/ios-objc/proxies' % baseDir),
'-DINCLUDE_OBJC:BOOL=ON',
'-DSINGLE_LIBRARY:BOOL=ON',
'-DENABLE_BITCODE:BOOL=%s' % ('OFF' if args.stripbitcode else 'ON'),
'-DSHARED_LIBRARY:BOOL=%s' % ('ON' if args.sharedlib else 'OFF'),
'-DCMAKE_OSX_ARCHITECTURES=%s' % arch,
'-DCMAKE_OSX_SYSROOT=iphone%s' % platform.lower(),
'-DCMAKE_OSX_DEPLOYMENT_TARGET=%s' % ('9.0' if args.metalangle else '7.0'),
'-DCMAKE_BUILD_TYPE=%s' % args.configuration,
"-DSDK_CPP_DEFINES=%s" % " ".join(defines),
"-DSDK_VERSION='%s'" % version,
"-DSDK_PLATFORM='iOS'",
'%s/scripts/build' % baseDir
]):
return False
return cmake(args, buildDir, [
'--build', '.',
'--config', args.configuration
])
def buildIOSFramework(args, archs):
shutil.rmtree(getDistDir('ios'), True)
platformArchs = [('OS' if arch.startswith('arm') else 'SIMULATOR', arch) for arch in archs]
baseDir = getBaseDir()
distDir = getDistDir('ios')
if args.sharedlib:
outputDir = '%s/CartoMobileSDK.framework' % distDir
else:
outputDir = '%s/CartoMobileSDK.framework/Versions/A' % distDir
makedirs(outputDir)
libFilePaths = []
for platform, arch in platformArchs:
libFilePath = "%s/%s-%s/libcarto_mobile_sdk.%s" % (getBuildDir('ios', '%s-%s' % (platform, arch)), args.configuration, 'iphoneos' if arch.startswith("arm") else 'iphonesimulator', 'dylib' if args.sharedlib else 'a')
if args.metalangle:
mergedLibFilePath = '%s_merged.%s' % tuple(libFilePath.rsplit('.', 1))
angleLibFilePath = "%s/libs-external/angle-metal/%s/libangle.a" % (baseDir, arch)
if not execute('libtool', baseDir,
'-o', mergedLibFilePath, libFilePath, angleLibFilePath
):
return False
libFilePath = mergedLibFilePath
libFilePaths.append(libFilePath)
if not execute('lipo', baseDir,
'-output', '%s/CartoMobileSDK' % outputDir,
'-create', *libFilePaths
):
return False
if args.sharedlib:
if not execute('install_name_tool', outputDir,
'-id', '@rpath/CartoMobileSDK.framework/CartoMobileSDK',
'CartoMobileSDK'
):
return False
if not copyfile('%s/scripts/ios/Info.plist' % baseDir, '%s/Info.plist' % outputDir):
return False
makedirs('%s/Headers' % outputDir)
if not args.sharedlib:
if not makesymlink('%s/CartoMobileSDK.framework/Versions' % distDir, 'A', 'Current'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Modules', 'Modules'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Headers', 'Headers'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/PrivateHeaders', 'PrivateHeaders'):
return False
if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/CartoMobileSDK', 'CartoMobileSDK'):
return False
publicHeaders = []
privateHeaders = []
headerDirTemplates = ['%s/all/native', '%s/ios/native', '%s/ios/objc', '%s/generated/ios-objc/proxies', '%s/libs-external/cglib']
if args.metalangle:
headerDirTemplates.append('%s/libs-external/angle-metal/include')
for headerDirTemplate in headerDirTemplates:
headerDir = headerDirTemplate % baseDir
if not os.path.exists(headerDir):
continue
currentDir = os.getcwd()
os.chdir(headerDir)
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.h'):
destDir = '%s/Headers/%s' % (outputDir, dirpath)
if headerDirTemplate.find('objc') == -1:
destDir = '%s/PrivateHeaders/%s' % (outputDir, dirpath)
privateHeaders.append(os.path.normpath(os.path.join(dirpath, filename)))
elif filename != 'CartoMobileSDK.h':
publicHeaders.append(os.path.normpath(os.path.join(dirpath, filename)))
if not (makedirs(destDir) and copyfile(os.path.join(dirpath, filename), '%s/%s' % (destDir, filename))):
os.chdir(currentDir)
return False
if filename == 'CartoMobileSDK.h':
updateUmbrellaHeader('%s/%s' % (destDir, filename), args)
else:
updatePrivateHeader('%s/%s' % (destDir, filename), args)
os.chdir(currentDir)
makedirs('%s/Modules' % outputDir)
buildModuleMap('%s/Modules/module.modulemap' % outputDir, publicHeaders, privateHeaders)
print("Output available in:\n%s" % distDir)
return True
def buildIOSCocoapod(args, buildpackage):
baseDir = getBaseDir()
distDir = getDistDir('ios')
version = args.buildversion
distName = 'sdk4-ios-%s.zip' % version
iosversion = '9.0' if args.metalangle else '7.0'
frameworks = (["IOSurface"] if args.metalangle else ["OpenGLES", "GLKit"]) + ["UIKit", "CoreGraphics", "CoreText", "CFNetwork", "Foundation", "CartoMobileSDK"]
with open('%s/scripts/ios-cocoapod/CartoMobileSDK.podspec.template' % baseDir, 'r') as f:
cocoapodFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'distDir': distDir, 'distName': distName, 'version': version, 'iosversion': iosversion, 'frameworks': ', '.join('"%s"' % framework for framework in frameworks) })
with open('%s/CartoMobileSDK.podspec' % distDir, 'w') as f:
f.write(cocoapodFile)
if buildpackage:
try:
os.remove('%s/%s' % (distDir, distName))
except:
pass
if not execute('zip', distDir, '-y', '-r', distName, 'CartoMobileSDK.framework'):
return False
print("Output available in:\n%s\n\nTo publish, use:\ncd %s\naws s3 cp %s s3://nutifront/sdk_snapshots/%s --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers\npod trunk push\n" % (distDir, distDir, distName, distName))
return True
parser = argparse.ArgumentParser()
parser.add_argument('--profile', dest='profile', default=getDefaultProfileId(), type=validProfile, help='Build profile')
parser.add_argument('--ios-arch', dest='iosarch', default=[], choices=IOS_ARCHS + ['all'], action='append', help='iOS target architectures')
parser.add_argument('--defines', dest='defines', default='', help='Defines for compilation')
parser.add_argument('--cmake', dest='cmake', default='cmake', help='CMake executable')
parser.add_argument('--cmake-options', dest='cmakeoptions', default='', help='CMake options')
parser.add_argument('--configuration', dest='configuration', default='Release', choices=['Release', 'RelWithDebInfo', 'Debug'], help='Configuration')
parser.add_argument('--build-number', dest='buildnumber', default='', help='Build sequence number, goes to version str')
parser.add_argument('--build-version', dest='buildversion', default='%s-devel' % SDK_VERSION, help='Build version, goes to distributions')
parser.add_argument('--build-cocoapod', dest='buildcocoapod', default=False, action='store_true', help='Build CocoaPod')
parser.add_argument('--build-cocoapod-package', dest='buildcocoapodpackage', default=False, action='store_true', help='Build CocoaPod')
parser.add_argument('--metalangle', dest='metalangle', default=False, action='store_true', help='Use MetalANGLE instead of Apple GL')
parser.add_argument('--strip-bitcode', dest='stripbitcode', default=False, action='store_true', help='Strip bitcode from the built framework')
parser.add_argument('--shared-framework', dest='sharedlib', default=False, action='store_true', help='Build shared framework instead of static')
args = parser.parse_args()
if 'all' in args.iosarch or args.iosarch == []:
args.iosarch = IOS_ARCHS
args.defines += ';' + getProfile(args.profile).get('defines', '')
if args.metalangle:
args.defines += ';' + '_CARTO_USE_METALANGLE'
args.cmakeoptions += ';' + getProfile(args.profile).get('cmake-options', '')
if not checkExecutable(args.cmake, '--help'):
print('Failed to find CMake executable. Use --cmake to specify its location')
sys.exit(-1)
for arch in args.iosarch:
if not buildIOSLib(args, arch):
sys.exit(-1)
if not buildIOSFramework(args, args.iosarch):
sys.exit(-1)
if args.buildcocoapod or args.buildcocoapodpackage:
if not buildIOSCocoapod(args, args.buildcocoapodpackage):
sys.exit(-1)
| true
| true
|
f716db30ad2b5a1b0baa72fd74c8fbb701037f01
| 1,189
|
py
|
Python
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
|
TolyaTalamanov/open_model_zoo
|
1697e60712df4ca72635a2080a197b9d3bc24129
|
[
"Apache-2.0"
] | 2,201
|
2018-10-15T14:37:19.000Z
|
2020-07-16T02:05:51.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 759
|
2018-10-18T07:43:55.000Z
|
2020-07-16T01:23:12.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 808
|
2018-10-16T14:03:49.000Z
|
2020-07-15T11:41:45.000Z
|
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .postprocessor import Postprocessor
class SalientMapNormalizer(Postprocessor):
    """Binarize ground-truth salience masks into {0, 1} uint8 maps.

    Three-channel (BGR) masks are first collapsed to grayscale, then the
    values are scaled to [0, 1] and thresholded at 0.5. Predictions are
    passed through unchanged.
    """
    __provider__ = 'normalize_salience_map'

    def process_image(self, annotation, prediction):
        # Normalize every annotation mask in place; predictions untouched.
        for entry in annotation:
            mask = entry.mask
            # Color masks are reduced to a single gray channel first.
            if mask.ndim == 3 and mask.shape[-1] == 3:
                mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
            scaled = mask / 255
            # Threshold at 0.5: salient pixels -> 1, background -> 0.
            entry.mask = np.where(scaled >= 0.5, 1, 0).astype(np.uint8)
        return annotation, prediction
| 34.970588
| 72
| 0.702271
|
import cv2
import numpy as np
from .postprocessor import Postprocessor
class SalientMapNormalizer(Postprocessor):
    """Binarize ground-truth salience masks into {0, 1} uint8 maps."""
    __provider__ = 'normalize_salience_map'
    def process_image(self, annotation, prediction):
        # Normalize each annotation mask in place; predictions pass through.
        for ann in annotation:
            gt_mask = ann.mask
            # 3-channel (BGR) masks are collapsed to grayscale first.
            if len(gt_mask.shape) == 3 and gt_mask.shape[-1] == 3:
                gt_mask = cv2.cvtColor(gt_mask, cv2.COLOR_BGR2GRAY)
            gt_mask = gt_mask / 255
            # Threshold at 0.5: salient pixels -> 1, background -> 0.
            gt_mask[gt_mask >= 0.5] = 1
            gt_mask[gt_mask < 0.5] = 0
            ann.mask = gt_mask.astype(np.uint8)
        return annotation, prediction
| true
| true
|
f716dbad5d5deabbd30519182541764ab0c17a2f
| 10,478
|
py
|
Python
|
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 12
|
2019-07-23T08:07:53.000Z
|
2022-03-09T06:13:16.000Z
|
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 7
|
2019-08-30T07:00:00.000Z
|
2021-12-30T08:02:56.000Z
|
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 5
|
2020-10-10T03:40:32.000Z
|
2021-11-23T12:28:53.000Z
|
# -*- coding: utf-8 -*-
"""
*****************************
Time-respecting VF2 Algorithm
*****************************
An extension of the VF2 algorithm for time-respecting graph ismorphism
testing in temporal graphs.
A temporal graph is one in which edges contain a datetime attribute,
denoting when interaction occurred between the incident nodes. A
time-respecting subgraph of a temporal graph is a subgraph such that
all interactions incident to a node occurred within a time threshold,
delta, of each other. A directed time-respecting subgraph has the
added constraint that incoming interactions to a node must precede
outgoing interactions from the same node - this enforces a sense of
directed flow.
Introduction
------------
The TimeRespectingGraphMatcher and TimeRespectingDiGraphMatcher
extend the GraphMatcher and DiGraphMatcher classes, respectively,
to include temporal constraints on matches. This is achieved through
a semantic check, via the semantic_feasibility() function.
As well as including G1 (the graph in which to seek embeddings) and
G2 (the subgraph structure of interest), the name of the temporal
attribute on the edges and the time threshold, delta, must be supplied
as arguments to the matching constructors.
A delta of zero is the strictest temporal constraint on the match -
only embeddings in which all interactions occur at the same time will
be returned. A delta of one day will allow embeddings in which
adjacent interactions occur up to a day apart.
Examples
--------
Examples will be provided when the datetime type has been incorporated.
Temporal Subgraph Isomorphism
-----------------------------
A brief discussion of the somewhat diverse current literature will be
included here.
References
----------
[1] Redmond, U. and Cunningham, P. Temporal subgraph isomorphism. In:
The 2013 IEEE/ACM International Conference on Advances in Social
Networks Analysis and Mining (ASONAM). Niagara Falls, Canada; 2013:
pages 1451 - 1452. [65]
For a discussion of the literature on temporal networks:
[3] P. Holme and J. Saramaki. Temporal networks. Physics Reports,
519(3):97–125, 2012.
Notes
-----
Handles directed and undirected graphs and graphs with parallel edges.
"""
from __future__ import absolute_import
import cynetworkx as nx
from datetime import datetime, timedelta
from .isomorphvf2 import GraphMatcher, DiGraphMatcher
__all__ = ['TimeRespectingGraphMatcher',
'TimeRespectingDiGraphMatcher']
class TimeRespectingGraphMatcher(GraphMatcher):
    def __init__(self, G1, G2, temporal_attribute_name, delta):
        """Initialize TimeRespectingGraphMatcher.

        G1 and G2 should be nx.Graph or nx.MultiGraph instances.

        Examples
        --------
        To create a TimeRespectingGraphMatcher which checks for
        syntactic and semantic feasibility:

        >>> from cynetworkx.algorithms import isomorphism
        >>> G1 = nx.Graph(nx.path_graph(4, create_using=nx.Graph()))
        >>> G2 = nx.Graph(nx.path_graph(4, create_using=nx.Graph()))
        >>> GM = isomorphism.TimeRespectingGraphMatcher(G1, G2, 'date', timedelta(days=1))
        """
        self.temporal_attribute_name = temporal_attribute_name
        self.delta = delta
        super(TimeRespectingGraphMatcher, self).__init__(G1, G2)

    def one_hop(self, Gx, Gx_node, neighbors):
        """
        Edges one hop out from a node in the mapping should be
        time-respecting with respect to each other.
        """
        attr = self.temporal_attribute_name
        # Exact type check: MultiGraph subclasses Graph, so isinstance
        # would not distinguish the two adjacency layouts.
        is_simple_graph = type(Gx) == nx.Graph
        timestamps = []
        for nbr in neighbors:
            if is_simple_graph:
                # Graph: G[u][v] is the edge data dictionary itself.
                timestamps.append(Gx[Gx_node][nbr][attr])
            else:
                # MultiGraph: G[u][v] maps edge key -> data dictionary.
                timestamps.extend(edge[attr] for edge in Gx[Gx_node][nbr].values())
        if any(t is None for t in timestamps):
            raise ValueError('Datetime not supplied for at least one edge.')
        if not timestamps:
            return True
        return max(timestamps) - min(timestamps) <= self.delta

    def two_hop(self, Gx, core_x, Gx_node, neighbors):
        """
        Paths of length 2 from Gx_node should be time-respecting.
        """
        for nbr in neighbors:
            mapped = [n for n in Gx[nbr] if n in core_x]
            if not self.one_hop(Gx, nbr, mapped + [Gx_node]):
                return False
        return True

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if adding (G1_node, G2_node) is semantically
        feasible.

        Any subclass which redefines semantic_feasibility() must
        maintain the self.tests if needed, to keep the match() method
        functional. Implementations should consider multigraphs.
        """
        mapped_neighbors = [n for n in self.G1[G1_node] if n in self.core_1]
        # Check the candidate node first (fail fast), then its 2-hop paths.
        return (self.one_hop(self.G1, G1_node, mapped_neighbors)
                and self.two_hop(self.G1, self.core_1, G1_node, mapped_neighbors))
class TimeRespectingDiGraphMatcher(DiGraphMatcher):
    def __init__(self, G1, G2, temporal_attribute_name, delta):
        """Initialize TimeRespectingDiGraphMatcher.

        G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances.

        Examples
        --------
        To create a TimeRespectingDiGraphMatcher which checks for
        syntactic and semantic feasibility:

        >>> from cynetworkx.algorithms import isomorphism
        >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
        >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph()))
        >>> GM = isomorphism.TimeRespectingDiGraphMatcher(G1, G2, 'date', timedelta(days=1))
        """
        # Per-edge datetime attribute name, and the maximum spread allowed
        # between timestamps of edges incident to one node.
        self.temporal_attribute_name = temporal_attribute_name
        self.delta = delta
        super(TimeRespectingDiGraphMatcher, self).__init__(G1, G2)

    def get_pred_dates(self, Gx, Gx_node, core_x, pred):
        """
        Get the dates of edges from predecessors.
        """
        pred_dates = []
        # Exact type check on purpose: MultiDiGraph subclasses DiGraph, so the
        # two adjacency layouts must be distinguished by type, not isinstance.
        if type(Gx) == type(nx.DiGraph()):  # Graph G[u][v] returns the data dictionary.
            for n in pred:
                pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name])
        else:  # MultiGraph G[u][v] returns a dictionary of key -> data dictionary.
            for n in pred:
                for edge in Gx[n][Gx_node].values():  # Iterates all edge data between node pair.
                    pred_dates.append(edge[self.temporal_attribute_name])
        return pred_dates

    def get_succ_dates(self, Gx, Gx_node, core_x, succ):
        """
        Get the dates of edges to successors.
        """
        succ_dates = []
        if type(Gx) == type(nx.DiGraph()):  # Graph G[u][v] returns the data dictionary.
            for n in succ:
                succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name])
        else:  # MultiGraph G[u][v] returns a dictionary of key -> data dictionary.
            for n in succ:
                for edge in Gx[Gx_node][n].values():  # Iterates all edge data between node pair.
                    succ_dates.append(edge[self.temporal_attribute_name])
        return succ_dates

    def one_hop(self, Gx, Gx_node, core_x, pred, succ):
        """
        The ego node.
        """
        # test_one: all incident timestamps within delta of each other;
        # test_two: incoming edges precede outgoing ones (directed flow).
        pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred)
        succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ)
        return self.test_one(pred_dates, succ_dates) and self.test_two(pred_dates, succ_dates)

    def two_hop_pred(self, Gx, Gx_node, core_x, pred):
        """
        The predeccessors of the ego node.
        """
        return all(self.one_hop(Gx, p, core_x, self.preds(Gx, core_x, p), self.succs(Gx, core_x, p, Gx_node)) for p in pred)

    def two_hop_succ(self, Gx, Gx_node, core_x, succ):
        """
        The successors of the ego node.
        """
        return all(self.one_hop(Gx, s, core_x, self.preds(Gx, core_x, s, Gx_node), self.succs(Gx, core_x, s)) for s in succ)

    def preds(self, Gx, core_x, v, Gx_node=None):
        # Already-mapped predecessors of v, optionally including the
        # candidate node being tested.
        pred = [n for n in Gx.predecessors(v) if n in core_x]
        if Gx_node:
            pred.append(Gx_node)
        return pred

    def succs(self, Gx, core_x, v, Gx_node=None):
        # Already-mapped successors of v, optionally including the
        # candidate node being tested.
        succ = [n for n in Gx.successors(v) if n in core_x]
        if Gx_node:
            succ.append(Gx_node)
        return succ

    def test_one(self, pred_dates, succ_dates):
        """
        Edges one hop out from Gx_node in the mapping should be
        time-respecting with respect to each other, regardless of
        direction.
        """
        time_respecting = True
        dates = pred_dates + succ_dates

        if any(x is None for x in dates):
            raise ValueError('Date or datetime not supplied for at least one edge.')

        dates.sort()  # Small to large.
        # All incident timestamps must fit inside a window of width delta.
        if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta):
            time_respecting = False
        return time_respecting

    def test_two(self, pred_dates, succ_dates):
        """
        Edges from a dual Gx_node in the mapping should be ordered in
        a time-respecting manner.
        """
        time_respecting = True
        pred_dates.sort()
        succ_dates.sort()
        # First out before last in; negative of the necessary condition for time-respect.
        if 0 < len(succ_dates) and 0 < len(pred_dates) and succ_dates[0] < pred_dates[-1]:
            time_respecting = False
        return time_respecting

    def semantic_feasibility(self, G1_node, G2_node):
        """Returns True if adding (G1_node, G2_node) is semantically
        feasible.

        Any subclass which redefines semantic_feasibility() must
        maintain the self.tests if needed, to keep the match() method
        functional. Implementations should consider multigraphs.
        """
        pred, succ = [n for n in self.G1.predecessors(G1_node) if n in self.core_1], [
            n for n in self.G1.successors(G1_node) if n in self.core_1]
        if not self.one_hop(self.G1, G1_node, self.core_1, pred, succ):  # Fail fast on first node.
            return False
        if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred):
            return False
        if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ):
            return False
        # Otherwise, this node is semantically feasible!
        return True
| 38.664207
| 124
| 0.655564
|
from __future__ import absolute_import
import cynetworkx as nx
from datetime import datetime, timedelta
from .isomorphvf2 import GraphMatcher, DiGraphMatcher
__all__ = ['TimeRespectingGraphMatcher',
'TimeRespectingDiGraphMatcher']
class TimeRespectingGraphMatcher(GraphMatcher):
    """VF2 matcher for undirected temporal graphs: a candidate node mapping
    is accepted only if all edges incident to a mapped node carry timestamps
    within ``delta`` of each other."""
    def __init__(self, G1, G2, temporal_attribute_name, delta):
        # Per-edge datetime attribute name and maximum allowed time spread.
        self.temporal_attribute_name = temporal_attribute_name
        self.delta = delta
        super(TimeRespectingGraphMatcher, self).__init__(G1, G2)
    def one_hop(self, Gx, Gx_node, neighbors):
        """Check that all edges between Gx_node and `neighbors` carry
        timestamps within `self.delta` of each other."""
        dates = []
        for n in neighbors:
            # Exact type check: MultiGraph subclasses Graph, so the two
            # adjacency layouts must be told apart by type.
            if type(Gx) == type(nx.Graph()):  # Graph: G[u][v] is the data dict.
                dates.append(Gx[Gx_node][n][self.temporal_attribute_name])
            else:  # MultiGraph: G[u][v] maps edge key -> data dict.
                for edge in Gx[Gx_node][n].values():
                    dates.append(edge[self.temporal_attribute_name])
        if any(x is None for x in dates):
            raise ValueError('Datetime not supplied for at least one edge.')
        # Empty date list is vacuously time-respecting.
        return not dates or max(dates) - min(dates) <= self.delta
    def two_hop(self, Gx, core_x, Gx_node, neighbors):
        """Apply the one-hop condition around every already-mapped neighbor."""
        return all(self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) for v in neighbors)
    def semantic_feasibility(self, G1_node, G2_node):
        """Return True if mapping (G1_node, G2_node) keeps the partial
        embedding time-respecting (one-hop then two-hop checks)."""
        neighbors = [n for n in self.G1[G1_node] if n in self.core_1]
        if not self.one_hop(self.G1, G1_node, neighbors):  # Fail fast.
            return False
        if not self.two_hop(self.G1, self.core_1, G1_node, neighbors):
            return False
        return True
class TimeRespectingDiGraphMatcher(DiGraphMatcher):
    """VF2 matcher for directed temporal graphs: incident edge timestamps
    must lie within ``delta`` of each other, and incoming interactions must
    precede outgoing ones (directed flow)."""
    def __init__(self, G1, G2, temporal_attribute_name, delta):
        # Per-edge datetime attribute name and maximum allowed time spread.
        self.temporal_attribute_name = temporal_attribute_name
        self.delta = delta
        super(TimeRespectingDiGraphMatcher, self).__init__(G1, G2)
    def get_pred_dates(self, Gx, Gx_node, core_x, pred):
        """Collect timestamps of edges arriving at Gx_node from `pred`."""
        pred_dates = []
        # Exact type check: MultiDiGraph subclasses DiGraph.
        if type(Gx) == type(nx.DiGraph()):  # DiGraph: G[u][v] is the data dict.
            for n in pred:
                pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name])
        else:  # MultiDiGraph: G[u][v] maps edge key -> data dict.
            for n in pred:
                for edge in Gx[n][Gx_node].values():
                    pred_dates.append(edge[self.temporal_attribute_name])
        return pred_dates
    def get_succ_dates(self, Gx, Gx_node, core_x, succ):
        """Collect timestamps of edges leaving Gx_node towards `succ`."""
        succ_dates = []
        if type(Gx) == type(nx.DiGraph()):  # DiGraph: G[u][v] is the data dict.
            for n in succ:
                succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name])
        else:  # MultiDiGraph: G[u][v] maps edge key -> data dict.
            for n in succ:
                for edge in Gx[Gx_node][n].values():
                    succ_dates.append(edge[self.temporal_attribute_name])
        return succ_dates
    def one_hop(self, Gx, Gx_node, core_x, pred, succ):
        """Check both temporal conditions for the ego node Gx_node."""
        pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred)
        succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ)
        return self.test_one(pred_dates, succ_dates) and self.test_two(pred_dates, succ_dates)
    def two_hop_pred(self, Gx, Gx_node, core_x, pred):
        """Apply the one-hop check to every mapped predecessor of the ego node."""
        return all(self.one_hop(Gx, p, core_x, self.preds(Gx, core_x, p), self.succs(Gx, core_x, p, Gx_node)) for p in pred)
    def two_hop_succ(self, Gx, Gx_node, core_x, succ):
        """Apply the one-hop check to every mapped successor of the ego node."""
        return all(self.one_hop(Gx, s, core_x, self.preds(Gx, core_x, s, Gx_node), self.succs(Gx, core_x, s)) for s in succ)
    def preds(self, Gx, core_x, v, Gx_node=None):
        # Mapped predecessors of v, optionally plus the candidate node.
        pred = [n for n in Gx.predecessors(v) if n in core_x]
        if Gx_node:
            pred.append(Gx_node)
        return pred
    def succs(self, Gx, core_x, v, Gx_node=None):
        # Mapped successors of v, optionally plus the candidate node.
        succ = [n for n in Gx.successors(v) if n in core_x]
        if Gx_node:
            succ.append(Gx_node)
        return succ
    def test_one(self, pred_dates, succ_dates):
        """All incident timestamps (either direction) must fit within a
        window of width self.delta."""
        time_respecting = True
        dates = pred_dates + succ_dates
        if any(x is None for x in dates):
            raise ValueError('Date or datetime not supplied for at least one edge.')
        dates.sort()  # Small to large.
        if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta):
            time_respecting = False
        return time_respecting
    def test_two(self, pred_dates, succ_dates):
        """The earliest outgoing edge must not precede the latest incoming
        edge (directed-flow constraint)."""
        time_respecting = True
        pred_dates.sort()
        succ_dates.sort()
        # First out before last in violates time-respect.
        if 0 < len(succ_dates) and 0 < len(pred_dates) and succ_dates[0] < pred_dates[-1]:
            time_respecting = False
        return time_respecting
    def semantic_feasibility(self, G1_node, G2_node):
        """Return True if mapping (G1_node, G2_node) keeps the partial
        embedding time-respecting (ego check, then both 2-hop checks)."""
        pred, succ = [n for n in self.G1.predecessors(G1_node) if n in self.core_1], [
            n for n in self.G1.successors(G1_node) if n in self.core_1]
        if not self.one_hop(self.G1, G1_node, self.core_1, pred, succ):  # Fail fast.
            return False
        if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred):
            return False
        if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ):
            return False
        return True
| true
| true
|
f716dcbb37cfe64fdd5bab29b9a4cfa9b3b000b2
| 33,058
|
py
|
Python
|
pyccel/ast/operators.py
|
jalalium/pyccel
|
4f3d9a359e42c16440e9c841059257d292a8361b
|
[
"MIT"
] | null | null | null |
pyccel/ast/operators.py
|
jalalium/pyccel
|
4f3d9a359e42c16440e9c841059257d292a8361b
|
[
"MIT"
] | null | null | null |
pyccel/ast/operators.py
|
jalalium/pyccel
|
4f3d9a359e42c16440e9c841059257d292a8361b
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
# TODO [EB 12.03.21]: Remove pylint command with PR #797
# pylint: disable=W0201
"""
Module handling all python builtin operators
These operators all have a precision as detailed here:
https://docs.python.org/3/reference/expressions.html#operator-precedence
They also have specific rules to determine the dtype, precision, rank, shape
"""
from ..errors.errors import Errors, PyccelSemanticError
from .basic import PyccelAstNode
from .datatypes import (NativeBool, NativeInteger, NativeReal,
NativeComplex, NativeString, default_precision,
NativeNumeric)
from .literals import Literal, LiteralInteger, LiteralFloat, LiteralComplex, Nil
from .literals import convert_to_literal
errors = Errors()
__all__ = (
'PyccelOperator',
'PyccelPow',
'PyccelAdd',
'PyccelMinus',
'PyccelMul',
'PyccelDiv',
'PyccelMod',
'PyccelFloorDiv',
'PyccelEq',
'PyccelNe',
'PyccelLt',
'PyccelLe',
'PyccelGt',
'PyccelGe',
'PyccelAnd',
'PyccelOr',
'PyccelNot',
'PyccelAssociativeParenthesis',
'PyccelUnary',
'PyccelUnarySub',
'Relational',
'PyccelIs',
'PyccelIsNot',
'IfTernaryOperator'
)
#==============================================================================
def broadcast(shape_1, shape_2):
    """Broadcast two shapes together following numpy broadcasting rules.

    Dimensions are compared symbolically (via sympy) so that shapes made of
    pyccel expressions, not just literal integers, can be matched.

    Raises
    ------
    PyccelSemanticError
        If the two shapes cannot be broadcast together.
    """
    from pyccel.ast.sympy_helper import pyccel_to_sympy
    len_1 = len(shape_1)
    len_2 = len(shape_2)
    # Left-pad the shorter shape with literal ones so both have equal rank.
    padding = (LiteralInteger(1),) * abs(len_1 - len_2)
    if len_1 > len_2:
        padded_1, padded_2 = shape_1, padding + tuple(shape_2)
    elif len_2 > len_1:
        padded_1, padded_2 = padding + tuple(shape_1), shape_2
    else:
        padded_1, padded_2 = shape_1, shape_2

    result = []
    for dim_1, dim_2 in zip(padded_1, padded_2):
        # The symbol map is shared by both conversions so that identical
        # pyccel expressions map to identical sympy symbols and compare equal.
        used_names = set()
        symbol_map = {}
        sy_1 = pyccel_to_sympy(dim_1, symbol_map, used_names)
        sy_2 = pyccel_to_sympy(dim_2, symbol_map, used_names)
        if sy_1 == sy_2:
            result.append(dim_1)
        elif sy_1 == 1:
            result.append(dim_2)
        elif sy_2 == 1:
            result.append(dim_1)
        elif sy_1.is_constant() and not sy_2.is_constant():
            # Prefer the known constant over an unknown symbolic size.
            result.append(dim_1)
        elif sy_2.is_constant() and not sy_1.is_constant():
            result.append(dim_2)
        elif not sy_2.is_constant() and not sy_1.is_constant() \
                and not (sy_1 - sy_2).is_constant():
            # Both symbolic and not provably different: keep the first.
            result.append(dim_1)
        else:
            shape1_code = '({})'.format(' '.join(str(s) + ',' for s in shape_1))
            shape2_code = '({})'.format(' '.join(str(s) + ',' for s in shape_2))
            msg = 'operands could not be broadcast together with shapes {} {}'
            raise PyccelSemanticError(msg.format(shape1_code, shape2_code))
    return tuple(result)
#==============================================================================
class PyccelOperator(PyccelAstNode):
    """
    Abstract superclass for all builtin operators.
    The __init__ function is common
    but the functions called by __init__ are specialised

    Parameters
    ----------
    args: tuple
        The arguments passed to the operator
    """
    __slots__ = ('_args', )
    _attribute_nodes = ('_args',)

    def __init__(self, *args):
        # Wrap low-precedence arguments in parentheses before storing them.
        self._args = tuple(self._handle_precedence(args))

        # During the syntactic stage no type information is available yet,
        # so dtype/shape/order computation is skipped entirely.
        if self.stage == 'syntactic':
            super().__init__()
            return
        self._set_dtype()
        self._set_shape_rank()
        # rank is None for lambda functions
        self._set_order()
        super().__init__()

    def _set_dtype(self):
        # _calculate_dtype is provided by each concrete operator subclass.
        self._dtype, self._precision = self._calculate_dtype(*self._args)  # pylint: disable=no-member

    def _set_shape_rank(self):
        # _calculate_shape_rank is provided by each concrete operator subclass.
        self._shape, self._rank = self._calculate_shape_rank(*self._args)  # pylint: disable=no-member

    @property
    def precedence(self):
        """ The precedence of the operator as defined here:
            https://docs.python.org/3/reference/expressions.html#operator-precedence
        """
        return self._precedence

    def _handle_precedence(self, args):
        """
        Insert parentheses where necessary by examining the precedence of the operator
        e.g:
            PyccelMul(a,PyccelAdd(b,c))
        means:
            a*(b+c)
        so this input will give:
            PyccelMul(a, PyccelAssociativeParenthesis(PyccelAdd(b,c)))

        Parentheses are also added were they are required for clarity

        Parameters
        ----------
        args: tuple
            The arguments passed to the operator

        Results
        -------
        args: tuple
            The arguments with the parentheses inserted
        """
        # Objects without a precedence (e.g. literals/variables) are treated
        # as maximally binding (17) and are never parenthesised.
        precedence = [getattr(a, 'precedence', 17) for a in args]

        if min(precedence) <= self._precedence:
            new_args = []
            for i, (a, p) in enumerate(zip(args, precedence)):
                # Equal precedence only needs parentheses after the first
                # argument (left-associativity keeps the first unambiguous).
                if (p < self._precedence or (p == self._precedence and i != 0)):
                    new_args.append(PyccelAssociativeParenthesis(a))
                else:
                    new_args.append(a)

            args = tuple(new_args)

        return args

    def __str__(self):
        return repr(self)

    def _set_order(self):
        """ Sets the shape and rank
        This is chosen to match the arguments if they are in agreement.
        Otherwise it defaults to 'C'
        """
        if self._rank is not None and self._rank > 1:
            if all(a.order == self._args[0].order for a in self._args):
                self._order = self._args[0].order
            else:
                # Mixed-order arguments: fall back to C (row-major) order.
                self._order = 'C'
        else:
            # Scalars and 1-d arrays have no meaningful memory order.
            self._order = None

    @property
    def args(self):
        """ Arguments of the operator
        """
        return self._args
#==============================================================================
class PyccelUnaryOperator(PyccelOperator):
    """ Abstract superclass representing a python
    operator with only one argument

    Parameters
    ----------
    arg: PyccelAstNode
        The argument passed to the operator
    """
    __slots__ = ('_dtype', '_precision','_shape','_rank','_order')

    def __init__(self, arg):
        super().__init__(arg)

    @staticmethod
    def _calculate_dtype(*args):
        """Return (dtype, precision) taken directly from the argument."""
        operand = args[0]
        return operand.dtype, operand.precision

    @staticmethod
    def _calculate_shape_rank(*args):
        """Return (shape, rank) taken directly from the argument."""
        operand = args[0]
        return operand.shape, operand.rank
#==============================================================================
class PyccelUnary(PyccelUnaryOperator):
    """
    Class representing a call to the python positive operator.
    I.e:
        +a
    is equivalent to:
        PyccelUnary(a)

    Parameters
    ----------
    arg: PyccelAstNode
        The argument passed to the operator
    """
    __slots__ = ()
    _precedence = 14

    def _handle_precedence(self, args):
        # Apply the generic precedence rules, then additionally parenthesise
        # any nested unary operator so e.g. '+ +a' prints unambiguously.
        processed = PyccelUnaryOperator._handle_precedence(self, args)
        return tuple(
            PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a
            for a in processed
        )

    def __repr__(self):
        return '+' + repr(self.args[0])
#==============================================================================
class PyccelUnarySub(PyccelUnary):
    """
    Class representing a call to the python negative operator.
    I.e:
        -a
    is equivalent to:
        PyccelUnarySub(a)

    Parameters
    ----------
    arg: PyccelAstNode
        The argument passed to the operator
    """
    __slots__ = ()

    def __repr__(self):
        return '-' + repr(self.args[0])
#==============================================================================
class PyccelNot(PyccelUnaryOperator):
    """
    Class representing a call to the python not operator.
    I.e:
        not a
    is equivalent to:
        PyccelNot(a)

    Parameters
    ----------
    arg: PyccelAstNode
        The argument passed to the operator
    """
    __slots__ = ()
    _precedence = 6

    @staticmethod
    def _calculate_dtype(*args):
        """The result of ``not`` is always a default-precision boolean,
        whatever the argument's type."""
        return NativeBool(), default_precision['bool']

    @staticmethod
    def _calculate_shape_rank(*args):
        """The result of ``not`` is always a scalar (empty shape, rank 0)."""
        return (), 0

    def __repr__(self):
        return 'not {}'.format(repr(self.args[0]))
#==============================================================================
class PyccelAssociativeParenthesis(PyccelUnaryOperator):
    """
    Class representing parentheses

    Parameters
    ----------
    arg: PyccelAstNode
        The argument in the PyccelAssociativeParenthesis
    """
    __slots__ = ()
    _precedence = 18

    def _handle_precedence(self, args):
        # Parentheses already isolate their content; no wrapping needed.
        return args

    def __repr__(self):
        return '({})'.format(repr(self.args[0]))
#==============================================================================
class PyccelBinaryOperator(PyccelOperator):
    """ Abstract superclass representing a python
    operator with two arguments

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ('_dtype','_precision','_shape','_rank','_order')

    def __init__(self, arg1, arg2, simplify = False):
        # 'simplify' is consumed by the subclasses' __new__ methods; it is
        # accepted here only so the constructor signatures stay compatible.
        super().__init__(arg1, arg2)

    @classmethod
    def _calculate_dtype(cls, *args):
        """ Sets the dtype and precision

        If one argument is a string then all arguments must be strings

        If the arguments are numeric then the dtype and precision
        match the broadest type and the largest precision
        e.g.
            1 + 2j -> PyccelAdd(LiteralInteger, LiteralComplex) -> complex
        """
        integers  = [a for a in args if a.dtype in (NativeInteger(),NativeBool())]
        reals     = [a for a in args if a.dtype is NativeReal()]
        complexes = [a for a in args if a.dtype is NativeComplex()]
        strs      = [a for a in args if a.dtype is NativeString()]

        if strs:
            # Bug fix: this sanity check was previously placed after the
            # return and therefore never executed. Strings cannot be mixed
            # with numeric operands.
            assert len(integers + reals + complexes) == 0
            return cls._handle_str_type(strs)
        elif complexes:
            return cls._handle_complex_type(complexes)
        elif reals:
            return cls._handle_real_type(reals)
        elif integers:
            return cls._handle_integer_type(integers)
        else:
            raise TypeError('cannot determine the type of {}'.format(args))

    @staticmethod
    def _handle_str_type(strs):
        """
        Set dtype and precision when both arguments are strings
        """
        raise TypeError("unsupported operand type(s) for /: 'str' and 'str'")

    @staticmethod
    def _handle_complex_type(complexes):
        """
        Set dtype and precision when the result is complex
        """
        dtype = NativeComplex()
        # The result keeps the largest precision of the complex operands.
        precision = max(a.precision for a in complexes)
        return dtype, precision

    @staticmethod
    def _handle_real_type(reals):
        """
        Set dtype and precision when the result is real
        """
        dtype = NativeReal()
        precision = max(a.precision for a in reals)
        return dtype, precision

    @staticmethod
    def _handle_integer_type(integers):
        """
        Set dtype and precision when the result is integer
        """
        dtype = NativeInteger()
        precision = max(a.precision for a in integers)
        return dtype, precision

    @staticmethod
    def _calculate_shape_rank(*args):
        """ Sets the shape and rank

        Strings must be scalars.

        For numeric types the rank and shape is determined according
        to numpy broadcasting rules where possible
        """
        strs = [a for a in args if a.dtype is NativeString()]
        if strs:
            other = [a for a in args if a.dtype in (NativeInteger(), NativeBool(), NativeReal(), NativeComplex())]
            assert len(other) == 0
            rank  = 0
            shape = ()
        else:
            ranks  = [a.rank for a in args]
            shapes = [a.shape for a in args]

            if None in ranks:
                # Rank unknown for at least one operand: propagate unknown.
                rank  = None
                shape = None
            elif all(sh is not None for tup in shapes for sh in tup):
                # All dimensions known: apply numpy broadcasting rules.
                s = broadcast(args[0].shape, args[1].shape)
                shape = s
                rank  = len(s)
            else:
                # Ranks known but some dimensions are not: keep the largest
                # rank with unknown dimensions.
                rank  = max(a.rank for a in args)
                shape = [None]*rank
        return shape, rank
#==============================================================================
class PyccelArithmeticOperator(PyccelBinaryOperator):
    """ Abstract superclass representing a python
    arithmetic operator

    This class is necessary to handle specific precedence
    rules for arithmetic operators
    I.e. to handle the error:
    Extension: Unary operator following arithmetic operator (use parentheses)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()

    def _handle_precedence(self, args):
        # Apply the generic precedence rules, then parenthesise any unary
        # operand so 'a * -b' becomes 'a * (-b)'.
        processed = PyccelBinaryOperator._handle_precedence(self, args)
        return tuple(
            PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a
            for a in processed
        )
#==============================================================================
class PyccelPow(PyccelArithmeticOperator):
    """
    Class representing a call to the python exponent operator.
    I.e:
        a ** b
    is equivalent to:
        PyccelPow(a, b)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 15

    def __repr__(self):
        base, exponent = self.args
        return '{} ** {}'.format(base, exponent)
#==============================================================================
class PyccelAdd(PyccelArithmeticOperator):
    """
    Class representing a call to the python addition operator.
    I.e:
        a + b
    is equivalent to:
        PyccelAdd(a, b)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 12

    def __new__(cls, arg1, arg2, simplify = False):
        if simplify:
            # a + (-b)  ->  a - b
            if isinstance(arg2, PyccelUnarySub):
                return PyccelMinus(arg1, arg2.args[0], simplify = True)
            dtype, precision = cls._calculate_dtype(arg1, arg2)
            # Constant-fold the sum of two literals.
            if isinstance(arg1, Literal) and isinstance(arg2, Literal):
                return convert_to_literal(arg1.python_value + arg2.python_value,
                                          dtype, precision)
            # 0 + a -> a (only when the result type would be unchanged).
            if dtype == arg2.dtype and precision == arg2.precision and \
                    isinstance(arg1, Literal) and arg1.python_value == 0:
                return arg2
            # a + 0 -> a (only when the result type would be unchanged).
            if dtype == arg1.dtype and precision == arg1.precision and \
                    isinstance(arg2, Literal) and arg2.python_value == 0:
                return arg1

        # Fold 'real + pure-imaginary' literal pairs into a single complex
        # literal (in either argument order).
        if isinstance(arg1, (LiteralInteger, LiteralFloat)) and \
                isinstance(arg2, LiteralComplex) and \
                arg2.real == LiteralFloat(0):
            return LiteralComplex(arg1, arg2.imag)
        elif isinstance(arg2, (LiteralInteger, LiteralFloat)) and \
                isinstance(arg1, LiteralComplex) and \
                arg1.real == LiteralFloat(0):
            return LiteralComplex(arg2, arg1.imag)
        else:
            return super().__new__(cls)

    @staticmethod
    def _handle_str_type(strs):
        # '+' is the one binary operator defined on strings (concatenation).
        dtype = NativeString()
        precision = None
        return dtype, precision

    def __repr__(self):
        return '{} + {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelMul(PyccelArithmeticOperator):
    """
    Class representing a call to the python multiplication operator.
    I.e:
        a * b
    is equivalent to:
        PyccelMul(a, b)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 13

    def __new__(cls, arg1, arg2, simplify = False):
        if simplify:
            # 1 * a -> a  and  a * 1 -> a
            if (arg1 == 1):
                return arg2
            if (arg2 == 1):
                return arg1
            # 0 * a -> literal 0 of the combined result type.
            if (arg1 == 0 or arg2 == 0):
                dtype, precision = cls._calculate_dtype(arg1, arg2)
                return convert_to_literal(0, dtype, precision)
            # (-1) * a -> -a  and  a * (-1) -> -a
            if (isinstance(arg1, PyccelUnarySub) and arg1.args[0] == 1):
                return PyccelUnarySub(arg2)
            if (isinstance(arg2, PyccelUnarySub) and arg2.args[0] == 1):
                return PyccelUnarySub(arg1)
            # Constant-fold the product of two literals.
            if isinstance(arg1, Literal) and isinstance(arg2, Literal):
                dtype, precision = cls._calculate_dtype(arg1, arg2)
                return convert_to_literal(arg1.python_value * arg2.python_value,
                                          dtype, precision)
        return super().__new__(cls)

    def __repr__(self):
        return '{} * {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelMinus(PyccelArithmeticOperator):
    """
    Class representing a call to the python subtraction operator.
    I.e:
        a - b
    is equivalent to:
        PyccelMinus(a, b)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 12

    def __new__(cls, arg1, arg2, simplify = False):
        if simplify:
            # a - (-b)  ->  a + b
            if isinstance(arg2, PyccelUnarySub):
                return PyccelAdd(arg1, arg2.args[0], simplify = True)
            # Constant-fold the difference of two literals.
            elif isinstance(arg1, Literal) and isinstance(arg2, Literal):
                dtype, precision = cls._calculate_dtype(arg1, arg2)
                return convert_to_literal(arg1.python_value - arg2.python_value,
                                          dtype, precision)
        # Fold 'real - pure-imaginary' literal pairs into a single complex
        # literal (negating the imaginary or real part as appropriate).
        if isinstance(arg1, LiteralFloat) and \
                isinstance(arg2, LiteralComplex) and \
                arg2.real == LiteralFloat(0):
            return LiteralComplex(arg1, -arg2.imag.python_value)
        elif isinstance(arg2, LiteralFloat) and \
                isinstance(arg1, LiteralComplex) and \
                arg1.real == LiteralFloat(0):
            return LiteralComplex(-arg2.python_value, arg1.imag)
        else:
            return super().__new__(cls)

    def __repr__(self):
        return '{} - {}'.format(repr(self.args[0]), repr(self.args[1]))
#==============================================================================
class PyccelDiv(PyccelArithmeticOperator):
    """
    Class representing a call to the python division operator.
    I.e:
        a / b
    is equivalent to:
        PyccelDiv(a, b)

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 13

    def __new__(cls, arg1, arg2, simplify=False):
        if simplify:
            # a / 1 -> a
            if (arg2 == 1):
                return arg1
        return super().__new__(cls)

    @staticmethod
    def _handle_integer_type(integers):
        # True division of integers always produces a default-precision real,
        # matching python 3 semantics.
        dtype = NativeReal()
        precision = default_precision['real']
        return dtype, precision

    def __repr__(self):
        # Bug fix: a duplicate __repr__ that formatted with ' + ' (and was
        # shadowed by this definition) has been removed; only the correct
        # ' / ' form remains.
        return '{} / {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelMod(PyccelArithmeticOperator):
    """
    Class representing a call to the python modulo operator.

    The expression ``a % b`` is represented as ``PyccelMod(a, b)``.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 13

    def __repr__(self):
        lhs, rhs = self.args
        return f'{lhs} % {rhs}'
#==============================================================================
class PyccelFloorDiv(PyccelArithmeticOperator):
    """
    Class representing a call to the python integer division operator.

    The expression ``a // b`` is represented as ``PyccelFloorDiv(a, b)``.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 13

    def __repr__(self):
        lhs, rhs = self.args
        return f'{lhs} // {rhs}'
#==============================================================================
class PyccelComparisonOperator(PyccelBinaryOperator):
    """Abstract superclass representing a python comparison
    operator with two arguments.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 7

    @staticmethod
    def _calculate_dtype(*args):
        # A comparison always evaluates to a default-precision boolean,
        # regardless of the operand dtypes.
        return NativeBool(), default_precision['bool']
#==============================================================================
class PyccelEq(PyccelComparisonOperator):
    """
    Class representing a call to the python equality operator.

    The expression ``a == b`` is represented as ``PyccelEq(a, b)``.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()

    def __repr__(self):
        lhs, rhs = self.args
        return f'{lhs} == {rhs}'
class PyccelNe(PyccelComparisonOperator):
    """
    Class representing a call to the python inequality operator.
    I.e:
        a != b
    is equivalent to:
        PyccelNe(a, b)
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    def __repr__(self):
        return '{} != {}'.format(self.args[0], self.args[1])
class PyccelLt(PyccelComparisonOperator):
    """
    Class representing a call to the python less than operator.
    I.e:
        a < b
    is equivalent to:
        PyccelLt(a, b)
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    def __repr__(self):
        return '{} < {}'.format(self.args[0], self.args[1])
class PyccelLe(PyccelComparisonOperator):
    """
    Class representing a call to the python less or equal operator.
    I.e:
        a <= b
    is equivalent to:
        PyccelLe(a, b)
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    def __repr__(self):
        return '{} <= {}'.format(self.args[0], self.args[1])
class PyccelGt(PyccelComparisonOperator):
    """
    Class representing a call to the python greater than operator.
    I.e:
        a > b
    is equivalent to:
        PyccelGt(a, b)
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    def __repr__(self):
        return '{} > {}'.format(self.args[0], self.args[1])
class PyccelGe(PyccelComparisonOperator):
    """
    Class representing a call to the python greater or equal operator.
    I.e:
        a >= b
    is equivalent to:
        PyccelGe(a, b)
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    def __repr__(self):
        return '{} >= {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelBooleanOperator(PyccelOperator):
    """ Abstract superclass representing a python
    boolean operator with two arguments
    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    # A boolean operator always yields a scalar bool, so the type/shape
    # information is fixed once at class level instead of being computed
    # per instance.
    dtype = NativeBool()
    precision = default_precision['bool']
    rank = 0
    shape = ()
    order = None
    __slots__ = ()
    def _set_order(self):
        # No-op: `order` is a fixed class attribute (see above).
        pass
    def _set_dtype(self):
        # No-op: dtype/precision are fixed class attributes.
        pass
    def _set_shape_rank(self):
        # No-op: shape/rank are fixed class attributes.
        pass
#==============================================================================
class PyccelAnd(PyccelBooleanOperator):
    """
    Class representing a call to the python AND operator.

    The expression ``a and b`` is represented as ``PyccelAnd(a, b)``.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 5

    def _handle_precedence(self, args):
        # Run the generic precedence handling first, then additionally wrap
        # any bare `or` sub-expression so the printed form stays unambiguous.
        handled = PyccelBooleanOperator._handle_precedence(self, args)
        wrapped = []
        for operand in handled:
            if isinstance(operand, PyccelOr):
                operand = PyccelAssociativeParenthesis(operand)
            wrapped.append(operand)
        return tuple(wrapped)

    def __repr__(self):
        lhs, rhs = self.args
        return f'{lhs} and {rhs}'
#==============================================================================
class PyccelOr(PyccelBooleanOperator):
    """
    Class representing a call to the python OR operator.

    The expression ``a or b`` is represented as ``PyccelOr(a, b)``.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ()
    _precedence = 4

    def _handle_precedence(self, args):
        # Run the generic precedence handling first, then additionally wrap
        # any bare `and` sub-expression so the printed form stays unambiguous.
        handled = PyccelBooleanOperator._handle_precedence(self, args)
        wrapped = []
        for operand in handled:
            if isinstance(operand, PyccelAnd):
                operand = PyccelAssociativeParenthesis(operand)
            wrapped.append(operand)
        return tuple(wrapped)

    def __repr__(self):
        lhs, rhs = self.args
        return f'{lhs} or {rhs}'
#==============================================================================
class PyccelIs(PyccelBooleanOperator):
    """Represents a is expression in the code.
    Examples
    --------
    >>> from pyccel.ast.operators import PyccelIs
    >>> from pyccel.ast.literals import Nil
    >>> from pyccel.ast.internals import PyccelSymbol
    >>> x = PyccelSymbol('x')
    >>> PyccelIs(x, Nil())
    PyccelIs(x, None)
    """
    __slots__ = ()
    _precedence = 7
    def __init__(self, arg1, arg2):
        super().__init__(arg1, arg2)
    @property
    def lhs(self):
        """ First operator argument"""
        return self._args[0]
    @property
    def rhs(self):
        """ Second operator argument"""
        return self._args[1]
    def __repr__(self):
        return '{} is {}'.format(self.args[0], self.args[1])
#==============================================================================
class PyccelIsNot(PyccelIs):
    """Represents an `is not` expression in the code.
    Examples
    --------
    >>> from pyccel.ast.operators import PyccelIsNot
    >>> from pyccel.ast.literals import Nil
    >>> from pyccel.ast.internals import PyccelSymbol
    >>> x = PyccelSymbol('x')
    >>> PyccelIsNot(x, Nil())
    PyccelIsNot(x, None)
    """
    __slots__ = ()
    def __repr__(self):
        return '{} is not {}'.format(self.args[0], self.args[1])
#==============================================================================
class IfTernaryOperator(PyccelOperator):
    """Represent a ternary conditional operator in the code, of the form (a if cond else b)
    Parameters
    ----------
    args :
        args : type list
        format : condition , value_if_true, value_if_false
    Examples
    --------
    >>> from pyccel.ast.internals import PyccelSymbol
    >>> from pyccel.ast.core import Assign
    >>> from pyccel.ast.operators import IfTernaryOperator
    >>> n = PyccelSymbol('n')
    >>> x = 5 if n > 1 else 2
    >>> IfTernaryOperator(PyccelGt(n, 1), 5, 2)
    IfTernaryOperator(PyccelGt(n, 1), 5, 2)
    """
    __slots__ = ('_dtype','_precision','_shape','_rank','_order')
    _precedence = 3
    def __init__(self, cond, value_true, value_false):
        super().__init__(cond, value_true, value_false)
        if self.stage == 'syntactic':
            # No type information is available before the semantic stage.
            return
        # Validate that both branches are supported, compatible values.
        if isinstance(value_true , Nil) or isinstance(value_false, Nil):
            errors.report('None is not implemented for Ternary Operator', severity='fatal')
        if isinstance(value_true , NativeString) or isinstance(value_false, NativeString):
            errors.report('String is not implemented for Ternary Operator', severity='fatal')
        if value_true.dtype != value_false.dtype:
            # Differing dtypes are only accepted between numeric types,
            # which can be promoted to a common type.
            if value_true.dtype not in NativeNumeric or value_false.dtype not in NativeNumeric:
                errors.report('The types are incompatible in IfTernaryOperator', severity='fatal')
        if value_false.rank != value_true.rank :
            errors.report('Ternary Operator results should have the same rank', severity='fatal')
        if value_false.shape != value_true.shape :
            errors.report('Ternary Operator results should have the same shape', severity='fatal')
    @staticmethod
    def _calculate_dtype(cond, value_true, value_false):
        """
        Sets the dtype and precision for IfTernaryOperator
        """
        if value_true.dtype in NativeNumeric and value_false.dtype in NativeNumeric:
            # Promote to the "larger" numeric type, using the ordering of
            # the NativeNumeric sequence.
            dtype = max([value_true.dtype, value_false.dtype], key = NativeNumeric.index)
        else:
            dtype = value_true.dtype
        precision = max([value_true.precision, value_false.precision])
        return dtype, precision
    @staticmethod
    def _calculate_shape_rank(cond, value_true, value_false):
        """
        Sets the shape and rank and the order for IfTernaryOperator
        """
        # Shapes of the two branches were checked equal in __init__,
        # so the true branch is representative.
        shape = value_true.shape
        rank = value_true.rank
        if rank is not None and rank > 1:
            if value_false.order != value_true.order :
                errors.report('Ternary Operator results should have the same order', severity='fatal')
        return shape, rank
    @property
    def cond(self):
        """
        The condition property for IfTernaryOperator class
        """
        return self._args[0]
    @property
    def value_true(self):
        """
        The value_if_cond_true property for IfTernaryOperator class
        """
        return self._args[1]
    @property
    def value_false(self):
        """
        The value_if_cond_false property for IfTernaryOperator class
        """
        return self._args[2]
#==============================================================================
Relational = (PyccelEq, PyccelNe, PyccelLt, PyccelLe, PyccelGt, PyccelGe, PyccelAnd, PyccelOr, PyccelNot, PyccelIs, PyccelIsNot)
| 30.134913
| 134
| 0.563918
|
from ..errors.errors import Errors, PyccelSemanticError
from .basic import PyccelAstNode
from .datatypes import (NativeBool, NativeInteger, NativeReal,
NativeComplex, NativeString, default_precision,
NativeNumeric)
from .literals import Literal, LiteralInteger, LiteralFloat, LiteralComplex, Nil
from .literals import convert_to_literal
errors = Errors()
__all__ = (
'PyccelOperator',
'PyccelPow',
'PyccelAdd',
'PyccelMinus',
'PyccelMul',
'PyccelDiv',
'PyccelMod',
'PyccelFloorDiv',
'PyccelEq',
'PyccelNe',
'PyccelLt',
'PyccelLe',
'PyccelGt',
'PyccelGe',
'PyccelAnd',
'PyccelOr',
'PyccelNot',
'PyccelAssociativeParenthesis',
'PyccelUnary',
'PyccelUnarySub',
'Relational',
'PyccelIs',
'PyccelIsNot',
'IfTernaryOperator'
)
def broadcast(shape_1, shape_2):
    """Return the numpy-style broadcast of two shapes.

    The shorter shape is left-padded with literal 1s, then dimensions are
    compared elementwise (symbolically, via sympy). Raises
    PyccelSemanticError when two constant dimensions are incompatible.
    """
    from pyccel.ast.sympy_helper import pyccel_to_sympy
    a = len(shape_1)
    b = len(shape_2)
    # Left-pad the shorter shape with 1s so both have equal length.
    if a>b:
        new_shape_2 = (LiteralInteger(1),)*(a-b) + tuple(shape_2)
        new_shape_1 = shape_1
    elif b>a:
        new_shape_1 = (LiteralInteger(1),)*(b-a) + tuple(shape_1)
        new_shape_2 = shape_2
    else:
        new_shape_2 = shape_2
        new_shape_1 = shape_1
    new_shape = []
    for e1,e2 in zip(new_shape_1, new_shape_2):
        used_names = set()
        symbol_map = {}
        # Compare dimensions symbolically so symbolic (non-constant) sizes
        # can be matched against each other or against literal 1s.
        sy_e1 = pyccel_to_sympy(e1, symbol_map, used_names)
        sy_e2 = pyccel_to_sympy(e2, symbol_map, used_names)
        if sy_e1 == sy_e2:
            new_shape.append(e1)
        elif sy_e1 == 1:
            new_shape.append(e2)
        elif sy_e2 == 1:
            new_shape.append(e1)
        elif sy_e1.is_constant() and not sy_e2.is_constant():
            # A constant vs. symbolic size cannot be checked statically;
            # keep the constant and assume they match at runtime.
            new_shape.append(e1)
        elif sy_e2.is_constant() and not sy_e1.is_constant():
            new_shape.append(e2)
        elif not sy_e2.is_constant() and not sy_e1.is_constant()\
                and not (sy_e1 - sy_e2).is_constant():
            new_shape.append(e1)
        else:
            # Two incompatible constant dimensions: mirror numpy's error.
            shape1_code = '({})'.format(' '.join([str(s)+',' for s in shape_1]))
            shape2_code = '({})'.format(' '.join([str(s)+',' for s in shape_2]))
            msg = 'operands could not be broadcast together with shapes {} {}'
            msg = msg.format(shape1_code, shape2_code)
            raise PyccelSemanticError(msg)
    return tuple(new_shape)
class PyccelOperator(PyccelAstNode):
    """Abstract base class for all pyccel operator nodes.

    Stores the (precedence-wrapped) arguments and, in the semantic stage,
    computes the resulting dtype/precision, shape/rank and array ordering.
    """
    __slots__ = ('_args', )
    _attribute_nodes = ('_args',)
    def __init__(self, *args):
        self._args = tuple(self._handle_precedence(args))
        if self.stage == 'syntactic':
            # Type information is only available in the semantic stage.
            super().__init__()
            return
        self._set_dtype()
        self._set_shape_rank()
        self._set_order()
        super().__init__()
    def _set_dtype(self):
        # Delegated to the subclass's _calculate_dtype.
        self._dtype, self._precision = self._calculate_dtype(*self._args)
    def _set_shape_rank(self):
        # Delegated to the subclass's _calculate_shape_rank.
        self._shape, self._rank = self._calculate_shape_rank(*self._args)
    @property
    def precedence(self):
        # Operator precedence (higher binds tighter); set by subclasses.
        return self._precedence
    def _handle_precedence(self, args):
        """Wrap lower-precedence arguments in parenthesis nodes so the
        printed expression preserves the original evaluation order.
        """
        # 17 is used as the precedence of atoms (no wrapping needed).
        precedence = [getattr(a, 'precedence', 17) for a in args]
        if min(precedence) <= self._precedence:
            new_args = []
            for i, (a,p) in enumerate(zip(args, precedence)):
                # Equal precedence only needs parentheses for non-leading
                # operands (left-associativity).
                if (p < self._precedence or (p == self._precedence and i != 0)):
                    new_args.append(PyccelAssociativeParenthesis(a))
                else:
                    new_args.append(a)
            args = tuple(new_args)
        return args
    def __str__(self):
        return repr(self)
    def _set_order(self):
        # Keep the common ordering if all array arguments agree;
        # otherwise fall back to C ordering.
        if self._rank is not None and self._rank > 1:
            if all(a.order == self._args[0].order for a in self._args):
                self._order = self._args[0].order
            else:
                self._order = 'C'
        else:
            self._order = None
    @property
    def args(self):
        # The (possibly parenthesis-wrapped) operator arguments.
        return self._args
class PyccelUnaryOperator(PyccelOperator):
__slots__ = ('_dtype', '_precision','_shape','_rank','_order')
def __init__(self, arg):
super().__init__(arg)
@staticmethod
def _calculate_dtype(*args):
a = args[0]
dtype = a.dtype
precision = a.precision
return dtype, precision
@staticmethod
def _calculate_shape_rank(*args):
a = args[0]
rank = a.rank
shape = a.shape
return shape, rank
class PyccelUnary(PyccelUnaryOperator):
__slots__ = ()
_precedence = 14
def _handle_precedence(self, args):
args = PyccelUnaryOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args)
return args
def __repr__(self):
return '+{}'.format(repr(self.args[0]))
class PyccelUnarySub(PyccelUnary):
__slots__ = ()
def __repr__(self):
return '-{}'.format(repr(self.args[0]))
class PyccelNot(PyccelUnaryOperator):
__slots__ = ()
_precedence = 6
@staticmethod
def _calculate_dtype(*args):
dtype = NativeBool()
precision = default_precision['bool']
return dtype, precision
@staticmethod
def _calculate_shape_rank(*args):
rank = 0
shape = ()
return shape, rank
def __repr__(self):
return 'not {}'.format(repr(self.args[0]))
class PyccelAssociativeParenthesis(PyccelUnaryOperator):
__slots__ = ()
_precedence = 18
def _handle_precedence(self, args):
return args
def __repr__(self):
return '({})'.format(repr(self.args[0]))
class PyccelBinaryOperator(PyccelOperator):
    """Abstract superclass representing a python operator with two arguments.

    Parameters
    ----------
    arg1: PyccelAstNode
        The first argument passed to the operator
    arg2: PyccelAstNode
        The second argument passed to the operator
    """
    __slots__ = ('_dtype','_precision','_shape','_rank','_order')
    def __init__(self, arg1, arg2, simplify = False):
        # `simplify` is consumed by the __new__ of concrete subclasses;
        # it is accepted here so the signatures stay compatible.
        super().__init__(arg1, arg2)
    @classmethod
    def _calculate_dtype(cls, *args):
        """Determine the result dtype/precision.

        Arguments are partitioned by dtype and dispatched to the matching
        ``_handle_*_type`` helper with complex > real > integer priority.
        Strings may not be mixed with numeric operands.
        """
        integers  = [a for a in args if a.dtype in (NativeInteger(),NativeBool())]
        reals     = [a for a in args if a.dtype is NativeReal()]
        complexes = [a for a in args if a.dtype is NativeComplex()]
        strs      = [a for a in args if a.dtype is NativeString()]
        if strs:
            # Bug fix: this assert previously sat *after* the return,
            # making it unreachable; it must run first so a mix of string
            # and numeric operands is rejected.
            assert len(integers + reals + complexes) == 0
            return cls._handle_str_type(strs)
        elif complexes:
            return cls._handle_complex_type(complexes)
        elif reals:
            return cls._handle_real_type(reals)
        elif integers:
            return cls._handle_integer_type(integers)
        else:
            raise TypeError('cannot determine the type of {}'.format(args))
    @staticmethod
    def _handle_str_type(strs):
        # Overridden by subclasses that support strings (e.g. PyccelAdd).
        raise TypeError("unsupported operand type(s) for /: 'str' and 'str'")
    @staticmethod
    def _handle_complex_type(complexes):
        # Result is complex with the largest precision of the complex args.
        dtype = NativeComplex()
        precision = max(a.precision for a in complexes)
        return dtype, precision
    @staticmethod
    def _handle_real_type(reals):
        dtype = NativeReal()
        precision = max(a.precision for a in reals)
        return dtype, precision
    @staticmethod
    def _handle_integer_type(integers):
        dtype = NativeInteger()
        precision = max(a.precision for a in integers)
        return dtype, precision
    @staticmethod
    def _calculate_shape_rank(*args):
        """Determine the result shape/rank, broadcasting array arguments."""
        strs = [a for a in args if a.dtype is NativeString()]
        if strs:
            other = [a for a in args if a.dtype in (NativeInteger(), NativeBool(), NativeReal(), NativeComplex())]
            assert len(other) == 0
            rank = 0
            shape = ()
        else:
            ranks = [a.rank for a in args]
            shapes = [a.shape for a in args]
            if None in ranks:
                rank = None
                shape = None
            elif all(sh is not None for tup in shapes for sh in tup):
                # All dimensions known: compute the broadcast shape.
                s = broadcast(args[0].shape, args[1].shape)
                shape = s
                rank = len(s)
            else:
                # Unknown dimensions: keep the max rank, shape undetermined.
                rank = max(a.rank for a in args)
                shape = [None]*rank
        return shape, rank
class PyccelArithmeticOperator(PyccelBinaryOperator):
__slots__ = ()
def _handle_precedence(self, args):
args = PyccelBinaryOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args)
return args
class PyccelPow(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 15
def __repr__(self):
return '{} ** {}'.format(self.args[0], self.args[1])
class PyccelAdd(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 12
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if isinstance(arg2, PyccelUnarySub):
return PyccelMinus(arg1, arg2.args[0], simplify = True)
dtype, precision = cls._calculate_dtype(arg1, arg2)
if isinstance(arg1, Literal) and isinstance(arg2, Literal):
return convert_to_literal(arg1.python_value + arg2.python_value,
dtype, precision)
if dtype == arg2.dtype and precision == arg2.precision and \
isinstance(arg1, Literal) and arg1.python_value == 0:
return arg2
if dtype == arg1.dtype and precision == arg1.precision and \
isinstance(arg2, Literal) and arg2.python_value == 0:
return arg1
if isinstance(arg1, (LiteralInteger, LiteralFloat)) and \
isinstance(arg2, LiteralComplex) and \
arg2.real == LiteralFloat(0):
return LiteralComplex(arg1, arg2.imag)
elif isinstance(arg2, (LiteralInteger, LiteralFloat)) and \
isinstance(arg1, LiteralComplex) and \
arg1.real == LiteralFloat(0):
return LiteralComplex(arg2, arg1.imag)
else:
return super().__new__(cls)
@staticmethod
def _handle_str_type(strs):
dtype = NativeString()
precision = None
return dtype, precision
def __repr__(self):
return '{} + {}'.format(self.args[0], self.args[1])
class PyccelMul(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 13
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if (arg1 == 1):
return arg2
if (arg2 == 1):
return arg1
if (arg1 == 0 or arg2 == 0):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(0, dtype, precision)
if (isinstance(arg1, PyccelUnarySub) and arg1.args[0] == 1):
return PyccelUnarySub(arg2)
if (isinstance(arg2, PyccelUnarySub) and arg2.args[0] == 1):
return PyccelUnarySub(arg1)
if isinstance(arg1, Literal) and isinstance(arg2, Literal):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(arg1.python_value * arg2.python_value,
dtype, precision)
return super().__new__(cls)
def __repr__(self):
return '{} * {}'.format(self.args[0], self.args[1])
class PyccelMinus(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 12
def __new__(cls, arg1, arg2, simplify = False):
if simplify:
if isinstance(arg2, PyccelUnarySub):
return PyccelAdd(arg1, arg2.args[0], simplify = True)
elif isinstance(arg1, Literal) and isinstance(arg2, Literal):
dtype, precision = cls._calculate_dtype(arg1, arg2)
return convert_to_literal(arg1.python_value - arg2.python_value,
dtype, precision)
if isinstance(arg1, LiteralFloat) and \
isinstance(arg2, LiteralComplex) and \
arg2.real == LiteralFloat(0):
return LiteralComplex(arg1, -arg2.imag.python_value)
elif isinstance(arg2, LiteralFloat) and \
isinstance(arg1, LiteralComplex) and \
arg1.real == LiteralFloat(0):
return LiteralComplex(-arg2.python_value, arg1.imag)
else:
return super().__new__(cls)
def __repr__(self):
return '{} - {}'.format(repr(self.args[0]), repr(self.args[1]))
class PyccelDiv(PyccelArithmeticOperator):
    """Represents a call to the python division operator (``a / b``)."""
    __slots__ = ()
    _precedence = 13
    def __new__(cls, arg1, arg2, simplify=False):
        # Simplification: a / 1 reduces to a.
        if simplify:
            if (arg2 == 1):
                return arg1
        return super().__new__(cls)
    @staticmethod
    def _handle_integer_type(integers):
        # True division always promotes integer operands to a real result.
        dtype = NativeReal()
        precision = default_precision['real']
        return dtype, precision
    def __repr__(self):
        # Bug fix: a duplicate, dead __repr__ preceding this one printed
        # '+' instead of '/'; the misleading duplicate has been removed.
        return '{} / {}'.format(self.args[0], self.args[1])
class PyccelMod(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 13
def __repr__(self):
return '{} % {}'.format(self.args[0], self.args[1])
class PyccelFloorDiv(PyccelArithmeticOperator):
__slots__ = ()
_precedence = 13
def __repr__(self):
return '{} // {}'.format(self.args[0], self.args[1])
class PyccelComparisonOperator(PyccelBinaryOperator):
__slots__ = ()
_precedence = 7
@staticmethod
def _calculate_dtype(*args):
dtype = NativeBool()
precision = default_precision['bool']
return dtype, precision
class PyccelEq(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} == {}'.format(self.args[0], self.args[1])
class PyccelNe(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} != {}'.format(self.args[0], self.args[1])
class PyccelLt(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} < {}'.format(self.args[0], self.args[1])
class PyccelLe(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} <= {}'.format(self.args[0], self.args[1])
class PyccelGt(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} > {}'.format(self.args[0], self.args[1])
class PyccelGe(PyccelComparisonOperator):
__slots__ = ()
def __repr__(self):
return '{} >= {}'.format(self.args[0], self.args[1])
class PyccelBooleanOperator(PyccelOperator):
dtype = NativeBool()
precision = default_precision['bool']
rank = 0
shape = ()
order = None
__slots__ = ()
def _set_order(self):
pass
def _set_dtype(self):
pass
def _set_shape_rank(self):
pass
class PyccelAnd(PyccelBooleanOperator):
__slots__ = ()
_precedence = 5
def _handle_precedence(self, args):
args = PyccelBooleanOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelOr) else a for a in args)
return args
def __repr__(self):
return '{} and {}'.format(self.args[0], self.args[1])
class PyccelOr(PyccelBooleanOperator):
__slots__ = ()
_precedence = 4
def _handle_precedence(self, args):
args = PyccelBooleanOperator._handle_precedence(self, args)
args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelAnd) else a for a in args)
return args
def __repr__(self):
return '{} or {}'.format(self.args[0], self.args[1])
class PyccelIs(PyccelBooleanOperator):
__slots__ = ()
_precedence = 7
def __init__(self, arg1, arg2):
super().__init__(arg1, arg2)
@property
def lhs(self):
return self._args[0]
@property
def rhs(self):
return self._args[1]
def __repr__(self):
return '{} is {}'.format(self.args[0], self.args[1])
class PyccelIsNot(PyccelIs):
__slots__ = ()
def __repr__(self):
return '{} is not {}'.format(self.args[0], self.args[1])
class IfTernaryOperator(PyccelOperator):
__slots__ = ('_dtype','_precision','_shape','_rank','_order')
_precedence = 3
def __init__(self, cond, value_true, value_false):
super().__init__(cond, value_true, value_false)
if self.stage == 'syntactic':
return
if isinstance(value_true , Nil) or isinstance(value_false, Nil):
errors.report('None is not implemented for Ternary Operator', severity='fatal')
if isinstance(value_true , NativeString) or isinstance(value_false, NativeString):
errors.report('String is not implemented for Ternary Operator', severity='fatal')
if value_true.dtype != value_false.dtype:
if value_true.dtype not in NativeNumeric or value_false.dtype not in NativeNumeric:
errors.report('The types are incompatible in IfTernaryOperator', severity='fatal')
if value_false.rank != value_true.rank :
errors.report('Ternary Operator results should have the same rank', severity='fatal')
if value_false.shape != value_true.shape :
errors.report('Ternary Operator results should have the same shape', severity='fatal')
@staticmethod
def _calculate_dtype(cond, value_true, value_false):
if value_true.dtype in NativeNumeric and value_false.dtype in NativeNumeric:
dtype = max([value_true.dtype, value_false.dtype], key = NativeNumeric.index)
else:
dtype = value_true.dtype
precision = max([value_true.precision, value_false.precision])
return dtype, precision
@staticmethod
def _calculate_shape_rank(cond, value_true, value_false):
shape = value_true.shape
rank = value_true.rank
if rank is not None and rank > 1:
if value_false.order != value_true.order :
errors.report('Ternary Operator results should have the same order', severity='fatal')
return shape, rank
@property
def cond(self):
return self._args[0]
@property
def value_true(self):
return self._args[1]
@property
def value_false(self):
return self._args[2]
Relational = (PyccelEq, PyccelNe, PyccelLt, PyccelLe, PyccelGt, PyccelGe, PyccelAnd, PyccelOr, PyccelNot, PyccelIs, PyccelIsNot)
| true
| true
|
f716dcc5929dc395b511c231b73a25ba28485635
| 607
|
py
|
Python
|
apps/shortener_app/migrations/0009_auto_20190123_1903.py
|
escrichov/shortener
|
f8a72edb0b40c20021541f5178f257590b478e02
|
[
"MIT"
] | 6
|
2018-12-16T12:35:18.000Z
|
2020-06-07T13:06:17.000Z
|
apps/shortener_app/migrations/0009_auto_20190123_1903.py
|
escrichov/shortener
|
f8a72edb0b40c20021541f5178f257590b478e02
|
[
"MIT"
] | 16
|
2019-06-10T19:10:01.000Z
|
2022-02-12T04:22:55.000Z
|
apps/shortener_app/migrations/0009_auto_20190123_1903.py
|
escrichov/shortener
|
f8a72edb0b40c20021541f5178f257590b478e02
|
[
"MIT"
] | 1
|
2019-01-18T00:06:13.000Z
|
2019-01-18T00:06:13.000Z
|
# Generated by Django 2.1.5 on 2019-01-23 19:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``url_active`` and ``url_active_last_checked`` to ShortUrl."""

    dependencies = [
        ('shortener_app', '0008_auto_20181205_2300'),
    ]

    operations = [
        migrations.AddField(
            model_name='shorturl',
            name='url_active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='shorturl',
            name='url_active_last_checked',
            # NOTE(review): datetime.datetime.utcnow produces naive
            # datetimes; with USE_TZ projects Django expects
            # django.utils.timezone.now. Left unchanged because applied
            # migrations must stay frozen — fix at the model level instead.
            field=models.DateTimeField(default=datetime.datetime.utcnow),
        ),
    ]
| 24.28
| 73
| 0.614498
|
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener_app', '0008_auto_20181205_2300'),
]
operations = [
migrations.AddField(
model_name='shorturl',
name='url_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='shorturl',
name='url_active_last_checked',
field=models.DateTimeField(default=datetime.datetime.utcnow),
),
]
| true
| true
|
f716dcdbe7007bf7839728fd2b9195c4311f4c1c
| 2,821
|
py
|
Python
|
src/elaspic_rest_api/app.py
|
elaspic/elaspic-rest-api
|
b1ed2dae1870b5d0c678d196e39c8c806959e640
|
[
"MIT"
] | null | null | null |
src/elaspic_rest_api/app.py
|
elaspic/elaspic-rest-api
|
b1ed2dae1870b5d0c678d196e39c8c806959e640
|
[
"MIT"
] | null | null | null |
src/elaspic_rest_api/app.py
|
elaspic/elaspic-rest-api
|
b1ed2dae1870b5d0c678d196e39c8c806959e640
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
from typing import Any, Dict
import sentry_sdk
from fastapi import BackgroundTasks, FastAPI
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
import elaspic_rest_api
from elaspic_rest_api import config
from elaspic_rest_api import jobsubmitter as js
from elaspic_rest_api.types import DataIn
logger = logging.getLogger(__name__)
description = """\
This page lists `ELASPIC` REST API endpoints that are available for evaluating the effect
of mutations on protein stability and protein interaction affinity.
Please see the source code repository for more information:
<https://gitlab.com/elaspic/elaspic-rest-api/>.
"""
app = FastAPI(
title="ELASPIC REST API",
description=description,
version=elaspic_rest_api.__version__,
root_path=config.ROOT_PATH,
)
js_data: Dict[str, Any] = {}
@app.post("/", status_code=200)
async def submit_job(data_in: DataIn, background_tasks: BackgroundTasks):
    """Queue an ELASPIC job for background submission.

    Returns ``{"status": "submitted"}`` when the API token matches and
    ``{"status": "restricted"}`` otherwise.
    """
    import secrets  # local import keeps the module import block untouched

    # Security: compare tokens in constant time so the check does not leak
    # information through timing differences.
    if secrets.compare_digest(str(data_in.api_token), str(config.API_TOKEN)):
        background_tasks.add_task(js.submit_job, data_in, js_data["ds"])
        return {"status": "submitted"}
    else:
        return {"status": "restricted"}
@app.get("/status", status_code=200)
async def get_pre_qsub_queue(api_token: str):
    """Return a snapshot of the job-submitter queues for monitoring.

    Requires the shared API token; returns an empty dict otherwise.
    """
    queues_to_monitor = [
        "pre_qsub_queue",
        "qsub_queue",
        "validation_queue",
        "elaspic2_pending_queue",
        "elaspic2_running_queue",
    ]
    ds: js.DataStructures = js_data["ds"]
    if api_token == config.API_TOKEN:
        result = {
            # NOTE(review): reaches into the queue's private `._queue`
            # attribute to list pending items without consuming them —
            # fragile across library versions; confirm before upgrading.
            **{name: list(getattr(ds, name)._queue) for name in queues_to_monitor},
            "monitored_jobs": [
                (tuple(key), list(values)) for key, values in ds.monitored_jobs.items()
            ],
        }
    else:
        result = {}
    return result
@app.get("/_ah/warmup", include_in_schema=False)
def warmup():
    """App Engine warmup endpoint; startup hooks do the real work."""
    return {}
@app.on_event("startup")
async def on_startup() -> None:
    """Create the job-submitter data structures and launch its task."""
    js_data["ds"] = js.DataStructures()
    js_data["js_task"] = asyncio.create_task(
        js.start_jobsubmitter(js_data["ds"]), name="jobsubmitter"
    )
    # Yield briefly so an immediate crash (e.g. bad config) surfaces
    # during startup instead of silently later.
    await asyncio.sleep(0.1)
    js_task = js_data["js_task"]
    if js_task.done() and (error := js_task.exception()):
        js_task.print_stack()
        # Bug fix: asyncio.Task exposes get_name(), not a `.name`
        # attribute; the old code raised AttributeError in this path.
        logger.error("Task %s finished with an error: %s", js_task.get_name(), error)
@app.on_event("shutdown")
async def on_shutdown() -> None:
    """Cancel the job-submitter task and finalize any lingering jobs."""
    js_task = js_data["js_task"]
    js_task.cancel()
    if js_task.done() and (error := js_task.exception()):
        js_task.print_stack()
        # Bug fix: asyncio.Task exposes get_name(), not a `.name` attribute.
        logger.error("Task %s finished with an error: %s", js_task.get_name(), error)
    await js.finalize_lingering_jobs(js_data["ds"])
    try:
        await js_task
    except asyncio.CancelledError:
        # Expected: the task was cancelled by this handler just above.
        pass
if config.SENTRY_DSN:
sentry_sdk.init(config.SENTRY_DSN, traces_sample_rate=1.0)
app = SentryAsgiMiddleware(app) # type: ignore
| 27.930693
| 89
| 0.686636
|
import asyncio
import logging
from typing import Any, Dict
import sentry_sdk
from fastapi import BackgroundTasks, FastAPI
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
import elaspic_rest_api
from elaspic_rest_api import config
from elaspic_rest_api import jobsubmitter as js
from elaspic_rest_api.types import DataIn
logger = logging.getLogger(__name__)
description = """\
This page lists `ELASPIC` REST API endpoints that are available for evaluating the effect
of mutations on protein stability and protein interaction affinity.
Please see the source code repository for more information:
<https://gitlab.com/elaspic/elaspic-rest-api/>.
"""
app = FastAPI(
title="ELASPIC REST API",
description=description,
version=elaspic_rest_api.__version__,
root_path=config.ROOT_PATH,
)
js_data: Dict[str, Any] = {}
@app.post("/", status_code=200)
async def submit_job(data_in: DataIn, background_tasks: BackgroundTasks):
if data_in.api_token == config.API_TOKEN:
background_tasks.add_task(js.submit_job, data_in, js_data["ds"])
return {"status": "submitted"}
else:
return {"status": "restricted"}
@app.get("/status", status_code=200)
async def get_pre_qsub_queue(api_token: str):
queues_to_monitor = [
"pre_qsub_queue",
"qsub_queue",
"validation_queue",
"elaspic2_pending_queue",
"elaspic2_running_queue",
]
ds: js.DataStructures = js_data["ds"]
if api_token == config.API_TOKEN:
result = {
**{name: list(getattr(ds, name)._queue) for name in queues_to_monitor},
"monitored_jobs": [
(tuple(key), list(values)) for key, values in ds.monitored_jobs.items()
],
}
else:
result = {}
return result
@app.get("/_ah/warmup", include_in_schema=False)
def warmup():
return {}
@app.on_event("startup")
async def on_startup() -> None:
    """Create the job-submitter data structures and launch its task."""
    js_data["ds"] = js.DataStructures()
    js_data["js_task"] = asyncio.create_task(
        js.start_jobsubmitter(js_data["ds"]), name="jobsubmitter"
    )
    # Yield briefly so an immediate crash (e.g. bad config) surfaces
    # during startup instead of silently later.
    await asyncio.sleep(0.1)
    js_task = js_data["js_task"]
    if js_task.done() and (error := js_task.exception()):
        js_task.print_stack()
        # Bug fix: asyncio.Task exposes get_name(), not a `.name`
        # attribute; the old code raised AttributeError in this path.
        logger.error("Task %s finished with an error: %s", js_task.get_name(), error)
@app.on_event("shutdown")
async def on_shutdown() -> None:
    """Cancel the job-submitter task and finalize any lingering jobs."""
    js_task = js_data["js_task"]
    js_task.cancel()
    if js_task.done() and (error := js_task.exception()):
        js_task.print_stack()
        # Bug fix: asyncio.Task exposes get_name(), not a `.name` attribute.
        logger.error("Task %s finished with an error: %s", js_task.get_name(), error)
    await js.finalize_lingering_jobs(js_data["ds"])
    try:
        await js_task
    except asyncio.CancelledError:
        # Expected: the task was cancelled by this handler just above.
        pass
# Wrap the ASGI app with Sentry error reporting when a DSN is configured.
if config.SENTRY_DSN:
    sentry_sdk.init(config.SENTRY_DSN, traces_sample_rate=1.0)
    app = SentryAsgiMiddleware(app)
| true
| true
|
f716dd589103e434f5c06b8eb30e4fe38d5df1b6
| 1,790
|
py
|
Python
|
rpncalc/binaryoperator.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | null | null | null |
rpncalc/binaryoperator.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | 11
|
2021-11-10T04:28:51.000Z
|
2022-02-25T05:19:22.000Z
|
rpncalc/binaryoperator.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | null | null | null |
import numpy
import math
from rpncalc.classes import ActionEnum
class BinaryOperator(ActionEnum):
    """Calculator operators that pop two operands and push one result."""

    addition = '+'
    subtraction = '-'
    multiplication = '*'
    division = '/'
    integer_division = '//'
    power = '^'
    atan2 = 'atan2', \
        "Returns quadrant correct polar coordinate theta = atan2(y,x)"
    log_base = 'log_base', \
        "Logarithm with prior arg as base" \
        "Example: 1000 10 log_base returns 3"
    equals = '='
    gt = '>'
    gte = '>='
    lt = '<'
    lte = '<='
    choose = 'choose'
    combinations = 'combo'

    def action(self):
        """Pop two operands (v1 on top, v0 beneath) and push the result."""
        v1, v0 = self.take_2()
        cls = type(self)
        if self is cls.addition:
            result = v0 + v1
        elif self is cls.subtraction:
            result = v0 - v1
        elif self is cls.multiplication:
            result = v0 * v1
        elif self is cls.division:
            result = v0 / v1
        elif self is cls.integer_division:
            result = v0 // v1
        elif self is cls.power:
            result = numpy.power(v0, v1)
        elif self is cls.log_base:
            result = numpy.log(v0) / numpy.log(v1)
        elif self is cls.atan2:
            result = numpy.arctan2(v0, v1)
        elif self is cls.equals:
            result = v0 == v1
        elif self is cls.gt:
            result = v0 > v1
        elif self is cls.gte:
            result = v0 >= v1
        elif self is cls.lt:
            result = v0 < v1
        elif self is cls.lte:
            result = v0 <= v1
        elif self is cls.choose:
            # Falling factorial n! / (n-k)!
            result = math.factorial(v0) // math.factorial(v0 - v1)
        elif self is cls.combinations:
            # Binomial coefficient n! / (k! (n-k)!)
            result = math.factorial(v0) // (
                math.factorial(v0 - v1) * math.factorial(v1))
        else:
            msg = f"Missing case match for {self}"
            raise NotImplementedError(msg)
        self.push(result)
| 25.211268
| 70
| 0.430726
|
import numpy
import math
from rpncalc.classes import ActionEnum
class BinaryOperator(ActionEnum):
    """Calculator operators that pop two operands and push one result."""
    addition = '+'
    subtraction = '-'
    multiplication = '*'
    division = '/'
    integer_division = '//'
    power = '^'
    atan2 = 'atan2', \
        "Returns quadrant correct polar coordinate theta = atan2(y,x)"
    log_base = 'log_base', \
        "Logarithm with prior arg as base" \
        "Example: 1000 10 log_base returns 3"
    equals = '='
    gt = '>'
    gte = '>='
    lt = '<'
    lte = '<='
    choose = 'choose'
    combinations = 'combo'
    def action(self):
        """Pop two values (v1 on top, v0 beneath) and push the result of
        applying this operator."""
        v1, v0 = self.take_2()
        o = type(self)
        match self:
            case o.addition:
                r = v0+v1
            case o.subtraction:
                r = v0-v1
            case o.multiplication:
                r = v0*v1
            case o.division:
                r = v0/v1
            case o.integer_division:
                r = v0//v1
            case o.power:
                r = numpy.power(v0, v1)
            case o.log_base:
                r = numpy.log(v0)/numpy.log(v1)
            case o.atan2:
                r = numpy.arctan2(v0, v1)
            case o.equals:
                r = v0 == v1
            case o.gt:
                r = v0 > v1
            case o.gte:
                r = v0 >= v1
            case o.lt:
                r = v0 < v1
            case o.lte:
                r = v0 <= v1
            case o.choose:
                # NOTE(review): this computes the falling factorial n!/(n-k)!
                # (permutations), while 'combo' computes n choose k — confirm
                # the 'choose' naming is intentional.
                f = math.factorial
                r = f(v0)//(f(v0-v1))
            case o.combinations:
                f = math.factorial
                r = f(v0)//(f(v0-v1)*f(v1))
            case _:
                msg = f"Missing case match for {self}"
                raise NotImplementedError(msg)
        self.push(r)
| true
| true
|
f716de749187532c276040a0b1e00777b44337ce
| 592
|
py
|
Python
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 71
|
2021-01-23T17:34:33.000Z
|
2022-03-29T13:11:29.000Z
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 38
|
2021-01-24T21:56:30.000Z
|
2022-03-08T18:49:00.000Z
|
api_logic_server_cli/project_prototype/util.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 14
|
2021-01-23T16:20:44.000Z
|
2022-03-24T10:48:28.000Z
|
import sqlite3
from os import path
import sys
import logging
# Shared application logger used by every helper in this module.
app_logger = logging.getLogger("api_logic_server_app")


def log(msg: object) -> None:
    """Log `msg` at INFO level via the shared application logger.

    Bug fix: the annotation was `any` (the builtin function) instead of a
    real type; `object` accepts any value without importing typing.Any.
    The commented-out debug print was removed as dead code.
    """
    app_logger.info(msg)
def connection() -> sqlite3.Connection:
    """Open (creating if needed) the SQLite database stored next to this module."""
    module_dir: str = path.dirname(path.realpath(__file__))
    log(module_dir)
    return sqlite3.connect(path.join(module_dir, "sqlitedata.db"))
def dbpath(dbname: str) -> str:
    """Return the absolute path of `dbname` located beside this module."""
    module_dir: str = path.dirname(path.realpath(__file__))
    log('ROOT: ' + module_dir)
    full_path: str = path.join(module_dir, dbname)
    log('DBPATH: ' + full_path)
    return full_path
| 67
| 0.675676
|
import sqlite3
from os import path
import sys
import logging
# Shared application logger used by every helper in this module.
app_logger = logging.getLogger("api_logic_server_app")
def log(msg: any) -> None:
    """Log `msg` at INFO level."""
    # NOTE(review): `any` here is the builtin function, not typing.Any.
    app_logger.info(msg)
def connection() -> sqlite3.Connection:
    """Open a connection to sqlitedata.db located next to this module."""
    ROOT: str = path.dirname(path.realpath(__file__))
    log(ROOT)
    # sqlite3.connect creates the file if it does not already exist.
    _connection = sqlite3.connect(path.join(ROOT, "sqlitedata.db"))
    return _connection
def dbpath(dbname: str) -> str:
    """Return the absolute path of `dbname` next to this module."""
    ROOT: str = path.dirname(path.realpath(__file__))
    log('ROOT: '+ROOT)
    PATH: str = path.join(ROOT, dbname)
    log('DBPATH: '+PATH)
    return PATH
| true
| true
|
f716ded79220bf0850640a2412069bb981807960
| 3,633
|
py
|
Python
|
meegkit/utils/trca.py
|
ludovicdmt/python-meegkit
|
4aa4ba49354b996be20eda41660a550d1bd31f9a
|
[
"BSD-3-Clause"
] | null | null | null |
meegkit/utils/trca.py
|
ludovicdmt/python-meegkit
|
4aa4ba49354b996be20eda41660a550d1bd31f9a
|
[
"BSD-3-Clause"
] | null | null | null |
meegkit/utils/trca.py
|
ludovicdmt/python-meegkit
|
4aa4ba49354b996be20eda41660a550d1bd31f9a
|
[
"BSD-3-Clause"
] | null | null | null |
"""TRCA utils."""
import numpy as np
from scipy.signal import filtfilt, cheb1ord, cheby1
from scipy import stats
def round_half_up(num, decimals=0):
    """Round `num` to `decimals` decimal places with ties rounding up.

    The rules for the last kept digit are:
        from 0 to 4 rounds down
        from 5 to 9 rounds up
    (ties round toward +inf, so -2.5 rounds to -2).

    Parameters
    ----------
    num : float
        Number to round.
    decimals : int
        Number of decimal places to keep (default=0).

    Returns
    -------
    int | float
        An `int` when `decimals <= 0` (preserving historical behavior),
        otherwise a `float` rounded to `decimals` places.
    """
    multiplier = 10 ** decimals
    rounded = np.floor(num * multiplier + 0.5) / multiplier
    # Bug fix: the original applied int() unconditionally, which truncated
    # away the requested decimal places whenever decimals > 0.
    return int(rounded) if decimals <= 0 else rounded
def normfit(data, ci=0.95):
    """Estimate a normal fit: mean, std, and their confidence intervals.

    Parameters
    ----------
    data : array-like
        Input data.
    ci : float
        Confidence level (default=0.95).

    Returns
    -------
    mean : float
        Sample mean.
    sigma : float
        Sample standard deviation (ddof=1).
    [lo, hi] : list of float
        Confidence interval of the mean (Student's t).
    [sigma_lo, sigma_hi] : list of float
        Confidence interval of the standard deviation (chi-square).
    """
    samples = np.asarray(data, dtype=float)
    n_samples = len(samples)
    mean = np.mean(samples)
    half_width = stats.sem(samples) * stats.t.ppf((1 + ci) / 2., n_samples - 1)
    variance = np.var(data, ddof=1)
    dof = n_samples - 1
    var_upper = variance * dof / stats.chi2.ppf((1 - ci) / 2, dof)
    var_lower = variance * dof / stats.chi2.ppf(1 - (1 - ci) / 2, dof)
    sigma = np.sqrt(variance)
    return (mean, sigma,
            [mean - half_width, mean + half_width],
            [np.sqrt(var_lower), np.sqrt(var_upper)])
def itr(n, p, t):
    """Compute information transfer rate (ITR).

    Definition in [1]_.

    Parameters
    ----------
    n : int
        Number of targets.
    p : float
        Target identification accuracy (0 <= p <= 1).
    t : float
        Average time for a selection (s).

    Returns
    -------
    itr : float
        Information transfer rate [bits/min].

    Raises
    ------
    ValueError
        If `p` is outside [0, 1] or below chance level (1 / n).

    References
    ----------
    .. [1] M. Cheng, X. Gao, S. Gao, and D. Xu,
       "Design and Implementation of a Brain-Computer Interface With High
       Transfer Rates", IEEE Trans. Biomed. Eng. 49, 1181-1186, 2002.
    """
    if p < 0 or 1 < p:
        raise ValueError('Accuracy need to be between 0 and 1.')
    if p < 1 / n:
        # Below-chance accuracy makes the ITR formula meaningless.
        # (The original code had an unreachable `itr = 0` after this raise;
        # that dead statement has been removed.)
        raise ValueError('ITR might be incorrect because accuracy < chance')
    if p == 1:
        # log2(p) is 0 and the (1 - p) term vanishes; avoid log2(0).
        return np.log2(n) * 60 / t
    return (np.log2(n) + p * np.log2(p) +
            (1 - p) * np.log2((1 - p) / (n - 1))) * 60 / t
def bandpass(eeg, sfreq, Wp, Ws):
    """Filter bank design for decomposing EEG data into sub-band components.

    Parameters
    ----------
    eeg : np.array, shape=(n_samples, n_chans[, n_trials])
        Training data.
    sfreq : int
        Sampling frequency of the data.
    Wp : 2-tuple
        Passband for Chebyshev filter.
    Ws : 2-tuple
        Stopband for Chebyshev filter.

    Returns
    -------
    y : np.array
        Sub-band components decomposed by a filter bank.

    See Also
    --------
    scipy.signal.cheb1ord : Chebyshev type I filter order selection.
    """
    # Minimum order for <= 3 dB passband ripple and >= 40 dB stopband
    # attenuation, then the corresponding type-I Chebyshev design.
    order, natural_freq = cheb1ord(Wp, Ws, 3, 40, fs=sfreq)
    numer, denom = cheby1(order, 0.5, natural_freq, btype="bandpass", fs=sfreq)
    # padtype='odd' with this padlen reproduces MATLAB's filtfilt defaults
    # (https://dsp.stackexchange.com/a/47945).
    pad = 3 * (max(len(numer), len(denom)) - 1)
    return filtfilt(numer, denom, eeg, axis=0, padtype='odd', padlen=pad)
| 26.136691
| 78
| 0.570603
|
import numpy as np
from scipy.signal import filtfilt, cheb1ord, cheby1
from scipy import stats
def round_half_up(num, decimals=0):
    """Round `num` half-up to `decimals` places (0-4 down, 5-9 up)."""
    multiplier = 10 ** decimals
    # NOTE(review): int() discards the decimals whenever decimals > 0 —
    # looks unintended; confirm callers only use decimals=0.
    return int(np.floor(num * multiplier + 0.5) / multiplier)
def normfit(data, ci=0.95):
    """Return (mean, std, mean CI, std CI) for `data` at confidence `ci`."""
    arr = 1.0 * np.array(data)
    num = len(arr)
    avg, std_err = np.mean(arr), stats.sem(arr)
    # Half-width of the confidence interval of the mean (Student's t).
    h_int = std_err * stats.t.ppf((1 + ci) / 2., num - 1)
    var = np.var(data, ddof=1)
    # Chi-square based confidence interval for the variance.
    var_ci_upper = var * (num - 1) / stats.chi2.ppf((1 - ci) / 2, num - 1)
    var_ci_lower = var * (num - 1) / stats.chi2.ppf(1 - (1 - ci) / 2, num - 1)
    sigma = np.sqrt(var)
    sigma_ci_lower = np.sqrt(var_ci_lower)
    sigma_ci_upper = np.sqrt(var_ci_upper)
    return avg, sigma, [avg - h_int, avg +
                        h_int], [sigma_ci_lower, sigma_ci_upper]
def itr(n, p, t):
    """Information transfer rate [bits/min] for an n-target selection task
    with accuracy p and average selection time t seconds."""
    itr = 0
    if (p < 0 or 1 < p):
        raise ValueError('Accuracy need to be between 0 and 1.')
    elif (p < 1 / n):
        raise ValueError('ITR might be incorrect because accuracy < chance')
        itr = 0  # NOTE(review): unreachable after the raise — dead code.
    elif (p == 1):
        itr = np.log2(n) * 60 / t
    else:
        itr = (np.log2(n) + p * np.log2(p) + (1 - p) *
               np.log2((1 - p) / (n - 1))) * 60 / t
    return itr
def bandpass(eeg, sfreq, Wp, Ws):
    """Zero-phase Chebyshev-I bandpass filter of `eeg` along axis 0."""
    # Minimum order for <= 3 dB passband ripple, >= 40 dB stopband attenuation.
    N, Wn = cheb1ord(Wp, Ws, 3, 40, fs=sfreq)
    B, A = cheby1(N, 0.5, Wn, btype="bandpass", fs=sfreq)
    # padtype/padlen mirror MATLAB's filtfilt defaults.
    y = filtfilt(B, A, eeg, axis=0, padtype='odd',
                 padlen=3 * (max(len(B), len(A)) - 1))
    return y
| true
| true
|
f716df4ee14d7c3327f654e758f30bd597015ed0
| 4,458
|
py
|
Python
|
book_code_selected_keras_examples/cifar/cifar10_cnn.py
|
IntuitionMachine/DeepLearningGuide
|
7270b13ee5783a23482738cdf9d355c10d25360d
|
[
"MIT"
] | 1
|
2019-05-02T02:53:34.000Z
|
2019-05-02T02:53:34.000Z
|
book_code_selected_keras_examples/cifar/cifar10_cnn.py
|
IntuitionMachine/DeepLearningGuide
|
7270b13ee5783a23482738cdf9d355c10d25360d
|
[
"MIT"
] | null | null | null |
book_code_selected_keras_examples/cifar/cifar10_cnn.py
|
IntuitionMachine/DeepLearningGuide
|
7270b13ee5783a23482738cdf9d355c10d25360d
|
[
"MIT"
] | null | null | null |
'''Train a simple deep CNN on the CIFAR10 small images dataset.
It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
from time import time
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import TensorBoard
import os
# Training hyper-parameters.
batch_size = 32
num_classes = 10
epochs = 10
data_augmentation = True
num_predictions = 20  # NOTE(review): unused in the visible script — confirm.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices (one-hot encoding).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Two Conv-Conv-Pool blocks with dropout, followed by a dense classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# Print a summary of the model architecture.
model.summary()
# Initiate RMSprop optimizer.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Train the model using RMSprop with categorical cross-entropy.
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# NOTE(review): this callback uses the absolute path "/logs/..." and is never
# passed to fit()/fit_generator() below — confirm both are intentional.
tensorboard = TensorBoard(log_dir="/logs/{}".format(time()), write_graph=True, write_images=True)
# Train either directly on the arrays, or on randomly augmented batches.
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)
# Save model and weights.
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Reload the saved model and sanity-check it on the test set.
model = load_model('saved_models/keras_cifar10_trained_model.h5')
# Bug fix: model.predict() returns class probabilities, not (loss, accuracy),
# so the original printed predictions[0] / predictions[1] under the labels
# "Test loss" / "Test accuracy". Use evaluate() to reproduce the metrics
# with the reloaded model instead.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| 33.772727
| 97
| 0.711306
|
from __future__ import print_function
from time import time
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import TensorBoard
import os
# Training hyper-parameters.
batch_size = 32
num_classes = 10
epochs = 10
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# Load CIFAR-10, report shapes, and one-hot encode the labels.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Two Conv-Conv-Pool blocks with dropout, then a dense classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()
# RMSprop optimizer with categorical cross-entropy loss.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# NOTE(review): callback is never passed to fit() below — confirm intent.
tensorboard = TensorBoard(log_dir="/logs/{}".format(time()), write_graph=True, write_images=True)
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)
# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Load the saved model and run it on some data
model = load_model('saved_models/keras_cifar10_trained_model.h5')
scores = model.predict(x_test, batch_size=1)
# NOTE(review): predict() returns class probabilities, so these two prints
# show predictions for the first two samples, not loss/accuracy — likely a
# copy-paste bug from the evaluate() block above.
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| true
| true
|
f716dfa33ddc3edf22a30911544807cfce14bd8a
| 247,289
|
py
|
Python
|
tensorflow/python/framework/ops.py
|
jraman/tensorflow
|
41c6bf7c6215bea9bfb9bf0a9b63f2084e6f3058
|
[
"Apache-2.0"
] | 1
|
2020-10-01T16:52:51.000Z
|
2020-10-01T16:52:51.000Z
|
tensorflow/python/framework/ops.py
|
rakeshacharya-d/tensorflow
|
9028828d3b8a2a622f7203a317002cc749531695
|
[
"Apache-2.0"
] | 1
|
2022-02-10T01:08:48.000Z
|
2022-02-10T01:08:48.000Z
|
tensorflow/python/framework/ops.py
|
rakeshacharya-d/tensorflow
|
9028828d3b8a2a622f7203a317002cc749531695
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
# pywrap_tensorflow must be imported first to avoid profobuf issues.
# (b/143110113)
# pylint: disable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import pywrap_tfe
# pylint: enable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
# Lazily import the autograph context to avoid a circular import at load time.
ag_ctx = LazyLoader(
    "ag_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
# Telemetry gauge flipped when ops.enable_eager_execution() is called.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/ops_eager_execution",
    "Whether ops.enable_eager_execution() is called.")
# Short aliases for private names used throughout this module.
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
  """Returns a unique identifier for this Tensor."""
  # `_id` is assigned from uid() in Tensor.__init__, so it is unique for the
  # lifetime of the process.
  return tensor._id  # pylint: disable=protected-access
class _UserDeviceSpec(object):
  """Store user-specified device and provide computation of merged device."""
  def __init__(self, device_name_or_function):
    # Accepts a MergeDevice, a callable NodeDef -> device string, None,
    # or a raw device string; each case is normalized below.
    self._device_name_or_function = device_name_or_function
    self.display_name = str(self._device_name_or_function)
    self.function = device_name_or_function
    self.raw_string = None
    if isinstance(device_name_or_function, pydev.MergeDevice):
      self.is_null_merge = device_name_or_function.is_null_merge
    elif callable(device_name_or_function):
      self.is_null_merge = False
      # Build a "func_name<file, line>" display name for debugging.
      dev_func = self._device_name_or_function
      func_name = function_utils.get_func_name(dev_func)
      func_code = function_utils.get_func_code(dev_func)
      if func_code:
        fname = func_code.co_filename
        lineno = func_code.co_firstlineno
      else:
        fname = "unknown"
        lineno = -1
      self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
    elif device_name_or_function is None:
      # NOTE(taylorrobie): This MUST be False. None signals a break in the
      # device stack, so `is_null_merge` must be False for such a case to
      # allow callers to safely skip over null merges without missing a None.
      self.is_null_merge = False
    else:
      # Raw device string: wrap it in a MergeDevice for fast merging.
      self.raw_string = device_name_or_function
      self.function = pydev.merge_device(device_name_or_function)
      self.is_null_merge = self.function.is_null_merge
    # We perform this check in __init__ because it is of non-trivial cost,
    # and self.string_merge is typically called many times.
    self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
  def string_merge(self, node_def):
    # Merge this spec's device with `node_def`'s device, returning a string;
    # the MergeDevice shortcut avoids re-parsing on the common path.
    if self.fast_string_merge:
      return self.function.shortcut_string_merge(node_def)
    return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
  """A no-op context manager: accepts any arguments, does nothing on enter,
  and never suppresses exceptions."""

  def __init__(self, *args, **kwargs):
    pass

  def __enter__(self):
    return None

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Returning a falsy value propagates any exception from the `with` body.
    return False
def _override_helper(clazz_object, operator, func):
  """Overrides (string) operator on Tensors to call func.

  Args:
    clazz_object: the class to override for; either Tensor or SparseTensor.
    operator: the string name of the operator to override.
    func: the function that replaces the overridden operator.

  Raises:
    ValueError: If operator has already been overwritten,
      or if operator is not allowed to be overwritten.
  """
  existing = getattr(clazz_object, operator, None)
  # Default slot wrappers (the comparison operators inherited from `object`)
  # may be replaced; any other existing attribute means the operator was
  # already overridden once.
  if existing is not None and not isinstance(existing, type(object.__lt__)):
    raise ValueError("operator %s cannot be overwritten again on class %s." %
                     (operator, clazz_object))
  if operator not in Tensor.OVERLOADABLE_OPERATORS:
    raise ValueError("Overriding %s is disallowed" % operator)
  setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Tuple of registered "tensor-like" types; extended by
# register_dense_tensor_like_type() below.
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name`, `dtype` and `shape` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  # The tensor interface consists of three read-only properties.
  for prop_name in ("name", "dtype", "shape"):
    prop = getattr(tensor_type, prop_name, None)
    if not isinstance(prop, property):
      raise TypeError("Type %s does not define a `%s` property" %
                      (tensor_type.__name__, prop_name))
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = _TENSOR_LIKE_TYPES + (tensor_type,)
def uid():
  """A unique (within this program execution) integer."""
  # Delegates to the TFE C extension's process-wide counter.
  return pywrap_tfe.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
  """Human readable representation of a tensor's numpy value."""
  if not tensor.dtype.is_numpy_compatible:
    text = "<unprintable>"
  else:
    # pylint: disable=protected-access
    value = tensor._numpy()
    # pylint: enable=protected-access
    text = repr(value) if is_repr else str(value)
  # Start multi-line values on their own line so the caller's prefix lines up.
  if "\n" in text:
    text = "\n" + text
  return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
  """Compare Tensors with element-wise comparison and thus be unhashable.

  Comparing tensors with element-wise allows comparisons such as
  tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
  unhashable. Thus tensors can no longer be directly used in sets or as a key in
  a dictionary.
  """
  # Class-level switch consulted by Tensor (see Tensor._USE_EQUALITY).
  Tensor._USE_EQUALITY = True  # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
  """Compare Tensors by their id and be hashable.

  This is a legacy behaviour of TensorFlow and is highly discouraged.
  """
  # Class-level switch consulted by Tensor (see Tensor._USE_EQUALITY).
  Tensor._USE_EQUALITY = False  # pylint: disable=protected-access
@tf_export("Tensor")
class Tensor(_TensorLike):
"""A tensor represents a rectangular array of data.
When writing a TensorFlow program, the main object you manipulate and pass
around is the `tf.Tensor`. A `tf.Tensor` object represents a rectangular array
of arbitrary dimension, filled with data of a specific data type.
A `tf.Tensor` has the following properties:
* a data type (float32, int32, or string, for example)
* a shape
Each element in the Tensor has the same data type, and the data type is always
known.
In eager execution, which is the default mode in TensorFlow, results are
calculated immediately.
>>> # Compute some values using a Tensor
>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
>>> e = tf.matmul(c, d)
>>> print(e)
tf.Tensor(
[[1. 3.]
[3. 7.]], shape=(2, 2), dtype=float32)
Note that during eager execution, you may discover your `Tensors` are actually
of type `EagerTensor`. This is an internal detail, but it does give you
access to a useful function, `numpy`:
>>> type(e)
<class '...ops.EagerTensor'>
>>> print(e.numpy())
[[1. 3.]
[3. 7.]]
TensorFlow can define computations without immediately executing them, most
commonly inside `tf.function`s, as well as in (legacy) Graph mode. In those
cases, the shape (that is, the rank of the Tensor and the size of
each dimension) might be only partially known.
Most operations produce tensors of fully-known shapes if the shapes of their
inputs are also fully known, but in some cases it's only possible to find the
shape of a tensor at execution time.
There are specialized tensors; for these, see `tf.Variable`, `tf.constant`,
`tf.placeholder`, `tf.SparseTensor`, and `tf.RaggedTensor`.
For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor`).
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
  """Creates a `Tensor` with a pre-computed `TF_Output` C handle.

  Skips the lazy `_as_tf_output()` lookup by seeding `_tf_output` directly.
  """
  ret = Tensor(op, value_index, dtype)
  ret._tf_output = tf_output
  return ret
@property
def op(self):
  """The `Operation` that produces this tensor as an output."""
  return self._op

@property
def dtype(self):
  """The `DType` of elements in this tensor."""
  return self._dtype

@property
def graph(self):
  """The `Graph` that contains this tensor (taken from the producing op)."""
  return self._op.graph
@property
def name(self):
  """The string name of this tensor, of the form "op_name:value_index"."""
  if self._name is None:
    # Build and cache the name the first time it is requested.
    op = self._op
    if not op.name:
      raise ValueError("Operation was not named: %s" % op)
    self._name = "%s:%d" % (op.name, self._value_index)
  return self._name
@property
def device(self):
  """The name of the device on which this tensor will be produced, or None."""
  # Delegates to the producing op's device placement.
  return self._op.device
@property
def shape(self):
  """Returns the `TensorShape` that represents the shape of this tensor.

  The shape is computed using shape inference functions that are
  registered in the Op for each `Operation`.  See
  `tf.TensorShape`
  for more details of what a shape represents.

  The inferred shape of a tensor is used to provide shape
  information without having to execute the underlying kernel. This
  can be used for debugging and providing early error messages. For
  example:

  ```python
  >>> c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
  >>> print(c.shape) # will be TensorShape([2, 3])
  (2, 3)

  >>> d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
  >>> print(d.shape)
  (4, 2)

  # Raises a ValueError, because `c` and `d` do not have compatible
  # inner dimensions.
  >>> e = tf.matmul(c, d)
  Traceback (most recent call last):
  ...
  tensorflow.python.framework.errors_impl.InvalidArgumentError: Matrix
  size-incompatible: In[0]: [2,3], In[1]: [4,2] [Op:MatMul] name: MatMul/

  # This works because we have compatible shapes.
  >>> f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
  >>> print(f.shape)
  (3, 4)
  ```

  In some cases, the inferred shape may have unknown dimensions. If
  the caller has additional information about the values of these
  dimensions, `Tensor.set_shape()` can be used to augment the
  inferred shape.

  Returns:
    A `tf.TensorShape` representing the shape of this tensor.
  """
  if self._shape_val is None:
    # Cache the result: the C-API shape query is comparatively expensive, and
    # the cache is explicitly invalidated by `set_shape()` below.
    self._shape_val = self._c_api_shape()
  return self._shape_val
def _c_api_shape(self):
  """Returns the TensorShape of this tensor according to the C API."""
  c_graph = self._op._graph._c_graph  # pylint: disable=protected-access
  shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(
      c_graph, self._as_tf_output())
  if unknown_shape:
    return tensor_shape.unknown_shape()
  # The C API reports unknown dimensions as -1; TensorShape expects None.
  dims = [None if size == -1 else size for size in shape_vec]
  return tensor_shape.TensorShape(dims)
@property
def _shape(self):
  # Deprecated private accessor kept for legacy call-sites; warns and
  # forwards to the public `shape` property.
  logging.warning("Tensor._shape is private, use Tensor.shape "
                  "instead. Tensor._shape will eventually be removed.")
  return self.shape

@_shape.setter
def _shape(self, value):
  # Assignment was never supported; direct users to the public API.
  raise ValueError(
      "Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
  # Raised when the user explicitly disabled AutoGraph for this function.
  raise errors.OperatorNotAllowedInGraphError(
      "{} is not allowed: AutoGraph is disabled in this function."
      " Try decorating it directly with @tf.function.".format(task))

def _disallow_when_autograph_enabled(self, task):
  # Raised when AutoGraph was on but did not (or could not) convert the
  # calling function.
  raise errors.OperatorNotAllowedInGraphError(
      "{} is not allowed: AutoGraph did not convert this function. Try"
      " decorating it directly with @tf.function.".format(task))

def _disallow_in_graph_mode(self, task):
  # Raised for plain (TF1-style) graph construction outside tf.function.
  raise errors.OperatorNotAllowedInGraphError(
      "{} is not allowed in Graph execution. Use Eager execution or decorate"
      " this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
  """Raise the error matching the current AutoGraph status for bool casts."""
  if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
    self._disallow_when_autograph_disabled(
        "using a `tf.Tensor` as a Python `bool`")
  elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
    self._disallow_when_autograph_enabled(
        "using a `tf.Tensor` as a Python `bool`")
  else:
    # Default: V1-style Graph execution.
    self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")

def _disallow_iteration(self):
  """Raise the error matching the current AutoGraph status for iteration."""
  if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
    self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
  elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
    self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
  else:
    # Default: V1-style Graph execution.
    self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
  """Iterate along the first dimension; eager-only with a known static shape."""
  if not context.executing_eagerly():
    self._disallow_iteration()

  # NOTE: the checks below are ordered from least to most specific so the
  # user gets the most precise error for their tensor.
  shape = self._shape_tuple()
  if shape is None:
    raise TypeError("Cannot iterate over a tensor with unknown shape.")
  if not shape:
    raise TypeError("Cannot iterate over a scalar tensor.")
  if shape[0] is None:
    raise TypeError(
        "Cannot iterate over a tensor with unknown first dimension.")
  return _TensorIterator(self, shape[0])
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
  """Integer rank of this Tensor, if known, else None.

  Returns:
    Integer rank or None
  """
  return self.shape.ndims

def get_shape(self):
  """Alias of `tf.Tensor.shape`."""
  return self.shape
def set_shape(self, shape):
  """Updates the shape of this tensor.

  This method can be called multiple times, and will merge the given
  `shape` with the current shape of this tensor. It can be used to
  provide additional information about the shape of this tensor that
  cannot be inferred from the graph alone. For example, this can be used
  to provide additional information about the shapes of images:

  ```python
  _, image_data = tf.compat.v1.TFRecordReader(...).read(...)
  image = tf.image.decode_png(image_data, channels=3)

  # The height and width dimensions of `image` are data dependent, and
  # cannot be computed without executing the op.
  print(image.shape)
  ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])

  # We know that each image in this dataset is 28 x 28 pixels.
  image.set_shape([28, 28, 3])
  print(image.shape)
  ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
  ```

  NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
  result in inconsistencies between the statically-known graph and the runtime
  value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
  instead.

  Args:
    shape: A `TensorShape` representing the shape of this tensor, a
      `TensorShapeProto`, a list, a tuple, or None.

  Raises:
    ValueError: If `shape` is not compatible with the current shape of
      this tensor.
  """
  # Reset cached shape.
  self._shape_val = None

  # We want set_shape to be reflected in the C API graph for when we run it.
  if not isinstance(shape, tensor_shape.TensorShape):
    shape = tensor_shape.TensorShape(shape)
  dim_list = []
  if shape.dims is None:
    unknown_shape = True
  else:
    unknown_shape = False
    # The C API encodes unknown dimensions as -1 (None on the Python side).
    for dim in shape.dims:
      if dim.value is None:
        dim_list.append(-1)
      else:
        dim_list.append(dim.value)
  try:
    pywrap_tf_session.TF_GraphSetTensorShape_wrapper(
        self._op._graph._c_graph,  # pylint: disable=protected-access
        self._as_tf_output(),
        dim_list,
        unknown_shape)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
@property
def value_index(self):
  """The index of this tensor in the outputs of its `Operation`."""
  return self._value_index

def consumers(self):
  """Returns a list of `Operation`s that consume this tensor.

  Returns:
    A list of `Operation`s.
  """
  # Query the C API for consumer op names, then resolve them to Operation
  # objects via the owning graph.
  consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(
      self._as_tf_output())
  # pylint: disable=protected-access
  return [
      self.graph._get_operation_by_name_unsafe(name)
      for name in consumer_names
  ]
  # pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
  """Returns (and caches) the `TF_Output` C handle for this tensor."""
  # pylint: disable=protected-access
  # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
  # also guarantees that a dictionary of tf_output objects will retain a
  # deterministic (yet unsorted) order which prevents memory blowup in the
  # cache of executor(s) stored for every session.
  if self._tf_output is None:
    self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
  return self._tf_output
  # pylint: enable=protected-access
def __str__(self):
  # Shape/dtype/device segments are omitted when unknown or unset.
  return "Tensor(\"%s\"%s%s%s)" % (
      self.name,
      (", shape=%s" %
       self.get_shape()) if self.get_shape().ndims is not None else "",
      (", dtype=%s" % self._dtype.name) if self._dtype else "",
      (", device=%s" % self.device) if self.device else "")

def __repr__(self):
  return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
                                                 self._dtype.name)
def __hash__(self):
  # In TF2 (where `==` is elementwise), hashing symbolic tensors built
  # inside functions is disallowed to avoid surprising dict/set behavior;
  # otherwise fall back to identity-based hashing.
  g = getattr(self, "graph", None)
  if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
      (g is None or g.building_function)):
    raise TypeError("Tensor is unhashable. "
                    "Instead, use tensor.ref() as the key.")
  else:
    return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100

def __array__(self):
  # Symbolic tensors have no concrete value to hand to numpy.
  raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
                            " array.".format(self.name))

def __len__(self):
  # len() is ambiguous for symbolic tensors (static dims may be unknown).
  raise TypeError("len is not well defined for symbolic Tensors. ({}) "
                  "Please call `x.shape` rather than `len(x)` for "
                  "shape information.".format(self.name))

@staticmethod
def _override_operator(operator, func):
  """Installs `func` as the implementation of `operator` on Tensor."""
  _override_helper(Tensor, operator, func)
def __bool__(self):
  """Dummy method to prevent a tensor from being used as a Python `bool`.

  This overload raises a `TypeError` when the user inadvertently
  treats a `Tensor` as a boolean (most commonly in an `if` or `while`
  statement), in code that was not converted by AutoGraph. For example:

  ```python
  if tf.constant(True):  # Will raise.
    # ...

  if tf.constant(5) < tf.constant(7):  # Will raise.
    # ...
  ```

  Raises:
    `TypeError`.
  """
  self._disallow_bool_casting()

def __nonzero__(self):
  """Dummy method to prevent a tensor from being used as a Python `bool`.

  This is the Python 2.x counterpart to `__bool__()` above.

  Raises:
    `TypeError`.
  """
  self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
  """Evaluates this tensor in a `Session`.

  Note: If you are not using `compat.v1` libraries, you should not need this,
  (or `feed_dict` or `Session`). In eager execution (or within `tf.function`)
  you do not need to call `eval`.

  Calling this method will execute all preceding operations that
  produce the inputs needed for the operation that produces this
  tensor.

  *N.B.* Before invoking `Tensor.eval()`, its graph must have been
  launched in a session, and either a default session must be
  available, or `session` must be specified explicitly.

  Args:
    feed_dict: A dictionary that maps `Tensor` objects to feed values. See
      `tf.Session.run` for a description of the valid feed values.
    session: (Optional.) The `Session` to be used to evaluate this tensor. If
      none, the default session will be used.

  Returns:
    A numpy array corresponding to the value of this tensor.
  """
  return _eval_using_default_session(self, feed_dict, self.graph, session)
@deprecation.deprecated(None, "Use ref() instead.")
def experimental_ref(self):
  # Deprecated alias for `ref()`.
  return self.ref()

def ref(self):
  # tf.Variable also has the same ref() API.  If you update the
  # documentation here, please update tf.Variable.ref() as well.
  """Returns a hashable reference object to this Tensor.

  The primary use case for this API is to put tensors in a set/dictionary.
  We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
  available starting Tensorflow 2.0.

  The following will raise an exception starting 2.0

  >>> x = tf.constant(5)
  >>> y = tf.constant(10)
  >>> z = tf.constant(10)
  >>> tensor_set = {x, y, z}
  Traceback (most recent call last):
    ...
  TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
  >>> tensor_dict = {x: 'five', y: 'ten'}
  Traceback (most recent call last):
    ...
  TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.

  Instead, we can use `tensor.ref()`.

  >>> tensor_set = {x.ref(), y.ref(), z.ref()}
  >>> x.ref() in tensor_set
  True
  >>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
  >>> tensor_dict[y.ref()]
  'ten'

  Also, the reference object provides `.deref()` function that returns the
  original Tensor.

  >>> x = tf.constant(5)
  >>> x.ref().deref()
  <tf.Tensor: shape=(), dtype=int32, numpy=5>
  """
  return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor.

  The concrete `EagerTensor` subclass is created in C (see the
  `TFE_Py_InitEagerTensor` call below), which supplies the `_numpy_internal`,
  `_datatype_enum`, `_shape_tuple`, etc. primitives declared abstract here.
  """

  # __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and
  # only work for scalars; values are cast as per numpy.
  def __complex__(self):
    return complex(self._numpy())

  def __int__(self):
    return int(self._numpy())

  def __long__(self):
    # Python 2 only: `long` is not defined on Python 3.
    return long(self._numpy())

  def __float__(self):
    return float(self._numpy())

  def __index__(self):
    return self._numpy().__index__()

  def __bool__(self):
    return bool(self._numpy())

  __nonzero__ = __bool__

  def __format__(self, format_spec):
    return self._numpy().__format__(format_spec)

  def __reduce__(self):
    # Pickling materializes the value via numpy; unpickling re-converts.
    return convert_to_tensor, (self._numpy(),)

  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self

  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self

  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)

  def __repr__(self):
    return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
        self.shape, self.dtype.name, numpy_text(self, is_repr=True))

  def __len__(self):
    """Returns the length of the first dimension in the Tensor."""
    if not self.shape.ndims:
      raise TypeError("Scalar tensor has no `len()`")
    # pylint: disable=protected-access
    try:
      return self._shape_tuple()[0]
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)

  def _numpy_internal(self):
    # Provided by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def _numpy(self):
    # Wraps _numpy_internal, translating C-API status errors into Python
    # exceptions.
    # pylint: disable=protected-access
    try:
      return self._numpy_internal()
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)

  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access

  def numpy(self):
    """Copy of the contents of this Tensor into a NumPy array or scalar.

    Unlike NumPy arrays, Tensors are immutable, so this method has to copy
    the contents to ensure safety. Use `memoryview` to get a readonly
    view of the contents without doing a copy:

    >>> t = tf.constant([42])
    >>> np.array(memoryview(t))
    array([42], dtype=int32)

    Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor
    is on GPU, it will have to be transferred to CPU first in order for
    `memoryview` to work.

    Returns:
      A NumPy array of the same shape and dtype or a NumPy scalar, if this
      Tensor has rank 0.

    Raises:
      ValueError: If the dtype of this Tensor does not have a compatible
        NumPy dtype.
    """
    # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
    maybe_arr = self._numpy()  # pylint: disable=protected-access
    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr

  @property
  def backing_device(self):
    """Returns the name of the device holding this tensor's memory.

    `.backing_device` is usually the same as `.device`, which returns
    the device on which the kernel of the operation that produced this tensor
    ran. However, some operations can produce tensors on a different device
    (e.g., an operation that executes on the GPU but produces output tensors
    in host memory).
    """
    raise NotImplementedError()

  def _datatype_enum(self):
    # Provided by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()

  def _rank(self):
    """Integer rank of this Tensor.

    Unlike regular Tensors, the rank is always known for EagerTensors.

    This is more performant than len(self._shape_tuple())

    Returns:
      Integer rank
    """
    raise NotImplementedError()

  def _num_elements(self):
    """Number of elements of this Tensor.

    Unlike regular Tensors, the number of elements is always known for
    EagerTensors.

    This is more performant than tensor.shape.num_elements

    Returns:
      Long - num elements in the tensor
    """
    raise NotImplementedError()

  def _copy_to_device(self, device_name):  # pylint: disable=redefined-outer-name
    # Provided by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  @staticmethod
  def _override_operator(name, func):
    setattr(_EagerTensorBase, name, func)

  def _copy_nograd(self, ctx=None, device_name=None):
    """Copies tensor to dest device, but doesn't record the operation."""
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      ctx.ensure_initialized()
      new_tensor = self._copy_to_device(device_name)
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    return new_tensor

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    new_tensor = self._copy_nograd(ctx, device_name)
    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device

      def grad_fun(dresult):
        # Gradients of a copy are copied back to the source device.
        return [
            dresult._copy(device_name=self_device)
            if hasattr(dresult, "_copy") else dresult
        ]

      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor

  # pylint: enable=protected-access

  @property
  def shape(self):
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # pylint: disable=protected-access
      try:
        # `_tensor_shape` is declared and defined in the definition of
        # `EagerTensor`, in C.
        self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
      except core._NotOkStatusException as e:
        six.raise_from(core._status_to_exception(e.code, e.message), None)
    return self._tensor_shape

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Args:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def set_shape(self, shape):
    # Eager tensors already have a concrete shape; only validate consistency.
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")

  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")

  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")

  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")

  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")

  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")

  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)

# Register Tensor as a "dense tensor-like" type for isinstance-style checks.
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor_v1(value,
                         dtype=None,
                         name=None,
                         preferred_dtype=None,
                         dtype_hint=None):
  """Converts the given `value` to a `Tensor` (TF1 endpoint).

  Accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars,
  and returns a `Tensor` built from them. This is the `compat.v1` entry
  point; it supports the deprecated `preferred_dtype` spelling of
  `dtype_hint` and then delegates to `convert_to_tensor_v2`.

  Note: Unlike default NumPy behavior, a `None` inside a Python list or
  scalar of `float`/`string` type raises an error rather than being
  silently converted.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Deprecated soft dtype preference used when `dtype` is
      None; ignored if conversion to it is impossible.
    dtype_hint: Same meaning as `preferred_dtype`, and overrides it.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # Resolve the deprecated-vs-new argument pair into a single hint.
  hint = deprecation.deprecated_argument_lookup("dtype_hint", dtype_hint,
                                                "preferred_dtype",
                                                preferred_dtype)
  return convert_to_tensor_v2(value, dtype, hint, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
  """Converts the given `value` to a `Tensor`.

  Accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars.
  This is useful when composing a new operation in Python: standard op
  constructors apply this function to each of their Tensor-valued inputs,
  which lets those ops accept numpy arrays, Python lists, and scalars in
  addition to `Tensor` objects.

  Note: Unlike default NumPy behavior, a `None` inside a Python list or
  scalar of `float`/`string` type raises an error rather than being
  silently converted.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    dtype_hint: Optional element type for the returned tensor, used when dtype
      is None. A soft preference only — if the conversion to `dtype_hint` is
      not possible, this argument has no effect.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # Thin shim over the internal implementation; `as_ref` is a graph-mode
  # detail that is never exposed at this public endpoint.
  return convert_to_tensor(
      value,
      dtype=dtype,
      preferred_dtype=dtype_hint,
      as_ref=False,
      name=name)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def convert_to_tensor(value,
                      dtype=None,
                      name=None,
                      as_ref=False,
                      preferred_dtype=None,
                      dtype_hint=None,
                      ctx=None,
                      accepted_result_types=(Tensor,)):
  """Implementation of the public convert_to_tensor.

  Fast-paths `EagerTensor` capture and `Tensor` pass-through, then walks the
  registered conversion functions for `type(value)` (most specific first),
  returning the first result that is not `NotImplemented`.

  Args:
    value: Object to convert.
    dtype: Optional required dtype of the result.
    name: Optional name for a newly created tensor.
    as_ref: Whether the caller wants a ref tensor (graph-mode detail).
    preferred_dtype: Optional soft dtype preference tried first when `dtype`
      is None.
    dtype_hint: Redundant alias for `preferred_dtype` (see TODO below).
    ctx: Optional eager context; defaults to `context.context()`.
    accepted_result_types: Types a conversion function may legally return.

  Returns:
    A value of one of `accepted_result_types` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a conversion function returns an invalid value.
    ValueError: If `value` is a tensor of an incompatible `dtype`.
  """
  # TODO(b/142518781): Fix all call-sites and remove redundant arg
  preferred_dtype = preferred_dtype or dtype_hint
  if isinstance(value, EagerTensor):
    if ctx is None:
      ctx = context.context()
    if not ctx.executing_eagerly():
      # Eager tensors used during graph building must be captured by the
      # enclosing FuncGraph; capturing into a plain graph is an error.
      graph = get_default_graph()
      if not graph.building_function:
        raise RuntimeError("Attempting to capture an EagerTensor without "
                           "building a function.")
      return graph.capture(value, name=name)
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, Tensor):
    # Already a Tensor: only validate dtype compatibility, never convert.
    if dtype is not None and not dtype.is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtype.name, value.dtype.name, value))
    return value
  if preferred_dtype is not None:
    preferred_dtype = dtypes.as_dtype(preferred_dtype)
  for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError):
        # Could not coerce the conversion to use the preferred dtype.
        pass
      else:
        if (ret is not NotImplemented and
            ret.dtype.base_dtype != preferred_dtype.base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype, preferred_dtype.base_dtype))
    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    if ret is NotImplemented:
      # This conversion function declined; try the next registered one.
      continue
    if not isinstance(ret, accepted_result_types):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, type(value)))
# Alias kept for backwards compatibility with internal call-sites.
internal_convert_to_tensor = convert_to_tensor
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts each element of `values` to a `Tensor`.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix used when new `Tensor`s are created;
      element `i` is named `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional soft dtype preference applied when `dtype` is
      None; ignored if conversion to it is impossible.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  # Resolve the eager context once and reuse it for every conversion.
  if ctx is None:
    ctx = context.context()
  return [
      convert_to_tensor(
          value,
          dtype=dtype,
          name=None if name is None else "%s_%d" % (name, i),
          as_ref=as_ref,
          preferred_dtype=preferred_dtype,
          ctx=ctx) for i, value in enumerate(values)
  ]
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts each element of `values` to a `Tensor` (non-ref results).

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix used when new `Tensor`s are created;
      element `i` is named `name + '_' + i`.
    preferred_dtype: Optional soft dtype preference applied when `dtype` is
      None; ignored if conversion to it is impossible.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Public wrapper: identical to the internal variant with `as_ref` pinned
  # to False.
  return internal_convert_n_to_tensor(
      values,
      dtype=dtype,
      name=name,
      as_ref=False,
      preferred_dtype=preferred_dtype)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  A `CompositeTensor` passes through unmodified; anything else goes through
  `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor` or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # Public wrapper with `as_ref` pinned to False.
  return internal_convert_to_tensor_or_composite(
      value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
                                            dtype=None,
                                            name=None,
                                            as_ref=False):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  A `CompositeTensor` is returned as-is (after an optional dtype check);
  anything else is funneled through `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor`, or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if not isinstance(value, composite_tensor.CompositeTensor):
    return convert_to_tensor(
        value,
        dtype=dtype,
        name=name,
        as_ref=as_ref,
        accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
  # Composite tensors pass through unchanged, but a requested dtype must
  # still be compatible with the composite's own dtype (when it has one).
  value_dtype = getattr(value, "dtype", None)
  if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
  return value
def internal_convert_n_to_tensor_or_composite(values,
                                              dtype=None,
                                              name=None,
                                              as_ref=False):
  """Converts `values` to a list of `Tensor` or `CompositeTensor` objects.

  `None` entries and `CompositeTensor` objects in `values` are returned
  unmodified; everything else goes through
  `internal_convert_to_tensor_or_composite`.

  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be consumed
      by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `CompositeTensor`, and/or `None` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  converted = []
  for idx, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    # Derive a per-element name from the prefix, if one was supplied.
    element_name = None if name is None else "%s_%d" % (name, idx)
    converted.append(
        internal_convert_to_tensor_or_composite(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
  """Converts `values` to a list of `Output` or `CompositeTensor` objects.

  Thin public wrapper around `internal_convert_n_to_tensor_or_composite`
  with `as_ref=False`; `CompositeTensor` objects pass through unmodified.

  Args:
    values: A list of `None`, `CompositeTensor``, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.

  Returns:
    A list of `Tensor` and/or `CompositeTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor_or_composite(
      values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
  """Returns `dev_spec` as a device string (device specs are serialized)."""
  return dev_spec.to_string() if pydev.is_device_spec(dev_spec) else dev_spec
def _NodeDef(op_type, name, attrs=None):
  """Create a NodeDef proto.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    attrs: Dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef(
      op=compat.as_bytes(op_type), name=compat.as_bytes(name))
  # Copy each supplied AttrValue into the proto's attr map.
  for attr_name, attr_val in six.iteritems(attrs or {}):
    node_def.attr[attr_name].CopyFrom(attr_val)
  return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
# Op names must start with an alphanumeric character or '.'; subsequent
# characters may additionally be '_', '-', '/' or '>'.  Scope names use the
# same character set but may be empty and have no first-character restriction.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
  """Creates a TF_Operation.

  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` for the operation to create.
    inputs: A flattened list of `Tensor`s. This function handles grouping
      tensors into lists as per attributes in the `node_def`.
    control_inputs: A list of `Operation`s to set as control dependencies.
    op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not
      specified, is looked up from the `graph` using `node_def.op`.

  Returns:
    A wrapped TF_Operation*.

  Raises:
    ValueError: if finalizing the operation fails with InvalidArgumentError.
  """
  if op_def is None:
    op_def = graph._get_op_def(node_def.op)  # pylint: disable=protected-access
  # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
  # Refactor so we don't have to do this here.
  inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)
  # pylint: disable=protected-access
  op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,
                                              compat.as_str(node_def.op),
                                              compat.as_str(node_def.name))
  if node_def.device:
    pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))
  # Add inputs
  for op_input in inputs:
    if isinstance(op_input, (list, tuple)):
      pywrap_tf_session.TF_AddInputList(op_desc,
                                        [t._as_tf_output() for t in op_input])
    else:
      pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())
  # Add control inputs
  for control_input in control_inputs:
    pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)
  # pylint: enable=protected-access
  # Add attrs
  for name, attr_value in node_def.attr.items():
    serialized = attr_value.SerializeToString()
    # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
    # It might be worth creating a convenient way to re-use the same status.
    pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),
                                           serialized)
  try:
    # TF_FinishOperation can raise InvalidArgumentError, surfaced below
    # as ValueError.
    c_op = pywrap_tf_session.TF_FinishOperation(op_desc)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
  return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`
objects as input, and produces zero or more `Tensor` objects as output.
Objects of type `Operation` are created by calling a Python op constructor
(such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`
context manager.
For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an
`Operation` of type "MatMul" that takes tensors `a` and `b` as input, and
produces `c` as output.
If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be
executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for
calling `tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "TF_Operation":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Gradient function for this op. There are three ways to specify gradient
# function, and first available gradient gets used, in the following order.
# 1. self._gradient_function
# 2. Gradient name registered by "_gradient_op_type" attribute.
# 3. Gradient name registered by op.type.
self._gradient_function = None
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))
name = self.name
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
self._c_op = _create_c_op(self._graph, node_def, inputs,
control_input_ops, op_def)
name = compat.as_str(node_def.name)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing(input_tensors=inputs)
def _control_flow_post_processing(self, input_tensors=None):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
"""
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return pywrap_tf_session.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return pywrap_tf_session.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in pywrap_tf_session.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
output_types = [
int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))
for i in xrange(num_outputs)
]
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = pywrap_tf_session.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = pywrap_tf_session.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device.. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
pywrap_tf_session.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Arguments:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
@property
def inputs(self):
"""The sequence of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
# pylint: disable=protected-access
self._inputs_val = tuple(
map(self.graph._get_tensor_by_tf_output,
pywrap_tf_session.GetOperationInputs(self._c_op)))
# pylint: enable=protected-access
return self._inputs_val
@property
def _input_types(self):
num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(
pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute tensorflow will
ensure self has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return pywrap_tf_session.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._traceback
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = pywrap_tf_session.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
self._set_attr_with_buf(attr_name, buf)
finally:
pywrap_tf_session.TF_DeleteBuffer(buf)
def _set_attr_with_buf(self, attr_name, attr_buf):
"""Set an attr in the node_def with a pre-allocated buffer."""
# pylint: disable=protected-access
pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
attr_buf)
# pylint: enable=protected-access
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
def _get_attr_type(self, name):
"""Returns the `DType` value of the attr of this op with the given `name`."""
try:
dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_bool(self, name):
"""Returns the `bool` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_int(self, name):
"""Returns the `int` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
# Registry mapping op type name to its gradient function.  `no_gradient`
# registers `None` here to mark an op type as deliberately non-differentiable.
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
# (Both endpoints are marked deprecated via the decorators on `no_gradient`.)
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
  """Returns the function that computes gradients for `op`, or None.

  Ops with no inputs have no gradient. A per-op gradient function
  (populated from the graph's gradient function map when the op was
  created) takes precedence; otherwise the gradient registry is
  consulted, honoring any "_gradient_op_type" attribute override.
  """
  if not op.inputs:
    return None

  # pylint: disable=protected-access
  per_op_fn = op._gradient_function
  # pylint: enable=protected-access
  if per_op_fn:
    return per_op_fn

  try:
    lookup_type = op.get_attr("_gradient_op_type")
  except ValueError:
    lookup_type = op.type
  return _gradient_registry.lookup(lookup_type)
def set_shape_and_handle_data_for_outputs(_):
  """Deliberate no-op kept for compatibility. TODO(b/74620627): Remove this."""
  return None
class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics."""
    # Route assignments through the property setters so any future
    # validation added there applies to construction as well.
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    """The kind of statistic held by this object (e.g. "flops")."""
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    """The accumulated amount for this statistic, or None if unset."""
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    """Accumulates another OpStats of the same statistic_type into this one."""
    if self.statistic_type != other.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    if self.value is None:
      # Nothing accumulated yet: adopt the other value (which may be None).
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
  """A decorator for registering the statistics function for an op type.

  This decorator can be defined for an op type so that it gives a
  report on the resources used by an instance of an operator, in the
  form of an OpStats object.

  Well-known types of statistics include these so far:

  - flops: When running a graph, the bulk of the computation happens doing
    numerical calculations like matrix multiplications. This type allows a node
    to return how many floating-point operations it takes to complete. The
    total number of FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics just by picking a new type string, registering
  functions for the ops you care about, and then calling get_stats_for_node_def.

  If a statistic for an op is registered multiple times, a KeyError will be
  raised.

  Since the statistics is counted on a per-op basis. It is not suitable for
  model parameters (capacity), which is expected to be counted only once, even
  if it is shared by multiple ops. (e.g. RNN)

  For example, you can define a new metric called doohickey for a Foo operation
  by placing this in your code:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Then in client code you can retrieve the value by making this call:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  If the NodeDef is for an op with a registered doohickey function, you'll get
  back the calculated amount in doohickey.value, or None if it's not defined.
  """

  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` as the `Operation` type."""
    # The two parts are joined with "," to form the registry key, so
    # neither may itself contain a comma.
    for arg_name, arg_value in (("op_type", op_type),
                                ("statistic_type", statistic_type)):
      if not isinstance(arg_value, six.string_types):
        raise TypeError("%s must be a string." % arg_name)
      if "," in arg_value:
        raise TypeError("%s must not contain a comma." % arg_name)
    self._op_type = op_type
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers "f" as the statistics function for "op_type"."""
    registry_key = "%s,%s" % (self._op_type, self._statistic_type)
    _stats_registry.register(f, registry_key)
    return f
def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up the node's statistics function in the registry and calls it.

  This function takes a Graph object and a NodeDef from a GraphDef, and if
  there's an associated statistics method, calls it and returns a result. If no
  function has been registered for the particular node type, it returns an empty
  statistics object.

  Args:
    graph: A Graph object that's been set up with the node's graph.
    node: A NodeDef describing the operator.
    statistic_type: A string identifying the statistic we're interested in.

  Returns:
    An OpStats object containing information about resource usage.
  """
  registry_key = node.op + "," + statistic_type
  # A LookupError from either the registry lookup or the statistics
  # function itself yields an empty OpStats, matching prior behavior.
  try:
    stats_func = _stats_registry.lookup(registry_key)
    return stats_func(graph, node)
  except LookupError:
    return OpStats(statistic_type)
def name_from_scope_name(name):
  """Returns the name of an op given the name of its scope.

  Args:
    name: the name of the scope.

  Returns:
    the name of the op (equal to scope name minus any trailing slash).
  """
  if name and name.endswith("/"):
    return name[:-1]
  return name
# Group ids for `Graph._group_lock` (a GroupLock with num_groups=2; see
# Graph.__init__): group 0 is presumably for ops that mutate the graph and
# group 1 for Session.run calls, per the constant names — verify at use sites.
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
Graphs are used by `tf.function`s to represent the function's computations.
Each graph contains a set of `tf.Operation` objects, which represent units of
computation; and `tf.Tensor` objects, which represent the units of data that
flow between operations.
### Using graphs directly (deprecated)
A `tf.Graph` can be constructed and used directly without a `tf.function`, as
was required in TensorFlow 1, but this is deprecated and it is recommended to
use a `tf.function` instead. If a graph is directly used, other deprecated
TensorFlow 1 classes are also required to execute the graph, such as a
`tf.compat.v1.Session`.
A default graph can be registered with the `tf.Graph.as_default` context
manager. Then, operations will be added to the graph instead of being executed
eagerly. For example:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
`tf.compat.v1.get_default_graph()` can be used to obtain the default graph.
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
  def __init__(self):
    """Creates a new, empty Graph.

    All Python-side bookkeeping is initialized here; the underlying C graph
    is created via `c_api_util.ScopedTFGraph` near the end of this method.
    """
    # Protects core state that can be returned via public accessors.
    # Thread-safety is provided on a best-effort basis to support buggy
    # programs, and is not guaranteed by the public `tf.Graph` API.
    #
    # NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads
    self._lock = threading.RLock()
    # The group lock synchronizes Session.run calls with methods that create
    # and mutate ops (e.g. Graph.create_op()). This synchronization is
    # necessary because it's illegal to modify an operation after it's been run.
    # The group lock allows any number of threads to mutate ops at the same time
    # but if any modification is going on, all Session.run calls have to wait.
    # Similarly, if one or more Session.run calls are going on, all mutate ops
    # have to wait until all Session.run calls have finished.
    self._group_lock = lock_util.GroupLock(num_groups=2)
    self._nodes_by_id = {}  # GUARDED_BY(self._lock)
    self._next_id_counter = 0  # GUARDED_BY(self._lock)
    self._nodes_by_name = {}  # GUARDED_BY(self._lock)
    self._version = 0  # GUARDED_BY(self._lock)
    # Maps a name used in the graph to the next id to use for that name.
    self._names_in_use = {}
    self._stack_state_is_thread_local = False
    self._thread_local = threading.local()
    # Functions that will be applied to choose a device if none is specified.
    # In TF2.x or after switch_to_thread_local(),
    # self._thread_local._device_function_stack is used instead.
    self._graph_device_function_stack = traceable_stack.TraceableStack()
    # Default original_op applied to new ops.
    self._default_original_op = None
    # Current control flow context. It could be either CondContext or
    # WhileContext defined in ops/control_flow_ops.py
    self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
    # In TF2.x or after switch_to_thread_local(),
    # self._thread_local._control_dependencies_stack is used instead.
    self._graph_control_dependencies_stack = []
    # Arbitrary collections of objects.
    self._collections = {}
    # The graph-level random seed
    self._seed = None
    # A dictionary of attributes that should be applied to all ops.
    self._attr_scope_map = {}
    # A map from op type to the kernel label that should be used.
    self._op_to_kernel_label_map = {}
    # A map from op type to an alternative op type that should be used when
    # computing gradients.
    self._gradient_override_map = {}
    # A map from op type to a gradient function that should be used instead.
    self._gradient_function_map = {}
    # True if the graph is considered "finalized". In that case no
    # new operations can be added.
    self._finalized = False
    # Functions defined in the graph
    self._functions = collections.OrderedDict()
    # Default GraphDef versions
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
    self._building_function = False
    # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
    # self._thread_local._colocation_stack is used instead.
    self._graph_colocation_stack = traceable_stack.TraceableStack()
    # Set of tensors that are dangerous to feed!
    self._unfeedable_tensors = object_identity.ObjectIdentitySet()
    # Set of operations that are dangerous to fetch!
    self._unfetchable_ops = set()
    # A map of tensor handle placeholder to tensor dtype.
    self._handle_feeders = {}
    # A map from tensor handle to its read op.
    self._handle_readers = {}
    # A map from tensor handle to its move op.
    self._handle_movers = {}
    # A map from tensor handle to its delete op.
    self._handle_deleters = {}
    # Allow optimizers and other objects to pseudo-uniquely key graphs (this key
    # will be shared when defining function graphs, for example, so optimizers
    # being called inside function definitions behave as if they were seeing the
    # actual outside graph).
    # NOTE(review): "grap-key" looks like a typo for "graph-key", but the
    # literal may be matched elsewhere at runtime, so it is left unchanged.
    self._graph_key = "grap-key-%d/" % (uid(),)
    # A string with the last reduction method passed to
    # losses.compute_weighted_loss(), or None. This is required only for
    # backward compatibility with Estimator and optimizer V1 use cases.
    self._last_loss_reduction = None
    # Flag that is used to indicate whether loss has been scaled by optimizer.
    # If this flag has been set, then estimator uses it to scale loss back
    # before reporting. This is required only for backward compatibility with
    # Estimator and optimizer V1 use cases.
    self._is_loss_scaled_by_optimizer = False
    self._container = ""
    # Set to True if this graph is being built in an
    # AutomaticControlDependencies context.
    self._add_control_dependencies = False
    # Cache for OpDef protobufs retrieved via the C API.
    self._op_def_cache = {}
    # Cache for constant results of `broadcast_gradient_args()`. The keys are
    # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
    # values are tuples of reduction indices: (rx, ry).
    self._bcast_grad_args_cache = {}
    # Cache for constant results of `reduced_shape()`. The keys are pairs of
    # tuples: (input_shape_tuple, reduction_indices_tuple), and the values
    # are pairs of tuples: (output_shape_kept_dims, tile_scaling).
    self._reduced_shape_cache = {}

    # TODO(skyewm): fold as much of the above as possible into the C
    # implementation
    self._scoped_c_graph = c_api_util.ScopedTFGraph()
    # The C API requires all ops to have shape functions. Disable this
    # requirement (many custom ops do not have shape functions, and we don't
    # want to break these existing cases).
    pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
    if tf2.enabled():
      self.switch_to_thread_local()
  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @tf_contextlib.contextmanager
  def _variable_creator_scope(self, creator, priority=100):
    """Scope which defines a variable creation function.

    Args:
      creator: A callable taking `next_creator` and `kwargs`. See the
        `tf.variable_creator_scope` docstring.
      priority: Creators with a higher `priority` are called first. Within the
        same priority, creators are called inner-to-outer.

    Yields:
      `_variable_creator_scope` is a context manager with a side effect, but
      doesn't return a value.

    Raises:
      RuntimeError: If variable creator scopes are not properly nested.
    """
    # This step keeps a reference to the existing stack, and it also initializes
    # self._thread_local._variable_creator_stack if it doesn't exist yet.
    old = self._variable_creator_stack
    new = list(old)
    new.append((priority, creator))
    # Sorting is stable, so we'll put higher-priority creators later in the list
    # but otherwise maintain registration order.
    new.sort(key=lambda item: item[0])
    self._thread_local._variable_creator_stack = new  # pylint: disable=protected-access
    try:
      yield
    finally:
      # The installed stack must still be the exact list object created above;
      # an identity (`is`) check — not equality — detects improper nesting.
      if self._thread_local._variable_creator_stack is not new:  # pylint: disable=protected-access
        raise RuntimeError(
            "Exiting variable_creator_scope without proper nesting.")
      self._thread_local._variable_creator_stack = old  # pylint: disable=protected-access
  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @property
  def _variable_creator_stack(self):
    """Thread-local list of (priority, creator) pairs; lazily initialized."""
    if not hasattr(self._thread_local, "_variable_creator_stack"):
      self._thread_local._variable_creator_stack = []  # pylint: disable=protected-access

    # This previously returned a copy of the stack instead of the stack itself,
    # to guard against accidental mutation. Consider, however, code that wants
    # to save and restore the variable creator stack:
    #   def f():
    #     original_stack = graph._variable_creator_stack
    #     graph._variable_creator_stack = new_stack
    #     ...  # Some code
    #     graph._variable_creator_stack = original_stack
    #
    # And lets say you have some code that calls this function with some
    # variable_creator:
    #   def g():
    #     with variable_scope.variable_creator_scope(creator):
    #       f()
    # When exiting the variable creator scope, it would see a different stack
    # object than it expected leading to a "Exiting variable_creator_scope
    # without proper nesting" error.
    return self._thread_local._variable_creator_stack  # pylint: disable=protected-access
  @_variable_creator_stack.setter
  def _variable_creator_stack(self, variable_creator_stack):
    # Replaces the thread-local stack wholesale (e.g. to restore saved state).
    self._thread_local._variable_creator_stack = variable_creator_stack  # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
"""Adds 'op' to the graph and returns the unique ID for the added Operation.
Args:
op: the Operation to add.
op_name: the name of the Operation.
Returns:
An integer that is a unique ID for the added Operation.
"""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
  @property
  def graph_def_versions(self):
    # pylint: disable=line-too-long
    """The GraphDef version information of this graph.

    For details on the meaning of each version, see
    [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).

    Returns:
      A `VersionDef`.
    """
    # pylint: enable=line-too-long
    # Serialize the version info out of the C graph, then parse it back into
    # a Python proto.
    with c_api_util.tf_buffer() as buf:
      pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)
      data = pywrap_tf_session.TF_GetBuffer(buf)
    version_def = versions_pb2.VersionDef()
    version_def.ParseFromString(compat.as_bytes(data))
    return version_def
  @property
  def seed(self):
    """The graph-level random seed of this graph."""
    return self._seed

  @seed.setter
  def seed(self, seed):
    # Stored as-is; no validation is performed here.
    self._seed = seed
  @property
  def finalized(self):
    """True if this graph has been finalized."""
    return self._finalized

  def finalize(self):
    """Finalizes this graph, making it read-only.

    After calling `g.finalize()`, no new operations can be added to
    `g`. This method is used to ensure that no operations are added
    to a graph when it is shared between multiple threads, for example
    when using a `tf.compat.v1.train.QueueRunner`.
    """
    # A single boolean gates op creation; see `_check_not_finalized()`.
    self._finalized = True

  def _unsafe_unfinalize(self):
    """Opposite of `finalize`.

    Internal interface.

    NOTE: Unfinalizing a graph could have negative impact on performance,
    especially in a multi-threaded environment. Unfinalizing a graph
    when it is in use by a Session may lead to undefined behavior. Ensure
    that all sessions using a graph are closed before calling this method.
    """
    self._finalized = False
  def _get_control_flow_context(self):
    """Returns the current control flow context.

    Returns:
      A context object (a CondContext or WhileContext per the comment in
      `__init__`), or None if no control flow context is active.
    """
    return self._control_flow_context

  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context.

    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
  def _as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using `tf.import_graph_def`) or used with the
    [C++ Session API](../../../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional. If this is set, returns a `GraphDef` containing
        only the nodes that were added to this graph since its `version`
        property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each node with
        the inferred shapes of each of its outputs.

    Returns:
      A tuple containing a
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer, and the version of the graph to which that
      `GraphDef` corresponds.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    with self._lock:
      # Serialize the whole graph out of the C API and re-parse it in Python.
      with c_api_util.tf_buffer() as buf:
        pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
        data = pywrap_tf_session.TF_GetBuffer(buf)
      graph = graph_pb2.GraphDef()
      graph.ParseFromString(compat.as_bytes(data))
      # Strip the experimental library field iff it's empty.
      if not graph.library.function:
        graph.ClearField("library")

      if add_shapes:
        # Annotate top-level nodes with their inferred output shapes.
        for node in graph.node:
          op = self._nodes_by_name[node.name]
          if op.outputs:
            node.attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in op.outputs])
        # Annotate function definitions with input and per-node output shapes.
        for function_def in graph.library.function:
          defined_function = self._functions[function_def.signature.name]
          try:
            func_graph = defined_function.graph
          except AttributeError:
            # _DefinedFunction doesn't have a graph, _EagerDefinedFunction
            # does. Both rely on ops.py, so we can't really isinstance check
            # them.
            continue
          input_shapes = function_def.attr["_input_shapes"]
          try:
            func_graph_inputs = func_graph.inputs
          except AttributeError:
            continue
          # TODO(b/141471245): Fix the inconsistency when inputs of func graph
          # are appended during gradient computation of while/cond.
          for input_tensor, _ in zip(func_graph_inputs,
                                     function_def.signature.input_arg):
            if input_tensor.dtype == dtypes.resource:
              # TODO(allenl): Save and restore handle data, then save the
              # resource placeholder's shape. Right now some shape functions get
              # confused if we set the shape of the resource placeholder (to a
              # scalar of course) and there isn't any handle data.
              input_shapes.list.shape.add().CopyFrom(
                  tensor_shape.TensorShape(None).as_proto())
            else:
              input_shapes.list.shape.add().CopyFrom(
                  input_tensor.get_shape().as_proto())
          for node in function_def.node_def:
            try:
              op = func_graph.get_operation_by_name(node.name)
            except KeyError:
              continue
            outputs = op.outputs
            if op.type == "StatefulPartitionedCall":
              # Filter out any extra outputs (possibly added by function
              # backpropagation rewriting).
              num_outputs = len(node.attr["Tout"].list.type)
              outputs = outputs[:num_outputs]
            node.attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in outputs])

      return graph, self._version
  def as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using `tf.import_graph_def`) or used with the
    [C++ Session API](../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional. If this is set, returns a `GraphDef` containing
        only the nodes that were added to this graph since its `version`
        property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each node with
        the inferred shapes of each of its outputs.

    Returns:
      A
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    # Delegates to `_as_graph_def` and drops the version component.
    result, _ = self._as_graph_def(from_version, add_shapes)
    return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition: a function may declare a
    # registered gradient function name or a Python gradient, not both.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)

    # Add function to graph
    # pylint: disable=protected-access
    gradient = (
        function._grad_func._c_func.func if function._grad_func else None)
    pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
                                           gradient)
    # pylint: enable=protected-access

    self._functions[compat.as_str(name)] = function

    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12
  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    # Set to False in __init__; presumably flipped by function-graph
    # subclasses or builders — TODO confirm where it is written.
    return self._building_function
  # Helper functions to create operations.
  @deprecated_args(None,
                   "Shapes are always computed; don't use the compute_shapes "
                   "as it has no effect.", "compute_shapes")
  def create_op(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Creates an `Operation` in this graph.

    This is a low-level interface for creating an `Operation`. Most
    programs will not call this method directly, and instead use the
    Python op constructors, such as `tf.constant()`, which add ops to
    the default graph.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
        computed).
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Raises:
      TypeError: if any of the inputs is not a `Tensor`.
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    del compute_shapes
    # Validate eagerly so the error names the offending input position.
    for idx, a in enumerate(inputs):
      if not isinstance(a, Tensor):
        raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
    return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
                                    attrs, op_def, compute_device)
  def _create_op_internal(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_device=True):
    """Creates an `Operation` in this graph.

    Implements `Graph.create_op()` without the overhead of the deprecation
    wrapper.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Raises:
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    if name is None:
      name = op_type
    # If a name ends with a '/' it is a "name scope" and we use it as-is,
    # after removing the trailing '/'.
    if name and name[-1] == "/":
      name = name_from_scope_name(name)
    else:
      name = self.unique_name(name)

    node_def = _NodeDef(op_type, name, attrs)

    input_ops = set(t.op for t in inputs)
    control_inputs = self._control_dependencies_for_inputs(input_ops)
    # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
    # Session.run call cannot occur between creating and mutating the op.
    with self._mutation_lock():
      ret = Operation(
          node_def,
          self,
          inputs=inputs,
          output_types=dtypes,
          control_inputs=control_inputs,
          input_types=input_types,
          original_op=self._default_original_op,
          op_def=op_def)
      self._create_op_helper(ret, compute_device=compute_device)
    return ret
  def _create_op_from_tf_operation(self, c_op, compute_device=True):
    """Creates an `Operation` in this graph from the supplied TF_Operation.

    This method is like create_op() except the new Operation is constructed
    using `c_op`. The returned Operation will have `c_op` as its _c_op
    field. This is used to create Operation objects around TF_Operations created
    indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).

    This function does not call Operation._control_flow_post_processing or
    Graph._control_dependencies_for_inputs (since the inputs may not be
    available yet). The caller is responsible for calling these methods.

    Args:
      c_op: a wrapped TF_Operation
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    ret = Operation(c_op, self)
    # If a name_scope was created with ret.name but no nodes were created in it,
    # the name will still appear in _names_in_use even though the name hasn't
    # been used. This is ok, just leave _names_in_use as-is in this case.
    # TODO(skyewm): make the C API guarantee no name conflicts.
    name_key = ret.name.lower()
    if name_key not in self._names_in_use:
      self._names_in_use[name_key] = 1
    self._create_op_helper(ret, compute_device=compute_device)
    return ret
  def _create_op_helper(self, op, compute_device=True):
    """Common logic for creating an op in this graph."""
    # Apply any additional attributes requested. Do not overwrite any existing
    # attributes (get_attr raising ValueError means the attr is absent).
    for key, value in self._attr_scope_map.items():
      try:
        op.get_attr(key)
      except ValueError:
        if callable(value):
          value = value(op.node_def)
          if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
            raise TypeError(
                "Callable for scope map key '%s' must return either None or "
                "an AttrValue protocol buffer; but it returned: %s" %
                (key, value))
        if value:
          op._set_attr(key, value)  # pylint: disable=protected-access

    # Apply a kernel label if one has been specified for this op type.
    try:
      kernel_label = self._op_to_kernel_label_map[op.type]
      op._set_attr("_kernel",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
    except KeyError:
      pass

    # Attach any graph-level per-op-type gradient function override.
    op._gradient_function = self._gradient_function_map.get(op.type)  # pylint: disable=protected-access

    # Apply the overriding op type for gradients if one has been specified for
    # this op type.
    try:
      mapped_op_type = self._gradient_override_map[op.type]
      op._set_attr("_gradient_op_type",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
    except KeyError:
      pass

    self._record_op_seen_by_control_dependencies(op)

    if compute_device:
      self._apply_device_functions(op)

    # Snapshot the colocation stack metadata before we might generate error
    # messages using it. Note that this snapshot depends on the actual stack
    # and is independent of the op's _class attribute.
    # pylint: disable=protected-access
    op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
    # pylint: enable=protected-access

    if self._colocation_stack:
      all_colocation_groups = []
      for colocation_op in self._colocation_stack.peek_objs():
        all_colocation_groups.extend(colocation_op.colocation_groups())
        if colocation_op.device:
          # pylint: disable=protected-access
          op._set_device(colocation_op.device)
          # pylint: enable=protected-access

      # Sorted-deduplicated groups keep the "_class" attr deterministic.
      all_colocation_groups = sorted(set(all_colocation_groups))
      # pylint: disable=protected-access
      op._set_attr(
          "_class",
          attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
      # pylint: enable=protected-access

    # Sets "container" attribute if
    # (1) self._container is not None
    # (2) "is_stateful" is set in OpDef
    # (3) "container" attribute is in OpDef
    # (4) "container" attribute is None
    if self._container and op._is_stateful:  # pylint: disable=protected-access
      try:
        container_attr = op.get_attr("container")
      except ValueError:
        # "container" attribute is not in OpDef
        pass
      else:
        if not container_attr:
          op._set_attr("container", attr_value_pb2.AttrValue(  # pylint: disable=protected-access
              s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is a internal unsafe version of get_operation_by_name. It skips many
checks and does not have user friendly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
  def _get_operation_by_tf_operation(self, tf_oper):
    """Returns the `Operation` wrapping the given `TF_Operation` handle."""
    # Resolve the C-level operation to its name, then reuse the fast
    # name-based lookup (no validation; see _get_operation_by_name_unsafe).
    op_name = pywrap_tf_session.TF_OperationName(tf_oper)
    return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
  @property
  def _last_id(self):
    # NOTE(review): exposes the current value of the graph's op-id counter;
    # presumably this is the id most recently handed out — confirm against
    # where _next_id_counter is incremented.
    return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
buf)
# pylint: enable=protected-access
data = pywrap_tf_session.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
  def as_default(self):
    """Returns a context manager that makes this `Graph` the default graph.

    This method should be used if you want to create multiple graphs
    in the same process. For convenience, a global default graph is
    provided, and all ops will be added to this graph if you do not
    create a new graph explicitly.

    Use this method with the `with` keyword to specify that ops created within
    the scope of a block should be added to this graph. In this case, once
    the scope of the `with` is exited, the previous default graph is set again
    as default. There is a stack, so it's ok to have multiple nested levels
    of `as_default` calls.

    The default graph is a property of the current thread. If you
    create a new thread, and wish to use the default graph in that
    thread, you must explicitly add a `with g.as_default():` in that
    thread's function.

    The following code examples are equivalent:

    ```python
    # 1. Using Graph.as_default():
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(5.0)
      assert c.graph is g

    # 2. Constructing and making default:
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      assert c.graph is g
    ```

    If eager execution is enabled ops created under this context manager will be
    added to the graph instead of executed eagerly.

    Returns:
      A context manager for using this graph as the default graph.
    """
    # The returned controller pushes `self` onto the default-graph stack on
    # entry and pops it on exit (see _default_graph_stack).
    return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()` which always returns the
actual collection list if it exists in that it returns a new list each time
it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
  @property
  def _name_stack(self):
    """Thread-local name-scope prefix, e.g. "outer/inner" ("" at the root)."""
    # This may be called from a thread where name_stack doesn't yet exist.
    if not hasattr(self._thread_local, "_name_stack"):
      self._thread_local._name_stack = ""
    return self._thread_local._name_stack
  @_name_stack.setter
  def _name_stack(self, name_stack):
    # Stored on self._thread_local so each thread keeps its own scope prefix.
    self._thread_local._name_stack = name_stack
  # pylint: disable=g-doc-return-or-yield,line-too-long
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.

    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.op.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.op.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.op.name == "nested/inner_1/c"

          # Treats `scope` as an absolute name scope, and
          # switches to the "nested/" scope.
          with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
              e = tf.constant(50.0, name="e")
              assert e.op.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope. For example:

    ```python
    inputs = tf.constant(...)
    with g.name_scope('my_layer') as scope:
      weights = tf.Variable(..., name="weights")
      biases = tf.Variable(..., name="biases")
      affine = tf.matmul(inputs, weights) + biases
      output = tf.nn.relu(affine, name=scope)
    ```

    NOTE: This constructor validates the given `name`. Valid scope
    names match one of the following regular expressions:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
        [A-Za-z0-9_.\\-/]* (for other scopes)

    Args:
      name: A name for the scope.

    Returns:
      A context manager that installs `name` as a new name scope.

    Raises:
      ValueError: If `name` is not a valid scope name, according to the rules
        above.
    """
    if name:
      if isinstance(name, compat.bytes_or_text_types):
        name = compat.as_str(name)

      if self._name_stack:
        # Scopes created in a nested scope may have initial characters
        # that are illegal as the initial character of an op name
        # (viz. '-', '\', '/', and '_').
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Scopes created in the root must match the more restrictive
        # op name regex, which constrains the initial character.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name[-1] == "/":
      # A trailing "/" marks a previously-captured scope: re-enter it as-is
      # instead of uniquifying.
      new_stack = name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    try:
      # Yield the prefix (ending in "/") so `with ... as scope:` captures a
      # value that can later re-enter this exact scope (see above).
      yield "" if new_stack is None else new_stack + "/"
    finally:
      # Always restore the previous scope, even if the body raised.
      self._name_stack = old_stack

  # pylint: enable=g-doc-return-or-yield,line-too-long
  def unique_name(self, name, mark_as_used=True):
    """Return a unique operation name for `name`.

    Note: You rarely need to call `unique_name()` directly. Most of
    the time you just need to create `with g.name_scope()` blocks to
    generate structured names.

    `unique_name` is used to generate structured names, separated by
    `"/"`, to help identify operations when debugging a graph.
    Operation names are displayed in error messages reported by the
    TensorFlow runtime, and in various visualization tools such as
    TensorBoard.

    If `mark_as_used` is set to `True`, which is the default, a new
    unique name is created and marked as in use. If it's set to `False`,
    the unique name is returned without actually being marked as used.
    This is useful when the caller simply wants to know what the name
    to be created will be.

    Args:
      name: The name for an operation.
      mark_as_used: Whether to mark this name as being used.

    Returns:
      A string to be passed to `create_op()` that will be used
      to name the operation being created.
    """
    # Prepend the current name scope, if any, before uniquifying.
    if self._name_stack:
      name = self._name_stack + "/" + name

    # For the sake of checking for names in use, we treat names as case
    # insensitive (e.g. foo = Foo).
    name_key = name.lower()
    i = self._names_in_use.get(name_key, 0)
    # Increment the number for "name_key".
    if mark_as_used:
      self._names_in_use[name_key] = i + 1
    if i > 0:
      base_name_key = name_key
      # Make sure the composed name key is not already used.
      while name_key in self._names_in_use:
        name_key = "%s_%d" % (base_name_key, i)
        i += 1
      # Mark the composed name_key as used in case someone wants
      # to call unique_name("name_1").
      if mark_as_used:
        self._names_in_use[name_key] = 1

      # Return the new name with the original capitalization of the given name.
      # Note: after the loop above, i - 1 is the suffix that made name_key
      # unique.
      name = "%s_%d" % (name, i - 1)
    return name
  def get_name_scope(self):
    """Returns the current name scope.

    For example:

    ```python
    with tf.name_scope('scope1'):
      with tf.name_scope('scope2'):
        print(tf.compat.v1.get_default_graph().get_name_scope())
    ```
    would print the string `scope1/scope2`.

    Returns:
      A string representing the current name scope.
    """
    # Delegates to the thread-local _name_stack property ("" at the root).
    return self._name_stack
  @tf_contextlib.contextmanager
  def _colocate_with_for_gradient(self, op, gradient_uid,
                                  ignore_existing=False):
    """Like `colocate_with`, but also enters a gradient colocation scope.

    When `gradient_uid` is given and a control flow context is active, the
    context is notified via EnterGradientColocation / ExitGradientColocation
    around the yielded region; otherwise this is plain `colocate_with`.
    """
    with self.colocate_with(op, ignore_existing):
      if gradient_uid is not None and self._control_flow_context is not None:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        try:
          yield
        finally:
          # Exit must run even if the body raised.
          self._control_flow_context.ExitGradientColocation(op, gradient_uid)
      else:
        yield
  @tf_contextlib.contextmanager
  def colocate_with(self, op, ignore_existing=False):
    """Returns a context manager that specifies an op to colocate with.

    Note: this function is not for public use, only for internal libraries.

    For example:

    ```python
    a = tf.Variable([1.0])
    with g.colocate_with(a):
      b = tf.constant(1.0)
      c = tf.add(a, b)
    ```

    `b` and `c` will always be colocated with `a`, no matter where `a`
    is eventually placed.

    **NOTE** Using a colocation scope resets any existing device constraints.

    If `op` is `None` then `ignore_existing` must be `True` and the new
    scope resets all colocation and device constraints.

    Args:
      op: The op to colocate all created ops with, or `None`.
      ignore_existing: If true, only applies colocation of this op within the
        context, rather than applying all colocation properties on the stack.
        If `op` is `None`, this value must be `True`.

    Raises:
      ValueError: if op is None but ignore_existing is False.

    Yields:
      A context manager that specifies the op with which to colocate
      newly created ops.
    """
    if op is None and not ignore_existing:
      raise ValueError("Trying to reset colocation (op is None) but "
                       "ignore_existing is not True")
    op = _op_to_colocate_with(op, self)

    # By default, colocate_with resets the device function stack,
    # since colocate_with is typically used in specific internal
    # library functions where colocation is intended to be "stronger"
    # than device functions.
    #
    # In the future, a caller may specify that device_functions win
    # over colocation, in which case we can add support.
    device_fn_tmp = self._device_function_stack
    self._device_function_stack = traceable_stack.TraceableStack()

    if ignore_existing:
      # current_stack is only bound on this branch; the restore in the
      # finally block below is guarded by the same flag.
      current_stack = self._colocation_stack
      self._colocation_stack = traceable_stack.TraceableStack()

    if op is not None:
      # offset refers to the stack frame used for storing code location.
      # We use 4, the sum of 1 to use our caller's stack frame and 3
      # to jump over layers of context managers above us.
      self._colocation_stack.push_obj(op, offset=4)

    try:
      yield
    finally:
      # Restore device function stack
      self._device_function_stack = device_fn_tmp
      if op is not None:
        self._colocation_stack.pop_obj()
      # Reset the colocation stack if requested.
      if ignore_existing:
        self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
  @tf_contextlib.contextmanager
  def device(self, device_name_or_function):
    # pylint: disable=line-too-long
    """Returns a context manager that specifies the default device to use.

    The `device_name_or_function` argument may either be a device name
    string, a device function, or None:

    * If it is a device name string, all operations constructed in
      this context will be assigned to the device with that name, unless
      overridden by a nested `device()` context.
    * If it is a function, it will be treated as a function from
      Operation objects to device name strings, and invoked each time
      a new Operation is created. The Operation will be assigned to
      the device with the returned name.
    * If it is None, all `device()` invocations from the enclosing context
      will be ignored.

    For information about the valid syntax of device name strings, see
    the documentation in
    [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

    For example:

    ```python
    with g.device('/device:GPU:0'):
      # All operations constructed in this context will be placed
      # on GPU 0.
      with g.device(None):
        # All operations constructed in this context will have no
        # assigned device.

    # Defines a function from `Operation` to device string.
    def matmul_on_gpu(n):
      if n.type == "MatMul":
        return "/device:GPU:0"
      else:
        return "/cpu:0"

    with g.device(matmul_on_gpu):
      # All operations of type "MatMul" constructed in this context
      # will be placed on GPU 0; all other operations will be placed
      # on CPU 0.
    ```

    **N.B.** The device scope may be overridden by op wrappers or
    other library code. For example, a variable assignment op
    `v.assign()` must be colocated with the `tf.Variable` `v`, and
    incompatible device scopes will be ignored.

    Args:
      device_name_or_function: The device name or function to use in the
        context.

    Yields:
      A context manager that specifies the default device to use for newly
      created ops.

    Raises:
      RuntimeError: If device scopes are not properly nested.
    """
    # offset=2 skips this generator frame and the contextmanager wrapper so
    # the recorded code location points at the caller (see
    # _add_device_to_stack).
    self._add_device_to_stack(device_name_or_function, offset=2)
    old_top_of_stack = self._device_function_stack.peek_top_obj()
    try:
      yield
    finally:
      # The entry we pushed must still be on top; anything else means the
      # caller exited device scopes out of order.
      new_top_of_stack = self._device_function_stack.peek_top_obj()
      if old_top_of_stack is not new_top_of_stack:
        raise RuntimeError("Exiting device scope without proper scope nesting.")
      self._device_function_stack.pop_obj()
  def _apply_device_functions(self, op):
    """Applies the current device function stack to the given operation."""
    # Apply any device functions in LIFO order, so that the most recently
    # pushed function has the first chance to apply a device to the op.
    # We apply here because the result can depend on the Operation's
    # signature, which is computed in the Operation constructor.
    # pylint: disable=protected-access
    prior_device_string = None
    for device_spec in self._device_function_stack.peek_objs():
      if device_spec.is_null_merge:
        continue

      if device_spec.function is None:
        # A None device function (from `with g.device(None)`) makes all
        # device() invocations from enclosing contexts ignored (see the
        # `device` docstring), so stop walking the stack here.
        break

      device_string = device_spec.string_merge(op)

      # Take advantage of the fact that None is a singleton and Python interns
      # strings, since identity checks are faster than equality checks.
      if device_string is not prior_device_string:
        op._set_device_from_string(device_string)
        prior_device_string = device_string
    op._device_code_locations = self._snapshot_device_function_stack_metadata()
    # pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# be created in the "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# be placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
  class _ControlDependenciesController(object):
    """Context manager for `control_dependencies()`."""

    def __init__(self, graph, control_inputs):
      """Create a new `_ControlDependenciesController`.

      A `_ControlDependenciesController` is the context manager for
      `with tf.control_dependencies()` blocks.  These normally nest,
      as described in the documentation for `control_dependencies()`.

      The `control_inputs` argument list control dependencies that must be
      added to the current set of control dependencies.  Because of
      uniquification the set can be empty even if the caller passed a list of
      ops.  The special value `None` indicates that we want to start a new
      empty set of control dependencies instead of extending the current set.
      In that case we also clear the current control flow context, which is an
      additional mechanism to add control dependencies.

      Args:
        graph: The graph that this controller is managing.
        control_inputs: List of ops to use as control inputs in addition to the
          current control dependencies.  None to indicate that the dependencies
          should be cleared.
      """
      self._graph = graph
      if control_inputs is None:
        self._control_inputs_val = []
        self._new_stack = True
      else:
        self._control_inputs_val = control_inputs
        self._new_stack = False
      # Ops created (or registered via add_op) while this controller is
      # active; used by op_in_group for dominance pruning.
      self._seen_nodes = set()
      # Saved state for __exit__ when _new_stack is True.
      self._old_stack = None
      self._old_control_flow_context = None

    # pylint: disable=protected-access

    def __enter__(self):
      if self._new_stack:
        # Clear the control_dependencies graph.
        self._old_stack = self._graph._control_dependencies_stack
        self._graph._control_dependencies_stack = []
        # Clear the control_flow_context too.
        self._old_control_flow_context = self._graph._get_control_flow_context()
        self._graph._set_control_flow_context(None)
      self._graph._push_control_dependencies_controller(self)

    def __exit__(self, unused_type, unused_value, unused_traceback):
      # Pop first, then restore any state saved in __enter__.
      self._graph._pop_control_dependencies_controller(self)
      if self._new_stack:
        self._graph._control_dependencies_stack = self._old_stack
        self._graph._set_control_flow_context(self._old_control_flow_context)

    # pylint: enable=protected-access

    @property
    def control_inputs(self):
      """The ops this controller adds as control inputs (possibly empty)."""
      return self._control_inputs_val

    def add_op(self, op):
      """Records `op` as seen under this controller (Tensors by their ref)."""
      if isinstance(op, Tensor):
        op = op.ref()
      self._seen_nodes.add(op)

    def op_in_group(self, op):
      """Returns True if `op` was recorded via `add_op` on this controller."""
      if isinstance(op, Tensor):
        op = op.ref()
      return op in self._seen_nodes
  def _push_control_dependencies_controller(self, controller):
    # Controllers nest; _current_control_dependencies and
    # _control_dependencies_for_inputs walk this stack in push order.
    self._control_dependencies_stack.append(controller)
  def _pop_control_dependencies_controller(self, controller):
    # Pops must mirror pushes exactly (LIFO); the assert guards against
    # mismatched __enter__/__exit__ pairs on the controllers.
    assert self._control_dependencies_stack[-1] is controller
    self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend(c for c in controller.control_inputs if c not in input_ops)
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
# The hasattr(handle) is designed to match ResourceVariables. This is so
# control dependencies on a variable or on an unread variable don't
# trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
"""Specify gradient function for the given op type."""
# This is an internal API and we don't need nested context for this.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
yield
self._gradient_function_map = {}
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(s_2) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local be default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
  @property
  def _device_function_stack(self):
    """Device function stack; per-thread once `switch_to_thread_local()` ran."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where device_function_stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_device_function_stack"):
        # Seed this thread's stack with a copy of the shared graph-wide stack.
        stack_copy_for_this_thread = self._graph_device_function_stack.copy()
        self._thread_local._device_function_stack = stack_copy_for_this_thread
      return self._thread_local._device_function_stack
      # pylint: enable=protected-access
    else:
      return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
  @property
  def _colocation_stack(self):
    """Return thread-local copy of colocation stack."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where colocation_stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_colocation_stack"):
        # Seed this thread's stack with a copy of the shared graph-wide stack.
        stack_copy_for_this_thread = self._graph_colocation_stack.copy()
        self._thread_local._colocation_stack = stack_copy_for_this_thread
      return self._thread_local._colocation_stack
      # pylint: enable=protected-access
    else:
      return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, dtype):
if dtype:
dtype = dtypes.as_dtype(dtype)
self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
If an AutoCastVariable `var` is used under this context manager, it will be
casted to `dtype` before being used.
See `AutoCastVariable` for more information.
Args:
dtype: The dtype that AutoCastVariables should be casted to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See `tf.Graph.device` for more details.

  Args:
    device_name_or_function: The device name or function to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if context.executing_eagerly():
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)
  elif executing_eagerly_outside_functions():
    @tf_contextlib.contextmanager
    def combined(device_name_or_function):
      # Enter the graph's device scope and, for plain device names, also the
      # eager context's device scope so the directive applies both to graph
      # construction and to eager execution outside functions.
      with get_default_graph().device(device_name_or_function):
        if not callable(device_name_or_function):
          with context.device(device_name_or_function):
            yield
        else:
          yield
    return combined(device_name_or_function)
  else:
    return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
  """Specifies the device for ops created/executed in this context.

  Ops created or executed in nested contexts inherit this device and also
  create/execute their ops on it.  `device_name` may be fully specified, as
  in "/job:worker/task:1/device:cpu:0", or partially specified, containing
  only a subset of the "/"-separated fields; specified fields override
  device annotations from outer scopes.  For example:

  ```python
  with tf.device('/job:foo'):
    # ops created here have devices with /job:foo
    with tf.device('/job:bar/task:0/device:gpu:2'):
      # ops created here have the fully specified device above
    with tf.device('/device:gpu:1'):
      # ops created here have the device '/job:foo/device:gpu:1'
  ```

  If a specific device is not required, consider not using this function so
  that a device can be automatically assigned.  In general its use is
  optional.

  Args:
    device_name: The device name to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If a function is passed in.
  """
  # Unlike the v1 endpoint, device functions are rejected here.
  if callable(device_name):
    raise RuntimeError("tf.device does not support functions.")
  return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  default_graph = get_default_graph()
  return default_graph.container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Colocation context manager for gradient ops; eager- and graph-aware.

  Args:
    op: The op/tensor to colocate with, or None for a null context.
    gradient_uid: Identifier of the gradient computation, forwarded to
      `Graph._colocate_with_for_gradient`.
    ignore_existing: If True, existing colocation constraints are ignored.

  Returns:
    A context manager.
  """
  if context.executing_eagerly():
    if op is not None:
      if not hasattr(op, "device"):
        op = internal_convert_to_tensor_or_indexed_slices(op)
      return device(op.device)
    else:
      return NullContextmanager()
  else:
    default_graph = get_default_graph()
    if isinstance(op, EagerTensor):
      if default_graph.building_function:
        # Inside a function body an eager tensor's device is still usable.
        return default_graph.device(op.device)
      else:
        raise ValueError("Encountered an Eager-defined Tensor during graph "
                         "construction, but a function was not being built.")
    return default_graph._colocate_with_for_gradient(
        op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
  """Internal-only colocation helper (public API is deprecated)."""
  return _colocate_with_for_gradient(
      op, gradient_uid=None, ignore_existing=ignore_existing)
@deprecation.deprecated(
    date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
  """Deprecated public wrapper around `colocate_with`."""
  return colocate_with(op, ignore_existing=ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See `tf.Graph.control_dependencies` for more details.

  When eager execution is enabled, any callable object in the `control_inputs`
  list will be called.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which must be
      executed or computed before running the operations defined in the
      context. Can also be `None` to clear the control dependencies. If eager
      execution is enabled, any callable object in the `control_inputs` list
      will be called.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  if not context.executing_eagerly():
    return get_default_graph().control_dependencies(control_inputs)
  # Under eager execution ops run in program order, so the dependencies are
  # trivially satisfied; just execute any pending callables.
  for control in control_inputs or ():
    if callable(control):
      control()
  return NullContextmanager()
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost (top) default, or None if the stack is empty."""
    return self.stack[-1] if self.stack else None

  def reset(self):
    """Removes all objects from the stack."""
    self.stack = []

  def is_cleared(self):
    """Returns True if the stack is empty."""
    return not self.stack

  @property
  def enforce_nesting(self):
    """Whether get_controller() requires strictly nested enter/exit order."""
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    self.stack.append(default)
    try:
      yield default
    finally:
      # stack may be empty if reset() was called
      if self.stack:
        if self._enforce_nesting:
          if self.stack[-1] is not default:
            raise AssertionError(
                "Nesting violated for default stack of %s objects" %
                type(default))
          self.stack.pop()
        else:
          self.stack.remove(default)
# Thread-local stack of sessions installed via `default_session()` /
# `Session.as_default()`.
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Python "with" handler for defining a default session.

  Registers `session` to handle `Tensor.eval()` and `Operation.run()` calls
  made within the scope of a "with" block.  It is primarily intended for use
  by `session.Session`, but works with any object implementing the
  `Session.run()` interface.  For example:

    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      result = c.eval()   # Executed by `sess`.

  This is equivalent to calling `sess.run(c)` directly; an explicit
  `eval(session=...)` / `run(session=...)` argument overrides the default,
  and blocks may be nested, with the innermost session winning.

  The default session applies to the current thread only; a new thread that
  wants a default session must enter its own
  "with ops.default_session(sess):" block.

  Args:
    session: The session to be installed as the default session.

  Returns:
    A context manager for the default session.
  """
  return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
  """Returns the default session for the current thread.

  The returned `Session` will be the innermost session on which a
  `Session` or `Session.as_default()` context has been entered.

  NOTE: The default session is a property of the current thread. If you
  create a new thread, and wish to use the default session in that
  thread, you must explicitly add a `with sess.as_default():` in that
  thread's function.

  Returns:
    The default `Session` being used in the current thread.
  """
  innermost_session = _default_session_stack.get_default()
  return innermost_session
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
  """Uses the default session to evaluate one or more tensors.

  Args:
    tensors: A single Tensor, or a list of Tensor objects.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
      numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which the tensors are defined.
    session: (Optional) A different session to use to evaluate "tensors".

  Returns:
    Either a single numpy ndarray if "tensors" is a single tensor; or a list
    of numpy ndarrays that each correspond to the respective element in
    "tensors".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  if session is not None:
    # Explicit session: it only needs to own the tensors' graph.
    if session.graph is not graph:
      raise ValueError("Cannot use the given session to evaluate tensor: "
                       "the tensor's graph is different from the session's "
                       "graph.")
    return session.run(tensors, feed_dict)
  session = get_default_session()
  if session is None:
    raise ValueError("Cannot evaluate tensor using `eval()`: No default "
                     "session is registered. Use `with "
                     "sess.as_default()` or pass an explicit session to "
                     "`eval(session=sess)`")
  if session.graph is not graph:
    raise ValueError("Cannot use the default session to evaluate tensor: "
                     "the tensor's graph is different from the session's "
                     "graph. Pass an explicit session to "
                     "`eval(session=sess)`.")
  return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
  """Uses the default session to run "operation".

  Args:
    operation: The Operation to be run.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
      numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which "operation" is defined.
    session: (Optional) A different session to use to run "operation".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  if session is not None:
    # Explicit session: it only needs to own the operation's graph.
    if session.graph is not graph:
      raise ValueError("Cannot use the given session to execute operation: "
                       "the operation's graph is different from the session's "
                       "graph.")
    session.run(operation, feed_dict)
    return
  session = get_default_session()
  if session is None:
    raise ValueError("Cannot execute operation using `run()`: No default "
                     "session is registered. Use `with "
                     "sess.as_default():` or pass an explicit session to "
                     "`run(session=sess)`")
  if session.graph is not graph:
    raise ValueError("Cannot use the default session to execute operation: "
                     "the operation's graph is different from the "
                     "session's graph. Pass an explicit session to "
                     "run(session=sess).")
  session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of objects for providing an implicit default graph."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    # Lazily-created process-wide fallback graph, returned when the stack
    # is empty.
    self._global_default_graph = None

  def get_default(self):
    """Override that returns a global default if the stack is empty."""
    ret = super(_DefaultGraphStack, self).get_default()
    if ret is None:
      ret = self._GetGlobalDefaultGraph()
    return ret

  def _GetGlobalDefaultGraph(self):
    """Returns the process-wide default graph, creating it on first use."""
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or set
      # provide some other feedback to prevent confusion when a mixture of
      # the global default graph and an explicit graph are combined in the
      # same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    """Clears the stack and drops the global default graph."""
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    # Record the switch so that init_scope() can later locate the outermost
    # context that is not building a function.
    context.context().context_switches.push(default.building_function,
                                            default.as_default,
                                            default._device_function_stack)
    try:
      with super(_DefaultGraphStack,
                 self).get_controller(default) as g, context.graph_mode():
        yield g
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
      context.context().context_switches.pop()
# Thread-local stack of graphs installed via `Graph.as_default()`.
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
  """Get the outermost context not building a function.

  Returns:
    A `(outer_context, innermost_nonempty_device_stack)` tuple, where
    `outer_context` is a callable that enters the outermost graph or eager
    context that is not building a function, and
    `innermost_nonempty_device_stack` is the innermost non-empty device
    function stack found while walking outwards from the current context.

  Raises:
    RuntimeError: if every candidate context is building a function.
  """
  default_graph = get_default_graph()
  outer_context = None
  innermost_nonempty_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access
  if not _default_graph_stack.stack:
    # If the default graph stack is empty, then we cannot be building a
    # function. Install the global graph (which, in this case, is also the
    # default graph) as the outer context.
    if default_graph.building_function:
      raise RuntimeError("The global graph is building a function.")
    outer_context = default_graph.as_default
  else:
    # Find a context that is not building a function.
    # Walk the context-switch log from innermost to outermost, also picking up
    # the first non-empty device stack encountered along the way.
    for stack_entry in reversed(context.context().context_switches.stack):
      if not innermost_nonempty_device_stack:
        innermost_nonempty_device_stack = stack_entry.device_stack
      if not stack_entry.is_building_function:
        outer_context = stack_entry.enter_context_fn
        break
    if outer_context is None:
      # As a last resort, obtain the global default graph; this graph doesn't
      # necessarily live on the graph stack (and hence it doesn't necessarily
      # live on the context stack), but it is stored in the graph stack's
      # encapsulating object.
      outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default  # pylint: disable=protected-access
  if outer_context is None:
    # Sanity check; this shouldn't be triggered.
    raise RuntimeError("All graphs are building functions, and no "
                       "eager context was previously active.")
  return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
  """A context manager that lifts ops out of control-flow scopes and function-building graphs.

  There is often a need to lift variable initialization ops out of control-flow
  scopes, function-building graphs, and gradient tapes. Entering an
  `init_scope` is a mechanism for satisfying these desiderata. In particular,
  entering an `init_scope` has three effects:

  (1) All control dependencies are cleared the moment the scope is entered;
      this is equivalent to entering the context manager returned from
      `control_dependencies(None)`, which has the side-effect of exiting
      control-flow scopes like `tf.cond` and `tf.while_loop`.

  (2) All operations that are created while the scope is active are lifted
      into the lowest context on the `context_stack` that is not building a
      graph function. Here, a context is defined as either a graph or an eager
      context. Every context switch, i.e., every installation of a graph as
      the default graph and every switch into eager mode, is logged in a
      thread-local stack called `context_switches`; the log entry for a
      context switch is popped from the stack when the context is exited.
      Entering an `init_scope` is equivalent to crawling up
      `context_switches`, finding the first context that is not building a
      graph function, and entering it. A caveat is that if graph mode is
      enabled but the default graph stack is empty, then entering an
      `init_scope` will simply install a fresh graph as the default one.

  (3) The gradient tape is paused while the scope is active.

  When eager execution is enabled, code inside an init_scope block runs with
  eager execution enabled even when tracing a `tf.function`. For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  @tf.function
  def func():
    # A function constructs TensorFlow graphs,
    # it does not execute eagerly.
    assert not tf.executing_eagerly()
    with tf.init_scope():
      # Initialization runs with eager execution enabled
      assert tf.executing_eagerly()
  ```

  Raises:
    RuntimeError: if graph state is incompatible with this initialization.
  """
  # pylint: enable=g-doc-return-or-yield,line-too-long
  if context.executing_eagerly():
    # Fastpath.
    with tape.stop_recording():
      yield
  else:
    # Retrieve the active name scope: entering an `init_scope` preserves
    # the name scope of the current context.
    scope = get_default_graph().get_name_scope()
    if scope and scope[-1] != "/":
      # Names that end with trailing slashes are treated by `name_scope` as
      # absolute.
      scope = scope + "/"
    outer_context, innermost_nonempty_device_stack = (
        _get_outer_context_and_inner_device_stack())
    outer_graph = None
    outer_device_stack = None
    try:
      with outer_context(), name_scope(
          scope, skip_on_eager=False), control_dependencies(
              None), tape.stop_recording():
        context_manager = NullContextmanager
        context_manager_input = None
        if not context.executing_eagerly():
          # The device stack is preserved when lifting into a graph. Eager
          # execution doesn't implement device stacks and in particular it
          # doesn't support device functions, so in general it's not possible
          # to do the same when lifting into the eager context.
          outer_graph = get_default_graph()
          outer_device_stack = outer_graph._device_function_stack  # pylint: disable=protected-access
          outer_graph._device_function_stack = innermost_nonempty_device_stack  # pylint: disable=protected-access
        elif innermost_nonempty_device_stack is not None:
          for device_spec in innermost_nonempty_device_stack.peek_objs():
            if device_spec.function is None:
              break
            if device_spec.raw_string:
              context_manager = context.device
              context_manager_input = device_spec.raw_string
              break
          # It is currently not possible to have a device function in V2,
          # but in V1 we are unable to apply device functions in eager mode.
          # This means that we will silently skip some of the entries on the
          # device stack in V1 + eager mode.
        with context_manager(context_manager_input):
          yield
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above). Restoring the saved device stack must
      # happen unconditionally so the outer graph is never left mutated.
      if outer_graph is not None:
        outer_graph._device_function_stack = outer_device_stack  # pylint: disable=protected-access
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions():
  """Returns True if executing eagerly, even if inside a graph function.

  This checks the *outermost* context of the program for eager mode, unlike
  `tf.executing_eagerly()`, which inspects the current context and therefore
  returns `False` inside a `tf.function` body. Useful for libraries that
  behave differently under eager runtime versus v1 session runtime
  (deprecated).

  Example:

  >>> tf.compat.v1.enable_eager_execution()
  >>> @tf.function
  ... def func():
  ...   # A function constructs TensorFlow graphs, it does not execute eagerly,
  ...   # but the outer most context is still eager.
  ...   assert not tf.executing_eagerly()
  ...   return tf.compat.v1.executing_eagerly_outside_functions()
  >>> func()
  <tf.Tensor: shape=(), dtype=bool, numpy=True>

  Returns:
    boolean, whether the outermost context is in eager mode.
  """
  if context.executing_eagerly():
    return True
  # Not currently eager: temporarily enter the outermost non-function
  # context and ask the same question there.
  outermost_context, _ = _get_outer_context_and_inner_device_stack()
  with outermost_context():
    return context.executing_eagerly()
def inside_function():
  """Returns True if the current default graph is building a function."""
  default_graph = get_default_graph()
  return default_graph.building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Eager execution provides an imperative interface to TensorFlow: operations
  execute immediately (rather than being added to a graph for later execution
  in a `tf.compat.v1.Session`) and return concrete values instead of symbolic
  references to graph nodes.

  For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray`s through the numpy() method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Eager execution cannot be enabled after TensorFlow APIs have been used to
  create or execute graphs. It is typically recommended to invoke this
  function at program startup and not in a library (as most libraries should
  be usable both with and without eager execution).

  Args:
    config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
      environment in which operations are executed. Note that
      `tf.compat.v1.ConfigProto` is also used to configure graph execution
      (via `tf.compat.v1.Session`) and many of its options are not
      implemented (or are irrelevant) when eager execution is enabled.
    device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., a GPU 0) handle inputs on a
      different device (e.g. GPU 1 or CPU). When None, an appropriate value
      is picked automatically and may change between TensorFlow releases.
      Valid values:
      - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
        placement is not correct.
      - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are
        not on the right device but logs a warning.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the
        tensors. Note that this may hide performance problems as there is no
        notification provided when operations are blocked on the tensor
        being copied between devices.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
        int32 tensors, raising errors on the other ones.
    execution_mode: (Optional.) Policy controlling how dispatched operations
      are actually executed. When None, an appropriate value is picked
      automatically and may change between TensorFlow releases.
      Valid values:
      - tf.contrib.eager.SYNC: executes each operation synchronously.
      - tf.contrib.eager.ASYNC: executes each operation asynchronously.
        These operations may return "non-ready" handles.

  Raises:
    ValueError: If eager execution is enabled after creating/executing a
      TensorFlow graph, or if options provided conflict with a previous call
      to this function.
  """
  _api_usage_gauge.get_cell().set(True)
  if context.default_execution_mode == context.EAGER_MODE:
    # Already in eager mode: nothing to do.
    return
  return enable_eager_execution_internal(
      config=config,
      device_policy=device_policy,
      execution_mode=execution_mode,
      server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
  """Disables eager execution.

  May only be called before any Graphs, Ops, or Tensors have been created.
  Intended for use at the beginning of a program during complex migration
  projects from TensorFlow 1.x to 2.x.
  """
  _api_usage_gauge.get_cell().set(False)
  context.default_execution_mode = context.GRAPH_MODE
  existing_context = context.context_safe()
  if existing_context is not None:
    # Flip the already-created context's thread-local flag back to graph mode.
    existing_context._thread_local_data.is_eager = False  # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
                                    device_policy=None,
                                    execution_mode=None,
                                    server_def=None):
  """Enables eager execution for the lifetime of this program.

  Most of the doc string for enable_eager_execution is relevant here as well.

  Args:
    config: See enable_eager_execution doc string
    device_policy: See enable_eager_execution doc string
    execution_mode: See enable_eager_execution doc string
    server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
      remote devices. GrpcServers need to be started by creating an identical
      server_def to this, and setting the appropriate task_indexes, so that the
      servers can communicate. It will then be possible to execute operations on
      remote devices.

  Raises:
    ValueError
  """
  # Validate the option arguments before mutating any global state.
  if config is not None and not isinstance(config, config_pb2.ConfigProto):
    raise TypeError("config must be a tf.ConfigProto, but got %s" %
                    type(config))
  if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
                           context.DEVICE_PLACEMENT_WARN,
                           context.DEVICE_PLACEMENT_SILENT,
                           context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
    raise ValueError(
        "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
    )
  if execution_mode not in (None, context.SYNC, context.ASYNC):
    raise ValueError(
        "execution_mode must be one of None, tf.contrib.eager.SYNC, "
        "tf.contrib.eager.ASYNC")
  if context.default_execution_mode == context.GRAPH_MODE:
    # A materialized global default graph is the signal that graph-mode APIs
    # have already been used, in which case switching to eager is unsafe.
    graph_mode_has_been_used = (
        _default_graph_stack._global_default_graph is not None)  # pylint: disable=protected-access
    if graph_mode_has_been_used:
      raise ValueError(
          "tf.enable_eager_execution must be called at program startup.")
  context.default_execution_mode = context.EAGER_MODE
  # pylint: disable=protected-access
  with context._context_lock:
    if context._context is None:
      context._set_context_locked(context.Context(
          config=config,
          device_policy=device_policy,
          execution_mode=execution_mode,
          server_def=server_def))
    # NOTE: options are compared by identity (`is not`), not equality, so
    # re-calling with an equal-but-distinct config object is rejected.
    elif ((config is not None and config is not context._context._config) or
          (device_policy is not None and
           device_policy is not context._context._device_policy) or
          (execution_mode is not None and
           execution_mode is not context._context._execution_mode)):
      raise ValueError(
          "Trying to change the options of an active eager"
          " execution. Context config: %s, specified config:"
          " %s. Context device policy: %s, specified device"
          " policy: %s. Context execution mode: %s, "
          " specified execution mode %s." %
          (context._context._config, config, context._context._device_policy,
           device_policy, context._context._execution_mode, execution_mode))
    else:
      # We already created everything, so update the thread local data.
      context._context._thread_local_data.is_eager = True
  # Monkey patch to get rid of an unnecessary conditional since the context is
  # now initialized.
  context.context = context.context_safe
def eager_run(main=None, argv=None):
  """Runs the program, with eager execution enabled, via an optional main.

  Example:

  ```python
  import tensorflow as tf
  # Import subject to future changes:
  from tensorflow.contrib.eager.python import tfe

  def main(_):
    u = tf.constant(6.0)
    v = tf.constant(7.0)
    print(u * v)

  if __name__ == "__main__":
    tfe.run()
  ```

  Args:
    main: the main function to run.
    argv: the arguments to pass to it.
  """
  enable_eager_execution()
  app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a property of the current thread, so this
  function applies only to the current thread. Calling it while a
  `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active
  results in undefined behavior, as does using any previously created
  `tf.Operation` or `tf.Tensor` objects afterwards.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  if _default_graph_stack.is_cleared():
    _default_graph_stack.reset()
    return
  raise AssertionError("Do not use tf.reset_default_graph() to clear "
                       "nested graphs. If you need a cleared graph, "
                       "exit the nesting and create a new graph.")
@tf_export(v1=["get_default_graph"])
def get_default_graph():
  """Returns the default graph for the current thread.

  The result is the innermost graph on which a `Graph.as_default()` context
  has been entered, falling back to a global default graph when none has been
  explicitly created.

  NOTE: The default graph is a property of the current thread. To use it from
  a newly created thread, the thread's function must explicitly enter
  `with g.as_default():`.

  Returns:
    The default `Graph` being used in the current thread.
  """
  return _default_graph_stack.get_default()
def has_default_graph():
  """Returns True if there is a default graph.

  A default graph exists when at least one graph has been pushed onto the
  thread-local default graph stack (the lazily-created global fallback graph
  does not count).
  """
  # Truthiness of the stack is the idiomatic equivalent of the original
  # `len(_default_graph_stack.stack) >= 1` check.
  return bool(_default_graph_stack.stack)
def get_name_scope():
  """Returns the current name scope in the default_graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  if context.executing_eagerly():
    eager_scope = context.context().scope_name
    return eager_scope.rstrip("/")
  return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  current_default_graph = get_default_graph()
  if current_default_graph.building_function:
    return current_default_graph

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    of the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
    # up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph-valued input seen: it determines the candidate graph.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
  """Standard names to use for graph collections.

  The standard library uses various well-known names to collect and
  retrieve values associated with a graph. For example, the
  `tf.Optimizer` subclasses default to optimizing the variables
  collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
  specified, but it is also possible to pass an explicit list of
  variables.

  The following standard keys are defined:

  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
    across distributed environment (model variables are subset of these). See
    `tf.compat.v1.global_variables`
    for more details.
    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
    Note: use `tf.contrib.framework.local_variable` to add to this collection.
  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
    model for inference (feed forward). Note: use
    `tf.contrib.framework.model_variable` to add to this collection.
  * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
    be trained by an optimizer. See
    `tf.compat.v1.trainable_variables`
    for more details.
  * `SUMMARIES`: the summary `Tensor` objects that have been created in the
    graph. See
    `tf.compat.v1.summary.merge_all`
    for more details.
  * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
    produce input for a computation. See
    `tf.compat.v1.train.start_queue_runners`
    for more details.
  * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
    keep moving averages. See
    `tf.compat.v1.moving_average_variables`
    for more details.
  * `REGULARIZATION_LOSSES`: regularization losses collected during graph
    construction.

  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
  # to be used in tf.metrics.*.
  METRIC_VARIABLES = "metric_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
  RESOURCES = "resources"
  # Key to collect all shared resources used in this graph which need to be
  # initialized once per session.
  LOCAL_RESOURCES = "local_resources"
  # Trainable resource-style variables.
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"

  # Key to indicate various ops.
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"

  # Used to count the number of evaluations performed during a single evaluation
  # run.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"

  # Key for control flow context.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"

  # Used to store v2 summary names.
  _SUMMARY_COLLECTION = "_SUMMARY_V2"

  # List of all collections that keep track of variables.
  _VARIABLE_COLLECTIONS = [
      GLOBAL_VARIABLES,
      LOCAL_VARIABLES,
      METRIC_VARIABLES,
      MODEL_VARIABLES,
      TRAINABLE_VARIABLES,
      MOVING_AVERAGE_VARIABLES,
      CONCATENATED_VARIABLES,
      TRAINABLE_RESOURCE_VARIABLES,
  ]

  # Key for streaming model ports.
  # NOTE(yuanbyu): internal and experimental.
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
  """Cleans up reference cycles from a `Graph`.

  Helpful for making sure the garbage collector doesn't need to run after a
  temporary `Graph` is no longer needed.

  Args:
    graph: A `Graph` object to destroy. Neither it nor any of its ops are
      usable after this function runs.
  """
  memory.dismantle_ordered_dict(graph._functions)  # pylint: disable=protected-access

  # Break the Operation<->Graph reference cycles by emptying every op's
  # attribute dict, then the graph's own.
  for operation in graph.get_operations():
    operation.__dict__ = {}
  graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
  """Stores `value` in the collection `name` of the default graph.

  Wrapper for `Graph.add_to_collection()`; see `tf.Graph.add_to_collection`
  for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection. @compatibility(eager)
      Collections are only supported in eager when variables are created
      inside an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  graph = get_default_graph()
  graph.add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
  """Stores `value` in each of the collections `names` of the default graph.

  Wrapper for `Graph.add_to_collections()`; see `tf.Graph.add_to_collections`
  for more details.

  Args:
    names: The key for the collections. The `GraphKeys` class contains many
      standard names for collections.
    value: The value to add to the collections. @compatibility(eager)
      Collections are only supported in eager when variables are created
      inside an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  graph = get_default_graph()
  graph.add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
  """Returns the mutable collection list for `key` in the default graph.

  Wrapper for `Graph.get_collection_ref()`; see `tf.Graph.get_collection_ref`
  for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection. Note that this
    returns the collection list itself, which can be modified in place to
    change the collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  graph = get_default_graph()
  return graph.get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
  """Returns the values in the collection `key` of the default graph.

  Wrapper for `Graph.get_collection()`; see `tf.Graph.get_collection`
  for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    scope: (Optional.) If supplied, the resulting list is filtered to include
      only items whose `name` attribute matches using `re.match`. Items
      without a `name` attribute are never returned if a scope is supplied
      and the choice of `re.match` means that a `scope` without special
      tokens filters by prefix.

  Returns:
    The list of values in the collection with the given `name`, or
    an empty list if no value has been added to that collection. The
    list contains the values in the order under which they were
    collected.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  graph = get_default_graph()
  return graph.get_collection(key, scope)
def get_all_collection_keys():
  """Returns a list of collections used in the default graph."""
  graph = get_default_graph()
  return graph.get_all_collection_keys()
def name_scope(name, default_name=None, values=None, skip_on_eager=True):
  """Internal-only entry point for `name_scope*`.

  Internal ops do not use the public API and instead rely on `ops.name_scope`
  regardless of the execution mode. This function dispatches to the correct
  `name_scope*` implementation for the current mode:

  * graph mode -> `internal_name_scope_v1`;
  * eager mode with `skip_on_eager` -> `NullContextmanager` (naming tensors
    eagerly has little use and adds overhead);
  * eager mode otherwise -> `name_scope_v2`, unless `values` contains a graph
    tensor, in which case that tensor's `Graph.name_scope` wins.

  Args:
    name: The name argument that is passed to the op function.
    default_name: The default name to use if the `name` argument is `None`.
    values: The list of `Tensor` arguments that are passed to the op function.
    skip_on_eager: Indicates to return NullContextmanager if executing
      eagerly. By default this is True since naming tensors and operations in
      eager mode has little use and causes unnecessary performance overhead.
      However, it is important to preserve variable names since they are
      often useful for debugging and saved models.

  Returns:
    `name_scope*` context manager.
  """
  if not context.context().executing_eagerly():
    return internal_name_scope_v1(name, default_name, values)
  if skip_on_eager:
    return NullContextmanager()
  if name is None:
    name = default_name
  if values:
    # The presence of a graph tensor in `values` overrides the context.
    # TODO(slebedev): this is Keras-specific and should be removed.
    for value in values:
      if type(value) == Tensor:  # pylint: disable=unidiomatic-typecheck
        return value.graph.name_scope(name)
  return name_scope_v2(name or "")
class internal_name_scope_v1(object):  # pylint: disable=invalid-name
  """Graph-only version of `name_scope_v1`."""

  @property
  def name(self):
    # The resolved name: `default_name` when `name` was None at construction.
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if not (default_name is None or isinstance(default_name, six.string_types)):
      raise TypeError(
          "`default_name` type (%s) is not a string type. You likely meant to "
          "pass this into the `values` kwarg." % type(default_name))
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    if self._name is None and self._values is not None:
      # We only raise an error if values is not None (provided) because
      # currently tf.name_scope(None) (values=None then) is sometimes used as
      # an idiom to reset to top scope.
      raise ValueError(
          "At least one of name (%s) and default_name (%s) must be provided."
          % (self._name, self._default_name))

    g = get_default_graph()
    if self._values and not g.building_function:
      # Specialize based on the knowledge that `_get_graph_from_inputs()`
      # ignores `inputs` when building a function.
      g_from_inputs = _get_graph_from_inputs(self._values)
      if g_from_inputs is not g:
        # The inputs live on a different graph: make it the default for the
        # duration of the scope, and remember the manager so __exit__ (or the
        # except clause below) can leave it again.
        g = g_from_inputs
        self._g_manager = g.as_default()
        self._g_manager.__enter__()
      else:
        self._g_manager = None
    else:
      self._g_manager = None

    try:
      self._name_scope = g.name_scope(self._name)
      return self._name_scope.__enter__()
    except:
      # Bare except on purpose: undo the graph switch on ANY failure, then
      # re-raise the original exception unchanged.
      if self._g_manager is not None:
        self._g_manager.__exit__(*sys.exc_info())
      raise

  def __exit__(self, *exc_info):
    self._name_scope.__exit__(*exc_info)
    if self._g_manager is not None:
      self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  Validates that the given `values` are from the same graph, makes that graph
  the default graph, and pushes a name scope in that graph (see
  `tf.Graph.name_scope` for more details on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  @property
  def name(self):
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    self._name = name if name is not None else default_name
    # All real work is delegated to the internal dispatcher; `skip_on_eager`
    # is disabled so the scope also applies in eager mode.
    self._name_scope = name_scope(
        name, default_name, values, skip_on_eager=False)

  def __enter__(self):
    return self._name_scope.__enter__()

  def __exit__(self, *exc_info):
    return self._name_scope.__exit__(*exc_info)
def enter_eager_name_scope(ctx, name):
  """Updates the eager context to enter the given name scope.

  Args:
    ctx: The eager context whose `scope_name` is rewritten in place.
    name: Scope to enter. A falsy value resets to the top-level scope; a
      trailing "/" marks an already fully-qualified scope name.

  Returns:
    A `(new_scope_name, previous_scope_name)` tuple.
  """
  previous = ctx.scope_name
  if not name:
    # Falsy names reset to the top scope.
    new_scope = ""
  elif name.endswith("/"):
    # A trailing slash breaks out of nested name scopes, indicating a
    # fully specified scope name, for compatibility with Graph.name_scope.
    new_scope = name
  else:
    new_scope = previous + name + "/" if previous else name + "/"
  ctx.scope_name = new_scope
  return new_scope, previous
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
  """A context manager for use when defining a Python op.

  This context manager pushes a name scope, which will make the name of all
  operations added within it have a prefix.

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```

  When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
  and `MyOp/c`.

  If the scope name already exists, the name will be made unique by appending
  `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
  etc.
  """

  def __init__(self, name):
    """Initialize the context manager.

    Args:
      name: The prefix to use on all names created within the name scope.

    Raises:
      ValueError: If name is None, or not a string.
    """
    # (`None` is rejected here too, since it is not a string type.)
    if not isinstance(name, six.string_types):
      raise ValueError("name for name_scope must be a string.")
    self._name = name
    # Stack of callables that undo whatever `__enter__` pushed.
    self._exit_fns = []

  @property
  def name(self):
    return self._name

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.
    """
    ctx = context.context()
    if ctx.executing_eagerly():
      # Eager mode tracks the scope on the context object; remember how to
      # restore the previous scope name on exit.
      scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
      self._exit_fns.append(
          lambda *a: setattr(ctx, "scope_name", old_scope_name))
    else:
      scope = get_default_graph().name_scope(self._name)
      scope_name = scope.__enter__()
      self._exit_fns.append(scope.__exit__)
    return scope_name

  def __exit__(self, type_arg, value_arg, traceback_arg):
    self._exit_fns.pop()(type_arg, value_arg, traceback_arg)
    return False  # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
  """Removes name scope from a name.

  Args:
    name: A `string` name.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    Name with name scope removed, or the original name if export_scope
    is None.
  """
  if not export_scope:
    return name
  # Normalize away a single trailing slash before building the pattern.
  scope = export_scope[:-1] if export_scope[-1] == "/" else export_scope
  try:
    # Strips export_scope/, export_scope///,
    # ^export_scope/, loc:@export_scope/.
    pattern = r"([\^]|loc:@|^)" + scope + r"[\/]+(.*)"
    return re.sub(pattern, r"\1\2", compat.as_str(name), count=1)
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
def prepend_name_scope(name, import_scope):
  """Prepends name scope to a name.

  Args:
    name: A `string` name.
    import_scope: Optional `string`. Name scope to add.

  Returns:
    Name with name scope added, or the original name if import_scope
    is None.
  """
  if not import_scope:
    return name
  # Normalize away a single trailing slash before building the replacement.
  scope = import_scope[:-1] if import_scope[-1] == "/" else import_scope
  try:
    # Re-anchor the name (keeping any control-input "^" or colocation
    # "loc:@" prefix) underneath the import scope.
    return re.sub(r"([\^]|loc:@|^)(.*)", r"\1" + scope + r"/\2",
                  compat.as_str(name))
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order.

  Args:
    values: The list of `Tensor` arguments passed to the op function.
    name: The name argument passed to the op function.
    default_name: The default name to use if `name` is `None`.

  Yields:
    The scope name.
  """
  # `logging.warn` is a deprecated alias of `logging.warning`; use the
  # canonical spelling.
  logging.warning("tf.op_scope(values, name, default_name) is deprecated,"
                  " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
# Global registry mapping collection names to (proto_type, to_proto,
# from_proto) triples.
_proto_function_registry = registry.Registry("proto functions")


def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for collection_name.

  `to_proto` function converts a Python object to the corresponding protocol
  buffer, and returns the protocol buffer.

  `from_proto` function converts protocol buffer into a Python object, and
  returns the object.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
    to_proto: Function that implements Python object to protobuf conversion.
    from_proto: Function that implements protobuf to Python object conversion.

  Raises:
    TypeError: if `to_proto` or `from_proto` is supplied but not callable.
  """
  if to_proto and not callable(to_proto):
    raise TypeError("to_proto must be callable.")
  if from_proto and not callable(from_proto):
    raise TypeError("from_proto must be callable.")
  registered_entry = (proto_type, to_proto, from_proto)
  _proto_function_registry.register(registered_entry, collection_name)
def get_collection_proto_type(collection_name):
  """Returns the proto_type for collection_name, or None if unregistered."""
  try:
    entry = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return entry[0]
def get_to_proto_function(collection_name):
  """Returns the to_proto function for collection_name, or None."""
  try:
    entry = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return entry[1]
def get_from_proto_function(collection_name):
  """Returns the from_proto function for collection_name, or None."""
  try:
    entry = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return entry[2]
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
  """Operation object corresponding to v to use for colocation constraints."""
  if v is None:
    return None
  if isinstance(v, Operation):
    return v
  # We always want to colocate with the reference op.
  # When 'v' is a ResourceVariable, the reference op is the handle creating op.
  #
  # What this should be is:
  #   if isinstance(v, ResourceVariable):
  #     return v.handle.op
  # However, that would require a circular import dependency.
  # As of October 2018, there were attempts underway to remove
  # colocation constraints altogether. Assuming that will
  # happen soon, perhaps this hack to work around the circular
  # import dependency is acceptable.
  handle = getattr(v, "handle", None)
  if isinstance(handle, Tensor):
    # Inside a function the handle must be captured first.
    return (graph.capture(handle).op
            if graph.building_function else handle.op)
  return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# Converting an `Operation` to a `Tensor` always raises a clear TypeError.
tensor_conversion_registry.register_tensor_conversion_function(
    Operation, _operation_conversion_error)
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
    indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
# Backwards-compatibility alias for the registry's registration function.
register_tensor_conversion_function = \
    tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.
def to_raw_op(f):
  """Make a given op wrapper function `f` raw.

  Raw op wrappers can only be called with keyword arguments.

  Args:
    f: An op wrapper function to make raw.

  Returns:
    Raw `f`.
  """
  # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
  # due to double-registration.
  clone = types.FunctionType(f.__code__, f.__globals__, f.__name__,
                             f.__defaults__, f.__closure__)
  return kwarg_only(clone)
def raise_from_not_ok_status(e, name):
  """Re-raises a not-OK status `e` as the matching TF exception type."""
  suffix = " name: " + name if name is not None else ""
  # pylint: disable=protected-access
  six.raise_from(core._status_to_exception(e.code, e.message + suffix), None)
  # pylint: enable=protected-access
def add_exit_callback_to_default_func_graph(fn):
  """Add a callback to run when the default function graph goes out of scope.

  Usage:

  ```python
  @tf.function
  def fn(x, v):
    expensive = expensive_object(v)
    add_exit_callback_to_default_func_graph(lambda: expensive.release())
    return g(x, expensive)
  fn(x=tf.constant(...), v=...)
  # `expensive` has been released.
  ```

  Args:
    fn: A callable that takes no arguments and whose output is ignored.
      To be executed when exiting func graph scope.

  Raises:
    RuntimeError: If executed when the current default graph is not a
      FuncGraph, or not currently executing in function creation mode (e.g.,
      if inside an init_scope).
  """
  graph = get_default_graph()
  # Exit callbacks only make sense while tracing a function (FuncGraph).
  if not graph._building_function:  # pylint: disable=protected-access
    raise RuntimeError(
        "Cannot add scope exit callbacks when not building a function. "
        "Default graph: {}".format(graph))
  graph._add_scope_exit_callback(fn)  # pylint: disable=protected-access
def _reconstruct_sequence_inputs(op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
class _TensorIterator(object):
"""Iterates over the leading dim of a Tensor. Performs no error checks."""
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
next = __next__ # python2.x compatibility.
| 36.68432
| 115
| 0.692594
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map
from six.moves import xrange
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily to avoid a circular dependency with autograph.
ag_ctx = LazyLoader(
    "ag_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
# Flags for the C API / C-computed shapes; unconditionally enabled here.
_USE_C_API = True
_USE_C_SHAPES = True
# Gauge recording whether `ops.enable_eager_execution()` has been called.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/ops_eager_execution",
    "Whether ops.enable_eager_execution() is called.")
# Aliases for private helpers from sibling modules.
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
def tensor_id(tensor):
  """Returns a unique identifier for this Tensor."""
  unique_id = tensor._id  # pylint: disable=protected-access
  return unique_id
class _UserDeviceSpec(object):
  """Store user-specified device and provide a way to merge node devices.

  Accepts a device name string, a callable mapping a NodeDef to a device,
  a `pydev.MergeDevice`, or None.
  """
  def __init__(self, device_name_or_function):
    self._device_name_or_function = device_name_or_function
    self.display_name = str(self._device_name_or_function)
    self.function = device_name_or_function
    self.raw_string = None
    if isinstance(device_name_or_function, pydev.MergeDevice):
      self.is_null_merge = device_name_or_function.is_null_merge
    elif callable(device_name_or_function):
      self.is_null_merge = False
      # Use the wrapped function's name and source location for display.
      dev_func = self._device_name_or_function
      func_name = function_utils.get_func_name(dev_func)
      func_code = function_utils.get_func_code(dev_func)
      if func_code:
        fname = func_code.co_filename
        lineno = func_code.co_firstlineno
      else:
        fname = "unknown"
        lineno = -1
      self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
    elif device_name_or_function is None:
      # NOTE(review): `None` presumably resets the enclosing device scope;
      # confirm against the device-stack callers.
      self.is_null_merge = False
    else:
      # Plain string spec: pre-parse it into a MergeDevice.
      self.raw_string = device_name_or_function
      self.function = pydev.merge_device(device_name_or_function)
      self.is_null_merge = self.function.is_null_merge
    # A MergeDevice supports the fast string-based merge path below.
    self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
  def string_merge(self, node_def):
    # Merge this spec's device with `node_def`'s device and return a string.
    if self.fast_string_merge:
      return self.function.shortcut_string_merge(node_def)
    return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
  """A context manager that does nothing on enter or exit."""

  def __init__(self, *args, **kwargs):
    # Accept and ignore any arguments so this can stand in for a real
    # context manager with an arbitrary constructor signature.
    pass

  def __enter__(self):
    pass

  def __exit__(self, type_arg, value_arg, traceback_arg):
    return False  # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
  """Installs `func` as `operator` on `clazz_object`, with sanity checks."""
  existing = getattr(clazz_object, operator, None)
  # Only the default slot wrappers may be replaced; overriding the same
  # operator twice is a programming error.
  if existing is not None and not isinstance(existing, type(object.__lt__)):
    raise ValueError("operator %s cannot be overwritten again on class %s." %
                     (operator, clazz_object))
  if operator not in Tensor.OVERLOADABLE_OPERATORS:
    raise ValueError("Overriding %s is disallowed" % operator)
  setattr(clazz_object, operator, func)
def _as_graph_element(obj):
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Tuple of types registered as dense-tensor-like; extended by
# `register_dense_tensor_like_type`.
_TENSOR_LIKE_TYPES = tuple()


def is_dense_tensor_like(t):
  """True if `t` is an instance of a registered dense-tensor-like type."""
  return isinstance(t, _TENSOR_LIKE_TYPES)


def register_dense_tensor_like_type(tensor_type):
  """Registers `tensor_type` as a dense-tensor-like type.

  The type must expose `name`, `dtype` and `shape` properties.

  Args:
    tensor_type: the class to register.

  Raises:
    TypeError: if any required property is missing on `tensor_type`.
  """
  for prop in ("name", "dtype", "shape"):
    if not isinstance(getattr(tensor_type, prop, None), property):
      raise TypeError("Type %s does not define a `%s` property" %
                      (tensor_type.__name__, prop))
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # isinstance checks.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = _TENSOR_LIKE_TYPES + (tensor_type,)
def uid():
  """A unique (within this program execution) integer, from the C extension."""
  return pywrap_tfe.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
  """Human-readable representation of a tensor's numpy value."""
  if not tensor.dtype.is_numpy_compatible:
    return "<unprintable>"
  value = tensor._numpy()  # pylint: disable=protected-access
  text = repr(value) if is_repr else str(value)
  # Start multi-line values on a fresh line for readability.
  return "\n" + text if "\n" in text else text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
  """Enables element-wise equality semantics for Tensors.

  With `_USE_EQUALITY` set, eager `Tensor.__hash__` raises (use
  `tensor.ref()` as a dict/set key instead).
  """
  Tensor._USE_EQUALITY = True  # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
  """Restores id-based equality/hashing semantics for Tensors."""
  Tensor._USE_EQUALITY = False  # pylint: disable=protected-access
@tf_export("Tensor")
class Tensor(_TensorLike):
  """A symbolic handle to one of the outputs of an `Operation`.

  A `Tensor` does not hold values itself; it represents the value that will
  be produced by `self.op` at output index `self.value_index`.
  """
  # Operators that may be overloaded on this class via `_override_operator`.
  OVERLOADABLE_OPERATORS = {
      "__add__",
      "__radd__",
      "__sub__",
      "__rsub__",
      "__mul__",
      "__rmul__",
      "__div__",
      "__rdiv__",
      "__truediv__",
      "__rtruediv__",
      "__floordiv__",
      "__rfloordiv__",
      "__mod__",
      "__rmod__",
      "__lt__",
      "__le__",
      "__gt__",
      "__ge__",
      "__ne__",
      "__eq__",
      "__and__",
      "__rand__",
      "__or__",
      "__ror__",
      "__xor__",
      "__rxor__",
      "__getitem__",
      "__pow__",
      "__rpow__",
      "__invert__",
      "__neg__",
      "__abs__",
      "__matmul__",
      "__rmatmul__"
  }
  # Whether element-wise equality semantics are in effect (TF2 default); see
  # enable_tensor_equality()/disable_tensor_equality().
  _USE_EQUALITY = tf2.enabled()
  def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If `op` is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)
    # Cached C-API TF_Output; filled lazily by `_as_tf_output`.
    self._tf_output = None
    # Cached TensorShape; computed lazily via the C API in `shape`.
    self._shape_val = None
    self._consumers = []
    self._id = uid()
    # Cached "<op_name>:<index>" string; built lazily by the `name` property.
    self._name = None
  @staticmethod
  def _create_with_tf_output(op, value_index, dtype, tf_output):
    # Fast-path constructor that pre-populates the cached TF_Output.
    ret = Tensor(op, value_index, dtype)
    ret._tf_output = tf_output
    return ret
  @property
  def op(self):
    """The `Operation` that produces this tensor as an output."""
    return self._op
  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self._dtype
  @property
  def graph(self):
    """The `Graph` that contains this tensor."""
    return self._op.graph
  @property
  def name(self):
    """The string name of this tensor ("<op_name>:<value_index>")."""
    if self._name is None:
      if not self._op.name:
        raise ValueError("Operation was not named: %s" % self._op)
      self._name = "%s:%d" % (self._op.name, self._value_index)
    return self._name
  @property
  def device(self):
    """The name of the device on which this tensor will be produced."""
    return self._op.device
  @property
  def shape(self):
    """Returns the `TensorShape` of this tensor (computed lazily)."""
    if self._shape_val is None:
      self._shape_val = self._c_api_shape()
    return self._shape_val
  def _c_api_shape(self):
    """Returns the `TensorShape` of this tensor according to the C API."""
    c_graph = self._op._graph._c_graph
    shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(
        c_graph, self._as_tf_output())
    if unknown_shape:
      return tensor_shape.unknown_shape()
    else:
      # The C API encodes an unknown dimension as -1.
      shape_vec = [None if d == -1 else d for d in shape_vec]
      return tensor_shape.TensorShape(shape_vec)
  @property
  def _shape(self):
    # Deprecated private alias of `shape`.
    logging.warning("Tensor._shape is private, use Tensor.shape "
                    "instead. Tensor._shape will eventually be removed.")
    return self.shape
  @_shape.setter
  def _shape(self, value):
    raise ValueError(
        "Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
  def _disallow_when_autograph_disabled(self, task):
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed: AutoGraph is disabled in this function."
        " Try decorating it directly with @tf.function.".format(task))
  def _disallow_when_autograph_enabled(self, task):
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed: AutoGraph did not convert this function. Try"
        " decorating it directly with @tf.function.".format(task))
  def _disallow_in_graph_mode(self, task):
    raise errors.OperatorNotAllowedInGraphError(
        "{} is not allowed in Graph execution. Use Eager execution or decorate"
        " this function with @tf.function.".format(task))
  def _disallow_bool_casting(self):
    # Choose the error message matching why graph-mode bool() is disallowed.
    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
      self._disallow_when_autograph_disabled(
          "using a `tf.Tensor` as a Python `bool`")
    elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
      self._disallow_when_autograph_enabled(
          "using a `tf.Tensor` as a Python `bool`")
    else:
      self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
  def _disallow_iteration(self):
    # Same dispatch as `_disallow_bool_casting`, for iteration.
    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
      self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
    elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
      self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
    else:
      self._disallow_in_graph_mode("iterating over `tf.Tensor`")
  def __iter__(self):
    # Iteration over the leading dimension; only allowed in eager mode and
    # only when the first dimension is statically known.
    if not context.executing_eagerly():
      self._disallow_iteration()
    shape = self._shape_tuple()
    if shape is None:
      raise TypeError("Cannot iterate over a tensor with unknown shape.")
    if not shape:
      raise TypeError("Cannot iterate over a scalar tensor.")
    if shape[0] is None:
      raise TypeError(
          "Cannot iterate over a tensor with unknown first dimension.")
    return _TensorIterator(self, shape[0])
  def _shape_as_list(self):
    # Shape as a Python list of ints/None, or None if the rank is unknown.
    if self.shape.ndims is not None:
      return [dim.value for dim in self.shape.dims]
    else:
      return None
  def _shape_tuple(self):
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)
  def _rank(self):
    """Integer rank of this Tensor, or None if the rank is unknown."""
    return self.shape.ndims
  def get_shape(self):
    """Alias of `Tensor.shape`."""
    return self.shape
  def set_shape(self, shape):
    """Updates (merges) the statically-known shape of this tensor.

    Args:
      shape: A `TensorShape` or value convertible to one.

    Raises:
      ValueError: if `shape` is incompatible with the current shape.
    """
    # Reset cached value so `shape` is recomputed from the C API next access.
    self._shape_val = None
    if not isinstance(shape, tensor_shape.TensorShape):
      shape = tensor_shape.TensorShape(shape)
    dim_list = []
    if shape.dims is None:
      unknown_shape = True
    else:
      unknown_shape = False
      for dim in shape.dims:
        if dim.value is None:
          # -1 encodes an unknown dimension for the C API.
          dim_list.append(-1)
        else:
          dim_list.append(dim.value)
    try:
      pywrap_tf_session.TF_GraphSetTensorShape_wrapper(
          self._op._graph._c_graph,
          self._as_tf_output(),
          dim_list,
          unknown_shape)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
  @property
  def value_index(self):
    """The index of this tensor in the outputs of its `Operation`."""
    return self._value_index
  def consumers(self):
    """Returns a list of `Operation`s that consume this tensor."""
    consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(
        self._as_tf_output())
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(name)
        for name in consumer_names
    ]
  def _as_node_def_input(self):
    """Return a value to use for the "input" attribute when serializing."""
    if not self._op.name:
      raise ValueError("Operation was not named: %s" % self._op)
    if self._value_index == 0:
      # Index 0 may be written without the ":0" suffix.
      return self._op.name
    else:
      return "%s:%d" % (self._op.name, self._value_index)
  def _as_tf_output(self):
    # Return a cached TF_Output so repeated callers do not re-create it.
    if self._tf_output is None:
      self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
    return self._tf_output
  def __str__(self):
    return "Tensor(\"%s\"%s%s%s)" % (
        self.name,
        (", shape=%s" %
         self.get_shape()) if self.get_shape().ndims is not None else "",
        (", dtype=%s" % self._dtype.name) if self._dtype else "",
        (", device=%s" % self.device) if self.device else "")
  def __repr__(self):
    return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
                                                   self._dtype.name)
  def __hash__(self):
    # With equality semantics enabled, Tensors built eagerly / in functions
    # are unhashable; callers should key on `tensor.ref()` instead.
    g = getattr(self, "graph", None)
    if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
        (g is None or g.building_function)):
      raise TypeError("Tensor is unhashable. "
                      "Instead, use tensor.ref() as the key.")
    else:
      return id(self)
  def __copy__(self):
    # Shallow copy: the result shares the underlying op/graph state.
    cls = self.__class__
    result = cls.__new__(cls)
    result.__dict__.update(self.__dict__)
    return result
  # This enables the Tensor's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Tensor class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  __array_priority__ = 100
  def __array__(self):
    raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
                              " array.".format(self.name))
  def __len__(self):
    raise TypeError("len is not well defined for symbolic Tensors. ({}) "
                    "Please call `x.shape` rather than `len(x)` for "
                    "shape information.".format(self.name))
  @staticmethod
  def _override_operator(operator, func):
    _override_helper(Tensor, operator, func)
  def __bool__(self):
    # Symbolic tensors have no truth value; always raises with a message
    # that depends on the AutoGraph status.
    self._disallow_bool_casting()
  def __nonzero__(self):
    # Python 2 spelling of `__bool__`.
    self._disallow_bool_casting()
  def eval(self, feed_dict=None, session=None):
    """Evaluates this tensor in a `Session` (graph mode only)."""
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  @deprecation.deprecated(None, "Use ref() instead.")
  def experimental_ref(self):
    return self.ref()
  def ref(self):
    """Returns a hashable reference object wrapping this tensor."""
    return object_identity.Reference(self)
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor.

  The concrete `EagerTensor` class is completed by the C extension via
  `pywrap_tfe.TFE_Py_InitEagerTensor`, which supplies `_numpy_internal`,
  `_datatype_enum`, `_shape_tuple`, etc.
  """
  def __complex__(self):
    return complex(self._numpy())
  def __int__(self):
    return int(self._numpy())
  def __long__(self):
    # Python 2 only; `long` does not exist on Python 3.
    return long(self._numpy())
  def __float__(self):
    return float(self._numpy())
  def __index__(self):
    return self._numpy().__index__()
  def __bool__(self):
    return bool(self._numpy())
  __nonzero__ = __bool__
  def __format__(self, format_spec):
    return self._numpy().__format__(format_spec)
  def __reduce__(self):
    # Pickle as the underlying numpy value; unpickling converts it back.
    return convert_to_tensor, (self._numpy(),)
  def __copy__(self):
    # Eager Tensors are immutable, so returning self is safe.
    return self
  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self
  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)
  def __repr__(self):
    return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
        self.shape, self.dtype.name, numpy_text(self, is_repr=True))
  def __len__(self):
    """Returns the length of the first dimension in the Tensor."""
    if not self.shape.ndims:
      raise TypeError("Scalar tensor has no `len()`")
    try:
      return self._shape_tuple()[0]
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
  def _numpy_internal(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _numpy(self):
    # Wrap C-extension errors into the matching TF exception types.
    try:
      return self._numpy_internal()
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
  @property
  def dtype(self):
    """The `DType`, resolved from the C-level datatype enum."""
    return dtypes._INTERN_TABLE[self._datatype_enum()]
  def numpy(self):
    """Returns the tensor's value as a numpy array (or scalar)."""
    maybe_arr = self._numpy()
    # Defensive copy so mutations of the result cannot leak back into the
    # tensor's buffer.
    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
  @property
  def backing_device(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _datatype_enum(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _shape_tuple(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _rank(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _num_elements(self):
    # Implemented by the C extension.
    raise NotImplementedError()
  def _copy_to_device(self, device_name):
    # Implemented by the C extension.
    raise NotImplementedError()
  @staticmethod
  def _override_operator(name, func):
    setattr(_EagerTensorBase, name, func)
  def _copy_nograd(self, ctx=None, device_name=None):
    """Copies tensor to the given device without recording on the tape."""
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    try:
      ctx.ensure_initialized()
      new_tensor = self._copy_to_device(device_name)
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    return new_tensor
  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to the given device and records a gradient function."""
    new_tensor = self._copy_nograd(ctx, device_name)
    if context.executing_eagerly():
      self_device = self.device
      def grad_fun(dresult):
        # The gradient of a copy is a copy back to the source device.
        return [
            dresult._copy(device_name=self_device)
            if hasattr(dresult, "_copy") else dresult
        ]
      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor
  @property
  def shape(self):
    # `_tensor_shape` is an attribute set lazily here; the shape tuple
    # itself comes from the C extension.
    if self._tensor_shape is None:
      try:
        self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
      except core._NotOkStatusException as e:
        six.raise_from(core._status_to_exception(e.code, e.message), None)
    return self._tensor_shape
  def get_shape(self):
    """Alias of `shape`."""
    return self.shape
  def _shape_as_list(self):
    """The shape of the tensor as a Python list."""
    return list(self._shape_tuple())
  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims
  @deprecation.deprecated(None, "Use tf.identity instead.")
  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")
  @deprecation.deprecated(None, "Use tf.identity instead.")
  def gpu(self, gpu_index=0):
    """A copy of this Tensor placed on GPU `gpu_index`."""
    return self._copy(context.context(), "GPU:" + str(gpu_index))
  def set_shape(self, shape):
    # Eager tensors already have a concrete shape; only validate.
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))
  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")
  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")
  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")
  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")
  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")
  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")
  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")
  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")
  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?")
# The C extension completes `_EagerTensorBase` into the concrete EagerTensor
# class.
EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)
# `Tensor` exposes `name`/`dtype`/`shape` properties, so it qualifies as a
# dense-tensor-like type.
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor_v1(value,
                         dtype=None,
                         name=None,
                         preferred_dtype=None,
                         dtype_hint=None):
  """Converts the given `value` to a `Tensor` (TF1 API).

  `preferred_dtype` and `dtype_hint` are aliases; setting both raises via
  `deprecated_argument_lookup`.
  """
  preferred_dtype = deprecation.deprecated_argument_lookup(
      "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
  return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
  """Converts the given `value` to a `Tensor` (TF2 API).

  Args:
    value: An object to convert via the registered conversion functions.
    dtype: Optional required element type for the returned tensor.
    dtype_hint: Optional preferred dtype, used only when `dtype` is None;
      the conversion may fall back to another dtype.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` based on `value`.
  """
  return convert_to_tensor(
      value=value,
      dtype=dtype,
      name=name,
      preferred_dtype=dtype_hint,
      as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def convert_to_tensor(value,
                      dtype=None,
                      name=None,
                      as_ref=False,
                      preferred_dtype=None,
                      dtype_hint=None,
                      ctx=None,
                      accepted_result_types=(Tensor,)):
  """Implementation of the public convert_to_tensor APIs.

  Args:
    value: The value to convert.
    dtype: If set, the result must have exactly this (compatible) dtype.
    name: Optional name for a newly created tensor.
    as_ref: If True, ask conversion functions for a reference tensor.
    preferred_dtype: Soft dtype request; ignored if conversion with it fails.
    dtype_hint: Alias for `preferred_dtype`, used when the latter is unset.
    ctx: Optional eager context; defaults to the current one.
    accepted_result_types: Types a conversion function may legally return.

  Returns:
    A `Tensor` (or instance of `accepted_result_types`) based on `value`.

  Raises:
    TypeError: If no registered conversion function accepts `value`, or a
      preferred-dtype conversion returns a mismatched base dtype.
    ValueError: If `value` is a `Tensor` whose dtype conflicts with `dtype`.
    RuntimeError: If a conversion function returns a bad result, or an
      EagerTensor is captured outside of function building.
  """
  preferred_dtype = preferred_dtype or dtype_hint
  if isinstance(value, EagerTensor):
    if ctx is None:
      ctx = context.context()
    if not ctx.executing_eagerly():
      # Building a graph: eager tensors can only be captured inside functions.
      graph = get_default_graph()
      if not graph.building_function:
        raise RuntimeError("Attempting to capture an EagerTensor without "
                           "building a function.")
      return graph.capture(value, name=name)
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, Tensor):
    # Fast path: already a Tensor; only the dtype needs checking.
    if dtype is not None and not dtype.is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtype.name, value.dtype.name, value))
    return value
  if preferred_dtype is not None:
    preferred_dtype = dtypes.as_dtype(preferred_dtype)
  for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
    ret = None
    if dtype is None and preferred_dtype is not None:
      # The preferred dtype is only a hint: if conversion with it fails,
      # retry below without any dtype constraint.
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError):
        pass
      else:
        if (ret is not NotImplemented and
            ret.dtype.base_dtype != preferred_dtype.base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype, preferred_dtype.base_dtype))
    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    if ret is NotImplemented:
      # Try the next registered conversion function for this type.
      continue
    if not isinstance(ret, accepted_result_types):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, type(value)))
# Alias kept for internal callers that predate the public name.
internal_convert_to_tensor = convert_to_tensor
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A sequence of objects that can be consumed by
      `convert_to_tensor`.
    dtype: Optional element type requested for the returned tensors.
    name: Optional name prefix; element `i` is named `"<name>_<i>"`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional soft preference for the element type, used
      only when `dtype` is None.
    ctx: Optional eager context; defaults to the current one.

  Returns:
    A list of `Tensor` objects, one per element of `values`.

  Raises:
    TypeError: If `values` is not a sequence.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  if ctx is None:
    ctx = context.context()
  tensors = []
  for index, value in enumerate(values):
    element_name = "%s_%d" % (name, index) if name is not None else None
    tensors.append(
        convert_to_tensor(
            value,
            dtype=dtype,
            name=element_name,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return tensors
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects (never as references).

  Thin public wrapper around `internal_convert_n_to_tensor` with
  `as_ref=False`.
  """
  return internal_convert_n_to_tensor(
      values=values,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
  """Converts `value` to a `Tensor` or `CompositeTensor` (never a ref).

  Thin public wrapper around `internal_convert_to_tensor_or_composite`
  with `as_ref=False`.
  """
  return internal_convert_to_tensor_or_composite(
      value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
                                            dtype=None,
                                            name=None,
                                            as_ref=False):
  """Converts `value` to a `Tensor` or passes a `CompositeTensor` through.

  Args:
    value: The object to convert.
    dtype: Optional requested element type; checked for compatibility when
      `value` is a `CompositeTensor`.
    name: Optional name for a newly created tensor.
    as_ref: True if the caller wants the result as a ref tensor.

  Returns:
    A `Tensor` or the original `CompositeTensor`.

  Raises:
    ValueError: If a `CompositeTensor`'s dtype is incompatible with `dtype`.
  """
  if not isinstance(value, composite_tensor.CompositeTensor):
    return convert_to_tensor(
        value,
        dtype=dtype,
        name=name,
        as_ref=as_ref,
        accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
  # CompositeTensors are returned untouched, but only after checking that any
  # requested dtype is compatible with the one they already carry.
  value_dtype = getattr(value, "dtype", None)
  if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
  return value
def internal_convert_n_to_tensor_or_composite(values,
                                              dtype=None,
                                              name=None,
                                              as_ref=False):
  """Converts each element of `values` to a `Tensor` or `CompositeTensor`.

  `None` elements are preserved as-is.

  Args:
    values: A sequence of candidate values (elements may be None).
    dtype: Optional requested element type.
    name: Optional name prefix; element `i` is named `"<name>_<i>"`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`/`CompositeTensor`/None, one per element of `values`.

  Raises:
    TypeError: If `values` is not a sequence.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  converted = []
  for index, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    element_name = "%s_%d" % (name, index) if name is not None else None
    converted.append(
        internal_convert_to_tensor_or_composite(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor`/`CompositeTensor` (never refs).

  Thin public wrapper around `internal_convert_n_to_tensor_or_composite`
  with `as_ref=False`.
  """
  return internal_convert_n_to_tensor_or_composite(
      values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
  """Returns `dev_spec` rendered as a device string.

  `DeviceSpec` objects are serialized via `to_string()`; anything else is
  returned unchanged.
  """
  if pydev.is_device_spec(dev_spec):
    return dev_spec.to_string()
  return dev_spec
def _NodeDef(op_type, name, attrs=None):
  """Builds a `node_def_pb2.NodeDef` proto for the given op.

  Args:
    op_type: Op type name (string or bytes).
    name: Node name (string or bytes).
    attrs: Optional mapping from attr name to `AttrValue` proto.

  Returns:
    A populated `node_def_pb2.NodeDef`.
  """
  proto = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
                               name=compat.as_bytes(name))
  if attrs:
    for attr_name, attr_value in six.iteritems(attrs):
      proto.attr[attr_name].CopyFrom(attr_value)
  return proto
# Valid op names: first character is alphanumeric or '.', remaining characters
# are drawn from alphanumerics plus '_', '.', '-', '/', and '>'.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
# Valid scope names: same character set, but possibly empty and with no
# restriction on the first character.
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
  """Creates a TF_Operation in `graph`'s underlying C graph.

  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` giving the op's type, name, device and
      attrs.
    inputs: flattened inputs; each element is a `Tensor` or, for list-typed
      op inputs, a list/tuple of `Tensor`s.
    control_inputs: `Operation`s that must run before this op.
    op_def: optional `OpDef` for the op type; looked up from `graph` when
      None.

  Returns:
    The new TF_Operation handle.

  Raises:
    ValueError: if the C layer rejects the op (converted from
      `InvalidArgumentError` for backwards compatibility).
  """
  if op_def is None:
    op_def = graph._get_op_def(node_def.op)
  # Regroup the flat `inputs` to match the arity declared by `op_def`.
  inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)
  # pylint: disable=protected-access
  op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,
                                              compat.as_str(node_def.op),
                                              compat.as_str(node_def.name))
  if node_def.device:
    pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))
  # Add inputs
  for op_input in inputs:
    if isinstance(op_input, (list, tuple)):
      pywrap_tf_session.TF_AddInputList(op_desc,
                                        [t._as_tf_output() for t in op_input])
    else:
      pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())
  # Add control inputs
  for control_input in control_inputs:
    pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)
  # pylint: enable=protected-access
  # Add attrs
  for name, attr_value in node_def.attr.items():
    serialized = attr_value.SerializeToString()
    # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
    # It might be worth creating a convenient way to re-use the same status.
    pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),
                                           serialized)
  try:
    c_op = pywrap_tf_session.TF_FinishOperation(op_desc)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
  return c_op
@tf_export("Operation")
class Operation(object):
  """A node in a TensorFlow graph that performs computation on tensors.

  Wraps a C API `TF_Operation` handle; most accessors delegate to
  `pywrap_tf_session`. An `Operation` has zero or more `Tensor` inputs and
  outputs and belongs to exactly one `Graph`.
  """
  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    """Creates an `Operation`.

    Args:
      node_def: `node_def_pb2.NodeDef` describing the op, or (internal use
        only) an existing `TF_Operation` handle — see the note below.
      g: the `Graph` the op belongs to.
      inputs: list of `Tensor` inputs.
      output_types: output dtypes (unused when wrapping a `TF_Operation`).
      control_inputs: `Operation`s/`Tensor`s/`IndexedSlices` that must run
        before this op.
      input_types: expected input dtypes; defaults to the base dtypes of
        `inputs`.
      original_op: the op this one was derived from, if any.
      op_def: optional `op_def_pb2.OpDef` for this op's type.

    Raises:
      TypeError: if `node_def`, `g`, `inputs` or `control_inputs` have the
        wrong type, or `input_types` is incompatible with `inputs`.
      ValueError: if `node_def` is 2GB or larger, or its name is invalid.
    """
    # For internal use only: `node_def` can be set to a TF_Operation to create
    # an Operation for that op. This is useful for creating Operations for ops
    # indirectly created by C API methods, e.g. the ops created by
    # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
    # should be None.
    if isinstance(node_def, node_def_pb2.NodeDef):
      if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
        raise ValueError(
            "Cannot create a tensor proto whose content is larger than 2GB.")
      if not _VALID_OP_NAME_REGEX.match(node_def.name):
        raise ValueError("'%s' is not a valid node name" % node_def.name)
      c_op = None
    elif type(node_def).__name__ == "TF_Operation":
      assert inputs is None
      assert output_types is None
      assert control_inputs is None
      assert input_types is None
      assert original_op is None
      assert op_def is None
      c_op = node_def
    else:
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    self._graph = g
    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    for a in inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
    if input_types is None:
      input_types = [i.dtype.base_dtype for i in inputs]
    else:
      if not all(
          x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (node_def.name, [i.dtype for i in inputs], input_types))
    # Build the list of control inputs.
    control_input_ops = []
    if control_inputs:
      for c in control_inputs:
        control_op = None
        if isinstance(c, Operation):
          control_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          control_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        control_input_ops.append(control_op)
    # This will be set by self.inputs.
    self._inputs_val = None
    # pylint: disable=protected-access
    self._original_op = original_op
    self._traceback = tf_stack.extract_stack()
    # List of _UserDevSpecs holding code location of device context manager
    # invocations and the users original argument to them.
    self._device_code_locations = None
    # Dict mapping op name to file and line information for op colocation
    # context managers.
    self._colocation_code_locations = None
    self._control_flow_context = self.graph._get_control_flow_context()
    # Gradient function for this op. There are three ways to specify gradient
    # function, and first available gradient gets used, in the following order.
    # 1. self._gradient_function
    # 2. Gradient name registered by "_gradient_op_type" attribute.
    # 3. Gradient name registered by op.type.
    self._gradient_function = None
    # Initialize self._c_op.
    if c_op:
      self._c_op = c_op
      op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))
      name = self.name
    else:
      if op_def is None:
        op_def = self._graph._get_op_def(node_def.op)
      self._c_op = _create_c_op(self._graph, node_def, inputs,
                                control_input_ops, op_def)
      name = compat.as_str(node_def.name)
    # pylint: enable=protected-access
    self._is_stateful = op_def.is_stateful
    # Initialize self._outputs.
    num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
    self._outputs = []
    for i in range(num_outputs):
      tf_output = c_api_util.tf_output(self._c_op, i)
      output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)
      tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output)  # pylint: disable=protected-access
      self._outputs.append(tensor)
    self._id_value = self._graph._add_op(self, name)  # pylint: disable=protected-access
    if not c_op:
      self._control_flow_post_processing(input_tensors=inputs)
  def _control_flow_post_processing(self, input_tensors=None):
    """Validates input contexts and adds this op to its control flow context.

    Args:
      input_tensors: input tensors to check; defaults to `self.inputs`.
    """
    if input_tensors is None:
      input_tensors = self.inputs
    for input_tensor in input_tensors:
      control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
    if self._control_flow_context is not None:
      self._control_flow_context.AddOp(self)
  def colocation_groups(self):
    """Returns the list of colocation group bytestrings for this op."""
    default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
    try:
      class_attr = self.get_attr("_class")
    except ValueError:
      # This op has no explicit colocation group, so it is itself its
      # own root of a colocation group.
      return default_colocation_group
    attr_groups = [
        class_name for class_name in class_attr
        if class_name.startswith(b"loc:@")
    ]
    # If there are no colocation groups in the explicit _class field,
    # return the default colocation group.
    return attr_groups if attr_groups else default_colocation_group
  def values(self):
    """Returns this op's outputs as a tuple."""
    return tuple(self.outputs)
  def _get_control_flow_context(self):
    """Returns the control flow context of this op (may be None)."""
    return self._control_flow_context
  def _set_control_flow_context(self, ctx):
    """Sets the control flow context of this op."""
    self._control_flow_context = ctx
  @property
  def name(self):
    """The full name of this op, as stored in the C graph."""
    return pywrap_tf_session.TF_OperationName(self._c_op)
  @property
  def _id(self):
    """The unique integer id of this op within its graph."""
    return self._id_value
  @property
  def device(self):
    """The device string this op has been assigned to ("" if unset)."""
    return pywrap_tf_session.TF_OperationDevice(self._c_op)
  @property
  def _device_assignments(self):
    """Code locations of device context managers active at creation time."""
    return self._device_code_locations or []
  @property
  def _colocation_dict(self):
    """A copy of the colocation-context-manager code location mapping."""
    locations_dict = self._colocation_code_locations or {}
    return locations_dict.copy()
  @property
  def _output_types(self):
    """List of this op's output dtypes as raw integer enum values."""
    num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
    output_types = [
        int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))
        for i in xrange(num_outputs)
    ]
    return output_types
  def _tf_output(self, output_idx):
    """Builds a TF_Output struct for output `output_idx` of this op."""
    tf_output = pywrap_tf_session.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output
  def _tf_input(self, input_idx):
    """Builds a TF_Input struct for input `input_idx` of this op."""
    tf_input = pywrap_tf_session.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input
  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Sets this op's requested device, accepting a spec or a string."""
    self._set_device_from_string(compat.as_str(_device_string(device)))
  def _set_device_from_string(self, device_str):
    """Sets this op's requested device from a device string."""
    pywrap_tf_session.SetRequestedDevice(
        self._graph._c_graph,  # pylint: disable=protected-access
        self._c_op,  # pylint: disable=protected-access
        device_str)
  def _update_input(self, index, tensor):
    """Rewires input `index` of this op to come from `tensor`.

    Raises:
      TypeError: if `tensor` is not a `Tensor`.
    """
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    # Reset cached inputs.
    self._inputs_val = None
    pywrap_tf_session.UpdateEdge(
        self._graph._c_graph,  # pylint: disable=protected-access
        tensor._as_tf_output(),  # pylint: disable=protected-access
        self._tf_input(index))
  def _add_while_inputs(self, tensors):
    """Appends `tensors` as new inputs of this (while-loop related) op."""
    for tensor in tensors:
      if not isinstance(tensor, Tensor):
        raise TypeError("tensor must be a Tensor: %s" % tensor)
      _assert_same_graph(self, tensor)
      # Reset cached inputs.
      self._inputs_val = None
      pywrap_tf_session.AddWhileInputHack(
          self._graph._c_graph,  # pylint: disable=protected-access
          tensor._as_tf_output(),  # pylint: disable=protected-access
          self._c_op)
  def _add_control_inputs(self, ops):
    """Adds each op in `ops` as a control input of this op."""
    for op in ops:
      if not isinstance(op, Operation):
        raise TypeError("op must be an Operation: %s" % op)
      pywrap_tf_session.AddControlInput(
          self._graph._c_graph,  # pylint: disable=protected-access
          self._c_op,  # pylint: disable=protected-access
          op._c_op)  # pylint: disable=protected-access
  def _add_control_input(self, op):
    """Adds a single op as a control input of this op."""
    if not isinstance(op, Operation):
      raise TypeError("op must be an Operation: %s" % op)
    pywrap_tf_session.AddControlInput(
        self._graph._c_graph,  # pylint: disable=protected-access
        self._c_op,  # pylint: disable=protected-access
        op._c_op)  # pylint: disable=protected-access
  def _remove_all_control_inputs(self):
    """Removes every control input of this op."""
    pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op)  # pylint: disable=protected-access
  def _add_outputs(self, types, shapes):
    """Appends new output tensors of the given `types` and `shapes`."""
    assert len(types) == len(shapes)
    orig_num_outputs = len(self.outputs)
    for i in range(len(types)):
      t = Tensor(self, orig_num_outputs + i, types[i])
      self._outputs.append(t)
      t.set_shape(shapes[i])
  def __str__(self):
    return str(self.node_def)
  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
  @property
  def outputs(self):
    """The list of `Tensor` objects produced by this op."""
    return self._outputs
  @property
  def inputs(self):
    """The tuple of `Tensor` inputs of this op (lazily fetched and cached)."""
    if self._inputs_val is None:
      # pylint: disable=protected-access
      self._inputs_val = tuple(
          map(self.graph._get_tensor_by_tf_output,
              pywrap_tf_session.GetOperationInputs(self._c_op)))
      # pylint: enable=protected-access
    return self._inputs_val
  @property
  def _input_types(self):
    """List of this op's input dtypes as `DType` objects."""
    num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)
    input_types = [
        dtypes.as_dtype(
            pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))
        for i in xrange(num_inputs)
    ]
    return input_types
  @property
  def control_inputs(self):
    """The `Operation` objects this op has control dependencies on."""
    control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(
        self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(
            pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
    ]
    # pylint: enable=protected-access
  @property
  def _control_outputs(self):
    """The `Operation` objects that have a control dependency on this op."""
    control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(
        self._c_op)
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(
            pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
    ]
    # pylint: enable=protected-access
  @property
  def type(self):
    """The op type name, e.g. `"MatMul"`."""
    return pywrap_tf_session.TF_OperationOpType(self._c_op)
  @property
  def graph(self):
    """The `Graph` that contains this op."""
    return self._graph
  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """The `NodeDef` proto representing this op, deserialized from the C graph."""
    # pylint: enable=line-too-long
    with c_api_util.tf_buffer() as buf:
      pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)
      data = pywrap_tf_session.TF_GetBuffer(buf)
    node_def = node_def_pb2.NodeDef()
    node_def.ParseFromString(compat.as_bytes(data))
    return node_def
  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """The `OpDef` proto describing this op's type."""
    # pylint: enable=line-too-long
    return self._graph._get_op_def(self.type)
  @property
  def traceback(self):
    """The call stack captured when this op was created."""
    return self._traceback
  def _set_attr(self, attr_name, attr_value):
    """Sets attr `attr_name` to the `AttrValue` proto `attr_value`."""
    buf = pywrap_tf_session.TF_NewBufferFromString(
        compat.as_bytes(attr_value.SerializeToString()))
    try:
      self._set_attr_with_buf(attr_name, buf)
    finally:
      # The C buffer is always released, even when SetAttr raises.
      pywrap_tf_session.TF_DeleteBuffer(buf)
  def _set_attr_with_buf(self, attr_name, attr_buf):
    """Sets attr `attr_name` from an already-serialized TF_Buffer."""
    # pylint: disable=protected-access
    pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
                              attr_buf)
    # pylint: enable=protected-access
  def _set_func_attr(self, attr_name, func_name):
    """Sets attr `attr_name` to a function named `func_name`."""
    func = attr_value_pb2.NameAttrList(name=func_name)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
  def _set_func_list_attr(self, attr_name, func_names):
    """Sets attr `attr_name` to a list of functions named `func_names`."""
    funcs = [attr_value_pb2.NameAttrList(name=func_name)
             for func_name in func_names]
    funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
  def _set_type_list_attr(self, attr_name, types):
    """Sets attr `attr_name` to a list of types (DTypes or enum ints)."""
    if not types:
      return
    if isinstance(types[0], dtypes.DType):
      types = [dt.as_datatype_enum for dt in types]
    types_list = attr_value_pb2.AttrValue.ListValue(type=types)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
  def _set_shape_list_attr(self, attr_name, shapes):
    """Sets attr `attr_name` to a list of `TensorShape`s."""
    shapes = [s.as_proto() for s in shapes]
    shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
  def _clear_attr(self, attr_name):
    """Removes attr `attr_name` from this op."""
    # pylint: disable=protected-access
    pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
    # pylint: enable=protected-access
  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    List-valued attrs are returned as Python lists; type attrs are returned
    as `DType`s; empty values come back as `[]`.

    Raises:
      ValueError: if no attr with `name` exists (converted from
        InvalidArgumentError for backwards compatibility).
    """
    fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
    try:
      with c_api_util.tf_buffer() as buf:
        pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
        data = pywrap_tf_session.TF_GetBuffer(buf)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
    x = attr_value_pb2.AttrValue()
    x.ParseFromString(data)
    oneof_value = x.WhichOneof("value")
    if oneof_value is None:
      return []
    if oneof_value == "list":
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            return [dtypes.as_dtype(t) for t in x.list.type]
          else:
            return list(getattr(x.list, f))
      return []
    if oneof_value == "type":
      return dtypes.as_dtype(x.type)
    assert oneof_value in fields, "Unsupported field type in " + str(x)
    return getattr(x, oneof_value)
  def _get_attr_type(self, name):
    """Returns the `DType` value of the attr of this op with `name`."""
    try:
      dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
      return _DTYPES_INTERN_TABLE[dtype_enum]
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
  def _get_attr_bool(self, name):
    """Returns the bool value of the attr of this op with `name`."""
    try:
      return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
  def _get_attr_int(self, name):
    """Returns the int value of the attr of this op with `name`."""
    try:
      return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
  def run(self, feed_dict=None, session=None):
    """Runs this op in a session, defaulting to the default session."""
    _run_using_default_session(self, feed_dict, self.graph, session)
# Registry mapping op type name -> gradient function (or None for ops marked
# non-differentiable); populated via RegisterGradient / no_gradient below.
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
  """Decorator that registers a gradient function for an op type.

  Usage: `@RegisterGradient("OpType")` above the gradient function.
  """

  def __init__(self, op_type):
    """Saves `op_type`, which must be a string naming the op type."""
    if isinstance(op_type, six.string_types):
      self._op_type = op_type
    else:
      raise TypeError("op_type must be a string")

  def __call__(self, f):
    """Registers `f` as the gradient function for the op type."""
    _gradient_registry.register(f, self._op_type)
    return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
  """Marks ops of type `op_type` as not differentiable.

  Registers `None` as the gradient function, so gradient computations fail
  through the registry for this op type.

  Raises:
    TypeError: if `op_type` is not a string.
  """
  if isinstance(op_type, six.string_types):
    _gradient_registry.register(None, op_type)
  else:
    raise TypeError("op_type must be a string")
# Aliases for the old names, will be eventually removed.
# Both point at no_gradient; callers using the legacy spellings keep working.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
  """Returns the function that computes gradients for `op`, or None.

  Resolution order: an op with no inputs has no gradient; otherwise a
  gradient attached directly to the op wins; otherwise the registry is
  consulted under the "_gradient_op_type" attr (falling back to `op.type`).
  """
  if not op.inputs:
    return None

  explicit_fn = op._gradient_function  # pylint: disable=protected-access
  if explicit_fn:
    return explicit_fn

  try:
    lookup_type = op.get_attr("_gradient_op_type")
  except ValueError:
    lookup_type = op.type
  return _gradient_registry.lookup(lookup_type)
def set_shape_and_handle_data_for_outputs(_):
  """No-op; the argument is ignored.

  NOTE(review): presumably kept only so existing callers don't break —
  confirm before removing.
  """
  pass
class OpStats(object):
  """Holder for a single named statistic measured for an op.

  Instances can be accumulated with `+=` as long as the statistic types
  match.
  """

  def __init__(self, statistic_type, value=None):
    """Initializes the statistic, optionally with a starting amount."""
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    """String identifying the kind of statistic held."""
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, new_type):
    self._statistic_type = new_type

  @property
  def value(self):
    """The accumulated amount, or None if nothing has been recorded."""
    return self._value

  @value.setter
  def value(self, new_value):
    self._value = new_value

  def __iadd__(self, other):
    """Accumulates `other` into this statistic.

    Raises:
      ValueError: if the two statistic types differ.
    """
    if self.statistic_type != other.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self
# Registry mapping "<op_type>,<statistic_type>" -> statistics function;
# populated via the RegisterStatistics decorator below.
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
  """Decorator registering a statistics-computing function for an op type.

  The registry key is `"<op_type>,<statistic_type>"`, so neither component
  may contain a comma.
  """

  def __init__(self, op_type, statistic_type):
    """Validates both key components and stores them.

    Raises:
      TypeError: if either argument is not a string or contains a comma.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string.")
    if "," in op_type:
      raise TypeError("op_type must not contain a comma.")
    if not isinstance(statistic_type, six.string_types):
      raise TypeError("statistic_type must be a string.")
    if "," in statistic_type:
      raise TypeError("statistic_type must not contain a comma.")
    self._op_type = op_type
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers `f` under the combined key and returns it unchanged."""
    _stats_registry.register(f, self._op_type + "," + self._statistic_type)
    return f
def get_stats_for_node_def(graph, node, statistic_type):
  """Computes `statistic_type` for `node`, or an empty stat if unregistered.

  Args:
    graph: the graph the node belongs to.
    node: a `NodeDef`-like object with an `op` field.
    statistic_type: name of the statistic to compute.

  Returns:
    An `OpStats`; empty (value None) when no function is registered or the
    lookup/computation raises `LookupError`.
  """
  registry_key = node.op + "," + statistic_type
  try:
    stats_func = _stats_registry.lookup(registry_key)
    result = stats_func(graph, node)
  except LookupError:
    result = OpStats(statistic_type)
  return result
def name_from_scope_name(name):
  """Returns the op name corresponding to a scope name.

  Scope names carry a trailing "/"; this strips it. Any other value
  (including empty or None) is returned unchanged.
  """
  if name and name[-1] == "/":
    return name[:-1]
  return name
# Group ids for the two-group lock used by Graph (see Graph._group_lock with
# num_groups=2): one group for graph-mutating operations, one for Session.run
# calls — presumably mutually exclusive; confirm against lock_util.GroupLock.
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
def __init__(self):
self._lock = threading.RLock()
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {}
self._next_id_counter = 0
self._nodes_by_name = {}
self._version = 0
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
self._graph_device_function_stack = traceable_stack.TraceableStack()
self._default_original_op = None
self._control_flow_context = None
self._graph_control_dependencies_stack = []
self._collections = {}
self._seed = None
self._attr_scope_map = {}
self._op_to_kernel_label_map = {}
self._gradient_override_map = {}
self._gradient_function_map = {}
self._finalized = False
self._functions = collections.OrderedDict()
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
self._graph_colocation_stack = traceable_stack.TraceableStack()
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
self._unfetchable_ops = set()
self._handle_feeders = {}
self._handle_readers = {}
self._handle_movers = {}
self._handle_deleters = {}
self._graph_key = "grap-key-%d/" % (uid(),)
self._last_loss_reduction = None
self._is_loss_scaled_by_optimizer = False
self._container = ""
self._add_control_dependencies = False
self._op_def_cache = {}
self._bcast_grad_args_cache = {}
self._reduced_shape_cache = {}
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# want to break these existing cases).
pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And lets say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected leading to a "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
return self._finalized
def finalize(self):
self._finalized = True
def _unsafe_unfinalize(self):
self._finalized = False
def _get_control_flow_context(self):
return self._control_flow_context
def _set_control_flow_context(self, ctx):
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# does. Both rely on ops.py, so we can't really isinstance check
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
for input_tensor, _ in zip(func_graph_inputs,
function_def.signature.input_arg):
if input_tensor.dtype == dtypes.resource:
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
outputs = op.outputs
if op.type == "StatefulPartitionedCall":
num_outputs = len(node.attr["Tout"].list.type)
outputs = outputs[:num_outputs]
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
return compat.as_str(name) in self._functions
def _get_function(self, name):
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
name = function.name
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
gradient)
self._functions[compat.as_str(name)] = function
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
return self._building_function
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
self._check_not_finalized()
if name is None:
name = op_type
# If a names ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, attrs)
input_ops = set(t.op for t in inputs)
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value)
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel",
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
op._gradient_function = self._gradient_function_map.get(op.type)
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type",
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
  def _add_new_tf_operations(self, compute_devices=True):
    """Wraps every TF_Operation newly added to the C graph in an `Operation`.

    Args:
      compute_devices: If True, device functions are applied to each new op
        (via `_create_op_from_tf_operation`).

    Returns:
      The list of newly created `Operation` objects.
    """
    # Create all Operation objects before accessing their inputs since an op may
    # be created before its inputs.
    new_ops = [
        self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
        for c_op in c_api_util.new_tf_operations(self)
    ]
    # Second pass: now that every op exists, wire up control inputs derived
    # from the active control-dependency controllers.
    # pylint: disable=protected-access
    for op in new_ops:
      new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
      op._add_control_inputs(new_control_inputs)
      op._control_flow_post_processing()
    # pylint: enable=protected-access
    return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
  def get_operation_by_name(self, name):
    """Returns the `Operation` with the given `name`.

    Args:
      name: The string name of the Operation.

    Returns:
      The `Operation` named `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If no Operation with that name exists (via
        `as_graph_element`).
    """
    if not isinstance(name, six.string_types):
      raise TypeError("Operation names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = pywrap_tf_session.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
  def get_tensor_by_name(self, name):
    """Returns the `Tensor` with the given `name`.

    Args:
      name: The string name of the Tensor, of the form
        "<op_name>:<output_index>".

    Returns:
      The `Tensor` named `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If no Tensor with that name exists (via `as_graph_element`).
    """
    # Names should be strings.
    if not isinstance(name, six.string_types):
      raise TypeError("Tensor names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
  @property
  def _last_id(self):
    # NOTE(review): exposes `_next_id_counter` as the "last" id — this relies
    # on the counter being bumped when an op is added; confirm against the
    # op-creation path before depending on off-by-one behavior here.
    return self._next_id_counter
  def _get_op_def(self, type):  # pylint: disable=redefined-builtin
    """Returns the `OpDef` proto for op type `type`, caching on first fetch.

    The proto is retrieved from the underlying C graph via the TF C API and
    parsed once; later calls hit `self._op_def_cache`.
    """
    # NOTE: No locking is required because the lookup and insertion operations
    # on Python dictionaries are atomic.
    try:
      return self._op_def_cache[type]
    except KeyError:
      with c_api_util.tf_buffer() as buf:
        # pylint: disable=protected-access
        pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
                                           buf)
        # pylint: enable=protected-access
        data = pywrap_tf_session.TF_GetBuffer(buf)
      op_def = op_def_pb2.OpDef()
      op_def.ParseFromString(compat.as_bytes(data))
      self._op_def_cache[type] = op_def
      return op_def
  def as_default(self):
    """Returns a context manager that makes this graph the default graph."""
    return _default_graph_stack.get_controller(self)
  @property
  def collections(self):
    """Returns the names of the collections known to this graph."""
    return list(self._collections)
def add_to_collection(self, name, value):
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
  @_name_stack.setter
  def _name_stack(self, name_stack):
    # Name scopes are tracked per thread; this writes the calling thread's
    # slot directly (the getter handles lazy initialization).
    self._thread_local._name_stack = name_stack
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that pushes `name` onto the name stack.

    Yields the full scope prefix ending in "/", or "" when `name` is
    empty/None. A `name` that already ends in "/" is treated as an absolute,
    previously-uniquified scope and is reused verbatim (no new uniquifying).

    Raises:
      ValueError: If `name` does not match the valid-scope-name pattern.
    """
    if name:
      if isinstance(name, compat.bytes_or_text_types):
        name = compat.as_str(name)
      if self._name_stack:
        # Nested scopes are validated against the (looser) scope regex.
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Root-level scopes must satisfy the stricter op-name regex.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="".
      new_stack = None
    elif name[-1] == "/":
      new_stack = name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    try:
      yield "" if new_stack is None else new_stack + "/"
    finally:
      # Always restore the enclosing scope, even if the body raised.
      self._name_stack = old_stack
def unique_name(self, name, mark_as_used=True):
if self._name_stack:
name = self._name_stack + "/" + name
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
if mark_as_used:
self._names_in_use[name_key] = 1
name = "%s_%d" % (name, i - 1)
return name
  def get_name_scope(self):
    """Returns the current name scope, e.g. "scope1/scope2"."""
    return self._name_stack
  @tf_contextlib.contextmanager
  def _colocate_with_for_gradient(self, op, gradient_uid,
                                  ignore_existing=False):
    """Like `colocate_with`, but also notifies the active control-flow
    context of the gradient colocation, keyed by `gradient_uid`.
    """
    with self.colocate_with(op, ignore_existing):
      if gradient_uid is not None and self._control_flow_context is not None:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        try:
          yield
        finally:
          # Exit is paired with Enter even when the body raises. NOTE(review):
          # this re-reads self._control_flow_context at exit time; confirm it
          # cannot change inside the scope.
          self._control_flow_context.ExitGradientColocation(op, gradient_uid)
      else:
        yield
  @tf_contextlib.contextmanager
  def colocate_with(self, op, ignore_existing=False):
    """Returns a scope placing newly created ops on the same device as `op`.

    Args:
      op: The op (or convertible object) to colocate with, or None.
      ignore_existing: If True, outer colocation scopes are suspended for the
        duration of this one.

    Raises:
      ValueError: If `op` is None and `ignore_existing` is False.
    """
    if op is None and not ignore_existing:
      raise ValueError("Trying to reset colocation (op is None) but "
                       "ignore_existing is not True")
    op = _op_to_colocate_with(op, self)
    # Colocation overrides any explicit device functions: swap in an empty
    # device-function stack for the duration of the scope.
    device_fn_tmp = self._device_function_stack
    self._device_function_stack = traceable_stack.TraceableStack()
    if ignore_existing:
      current_stack = self._colocation_stack
      self._colocation_stack = traceable_stack.TraceableStack()
    if op is not None:
      # to jump over layers of context managers above us.
      self._colocation_stack.push_obj(op, offset=4)
    try:
      yield
    finally:
      # Restore device function stack
      self._device_function_stack = device_fn_tmp
      if op is not None:
        self._colocation_stack.pop_obj()
      # Reset the colocation stack if requested.
      if ignore_existing:
        self._colocation_stack = current_stack
  def _add_device_to_stack(self, device_name_or_function, offset=0):
    """Wraps the given device spec and pushes it onto the device stack.

    Args:
      device_name_or_function: A device name/function (see `_UserDeviceSpec`).
      offset: Extra caller stack frames to skip when recording traceback
        metadata, on top of the one frame for this call itself.

    Returns:
      The `_UserDeviceSpec` that was pushed.
    """
    total_offset = 1 + offset
    spec = _UserDeviceSpec(device_name_or_function)
    self._device_function_stack.push_obj(spec, offset=total_offset)
    return spec
  @tf_contextlib.contextmanager
  def device(self, device_name_or_function):
    """Returns a context manager that pins ops created inside to a device.

    The scope must be exited in LIFO order; exiting with a different top of
    stack than was pushed indicates improperly nested `device` scopes.
    """
    # pylint: disable=line-too-long
    self._add_device_to_stack(device_name_or_function, offset=2)
    old_top_of_stack = self._device_function_stack.peek_top_obj()
    try:
      yield
    finally:
      new_top_of_stack = self._device_function_stack.peek_top_obj()
      if old_top_of_stack is not new_top_of_stack:
        raise RuntimeError("Exiting device scope without proper scope nesting.")
      self._device_function_stack.pop_obj()
  def _apply_device_functions(self, op):
    """Applies the current device function stack to the given operation."""
    # Apply any device functions in LIFO order, so that the most recently
    # pushed function has the first chance to apply a device to the op.
    # We apply here because the result can depend on the Operation's
    # device string as merged so far.
    prior_device_string = None
    for device_spec in self._device_function_stack.peek_objs():
      if device_spec.is_null_merge:
        continue
      if device_spec.function is None:
        break
      device_string = device_spec.string_merge(op)
      # NOTE(review): identity (`is not`) comparison on strings — equal but
      # distinct string objects would trigger a redundant (harmless)
      # _set_device_from_string call; confirm this is intentional.
      if device_string is not prior_device_string:
        op._set_device_from_string(device_string)
        prior_device_string = device_string
    op._device_code_locations = self._snapshot_device_function_stack_metadata()
@tf_contextlib.contextmanager
def container(self, container_name):
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
  class _ControlDependenciesController(object):
    """Context manager for `Graph.control_dependencies()`.

    With `control_inputs=None` it clears all control dependencies (and the
    control-flow context) for the duration of the scope; otherwise it pushes
    the given control inputs onto the graph's controller stack.
    """

    def __init__(self, graph, control_inputs):
      """Create a new `_ControlDependenciesController`.

      Args:
        graph: The graph this controller belongs to.
        control_inputs: List of ops to use as control inputs, or None to
          indicate a "reset" scope.
      """
      self._graph = graph
      if control_inputs is None:
        self._control_inputs_val = []
        self._new_stack = True
      else:
        self._control_inputs_val = control_inputs
        self._new_stack = False
      # Ops/tensors created while this controller is active; used to avoid
      # adding a control edge where a data edge already exists.
      self._seen_nodes = set()
      self._old_stack = None
      self._old_control_flow_context = None

    def __enter__(self):
      if self._new_stack:
        # Clear the control_dependencies stack (and the control-flow context)
        # for the duration of a "reset" scope.
        self._old_stack = self._graph._control_dependencies_stack
        self._graph._control_dependencies_stack = []
        self._old_control_flow_context = self._graph._get_control_flow_context()
        self._graph._set_control_flow_context(None)
      self._graph._push_control_dependencies_controller(self)

    def __exit__(self, unused_type, unused_value, unused_traceback):
      self._graph._pop_control_dependencies_controller(self)
      if self._new_stack:
        self._graph._control_dependencies_stack = self._old_stack
        self._graph._set_control_flow_context(self._old_control_flow_context)

    @property
    def control_inputs(self):
      # The ops this controller adds as control inputs to new ops.
      return self._control_inputs_val

    def add_op(self, op):
      """Record that `op` was created inside this scope."""
      if isinstance(op, Tensor):
        # Tensors are keyed by reference so unhashable/overloaded-eq tensors
        # can live in the set.
        op = op.ref()
      self._seen_nodes.add(op)

    def op_in_group(self, op):
      """Whether `op` was created inside this scope."""
      if isinstance(op, Tensor):
        op = op.ref()
      return op in self._seen_nodes
  def _push_control_dependencies_controller(self, controller):
    # Make `controller` the innermost active control-dependency scope.
    self._control_dependencies_stack.append(controller)
  def _pop_control_dependencies_controller(self, controller):
    # Scopes must unwind in LIFO order; the assert is a debug-only check
    # (stripped under -O) that `controller` really is the innermost scope.
    assert self._control_dependencies_stack[-1] is controller
    self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
ret = []
for controller in self._control_dependencies_stack:
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
ret.extend(c for c in controller.control_inputs if c not in input_ops)
return ret
  def _record_op_seen_by_control_dependencies(self, op):
    # Tell every active controller about the new op so later consumers of it
    # are recognized as data-dominated (see _control_dependencies_for_inputs).
    for controller in self._control_dependencies_stack:
      controller.add_op(op)
  def control_dependencies(self, control_inputs):
    """Returns a context manager adding control dependencies on new ops.

    Args:
      control_inputs: A list of Operations/Tensors (or names thereof), or
        None to clear all control dependencies inside the scope.

    Returns:
      A `_ControlDependenciesController` context manager.

    Raises:
      TypeError: If an element cannot be resolved to an Operation or Tensor.
    """
    if control_inputs is None:
      return self._ControlDependenciesController(self, None)
    control_ops = []
    current = self._current_control_dependencies()
    for c in control_inputs:
      # Convert IndexedSlices and resource-variable-like objects (anything
      # with both `_handle` and `op`) to their underlying op up front.
      if (isinstance(c, IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      c = self.as_graph_element(c)
      if isinstance(c, Tensor):
        c = c.op
      elif not isinstance(c, Operation):
        raise TypeError("Control input must be Operation or Tensor: %s" % c)
      # Deduplicate against inputs already imposed by enclosing scopes.
      if c not in current:
        control_ops.append(c)
        current.add(c)
    return self._ControlDependenciesController(self, control_ops)
  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _attr_scope(self, attr_map):
    """Scope that overrides node attributes for ops created inside it.

    Args:
      attr_map: Dict mapping attr names to `AttrValue` protos, to callables
        producing them, or to None (which removes an inherited override).

    Raises:
      TypeError: If `attr_map` or its contents have the wrong types.
    """
    if not isinstance(attr_map, dict):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers")
    # The saved_attrs dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_attrs = {}
    # Install the given attribute
    for name, attr in attr_map.items():
      if not (isinstance(name, six.string_types) and
              (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
               callable(attr))):
        raise TypeError("attr_map must be a dictionary mapping "
                        "strings to AttrValue protocol buffers or "
                        "callables that emit AttrValue protocol buffers")
      try:
        saved_attrs[name] = self._attr_scope_map[name]
      except KeyError:
        pass
      if attr is None:
        del self._attr_scope_map[name]
      else:
        self._attr_scope_map[name] = attr
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the attributes set for this context, and restore any saved
      # attributes.
      for name, attr in attr_map.items():
        try:
          self._attr_scope_map[name] = saved_attrs[name]
        except KeyError:
          del self._attr_scope_map[name]
  # pylint: enable=g-doc-return-or-yield
  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _kernel_label_map(self, op_to_kernel_label_map):
    """Scope that overrides the kernel label for the given op types.

    Args:
      op_to_kernel_label_map: Dict mapping op-type strings to kernel label
        strings; previous labels are restored on exit.

    Raises:
      TypeError: If the map or its contents are not strings.
    """
    if not isinstance(op_to_kernel_label_map, dict):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_labels dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_labels = {}
    # Install the given label
    for op_type, label in op_to_kernel_label_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(label, six.string_types)):
        raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
      except KeyError:
        pass
      self._op_to_kernel_label_map[op_type] = label
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, label in op_to_kernel_label_map.items():
        try:
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          del self._op_to_kernel_label_map[op_type]
  # pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
# This is an internal API and we don't need nested context for this.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
yield
self._gradient_function_map = {}
  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """Scope that overrides the registered gradient for the given op types.

    Args:
      op_type_map: Dict mapping op-type strings to alternative op-type
        strings whose gradient functions should be used instead.

    Raises:
      TypeError: If the map or its contents are not strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # Save any overrides this scope shadows so they can be restored on exit.
    saved_mappings = {}
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield
    finally:
      # Remove this scope's overrides and restore any shadowed ones.
      for op_type, mapped_op_type in op_type_map.items():
        try:
          self._gradient_override_map[op_type] = saved_mappings[op_type]
        except KeyError:
          del self._gradient_override_map[op_type]
  def prevent_feeding(self, tensor):
    """Marks `tensor` as unfeedable in this graph."""
    self._unfeedable_tensors.add(tensor)
  def is_feedable(self, tensor):
    """Returns True if and only if `tensor` is feedable."""
    return tensor not in self._unfeedable_tensors
  def prevent_fetching(self, op):
    """Marks `op` as unfetchable in this graph."""
    self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
  @property
  def _device_function_stack(self):
    """Device function stack: thread-local (lazily seeded) or graph-global."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where the stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_device_function_stack"):
        # Seed this thread's stack with a copy of the graph-wide stack so the
        # thread starts from the state captured at first access.
        stack_copy_for_this_thread = self._graph_device_function_stack.copy()
        self._thread_local._device_function_stack = stack_copy_for_this_thread
      return self._thread_local._device_function_stack
      # pylint: enable=protected-access
    else:
      return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
  @_device_function_stack.setter
  def _device_function_stack(self, device_function_stack):
    # Mirrors the getter: writes go to the thread-local slot when stacks are
    # thread-local, otherwise to the shared graph-wide stack.
    if self._stack_state_is_thread_local:
      # pylint: disable=protected-access
      self._thread_local._device_function_stack = device_function_stack
      # pylint: enable=protected-access
    else:
      self._graph_device_function_stack = device_function_stack
  @property
  def _colocation_stack(self):
    """Colocation stack: thread-local (lazily seeded) or graph-global."""
    if self._stack_state_is_thread_local:
      # This may be called from a thread where colocation_stack doesn't yet
      # exist; seed it with a copy of the graph-wide stack.
      if not hasattr(self._thread_local, "_colocation_stack"):
        stack_copy_for_this_thread = self._graph_colocation_stack.copy()
        self._thread_local._colocation_stack = stack_copy_for_this_thread
      return self._thread_local._colocation_stack
    else:
      return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
  @_colocation_stack.setter
  def _colocation_stack(self, colocation_stack):
    # Mirrors the getter: thread-local slot vs. shared graph-wide stack.
    if self._stack_state_is_thread_local:
      self._thread_local._colocation_stack = colocation_stack
    else:
      self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
  @_control_dependencies_stack.setter
  def _control_dependencies_stack(self, control_dependencies):
    # Mirrors the getter: thread-local slot vs. shared graph-wide stack.
    if self._stack_state_is_thread_local:
      self._thread_local._control_dependencies_stack = control_dependencies
    else:
      self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
  @_distribution_strategy_stack.setter
  def _distribution_strategy_stack(self, _distribution_strategy_stack):
    # Strategy stacks are always per-thread (no graph-global fallback).
    self._thread_local._distribution_strategy_stack = (  # pylint: disable=protected-access
        _distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
  @_global_distribute_strategy_scope.setter
  def _global_distribute_strategy_scope(self, distribute_strategy_scope):
    # Always stored per-thread; no graph-global fallback.
    self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
  @_auto_cast_variable_read_dtype.setter
  def _auto_cast_variable_read_dtype(self, dtype):
    # Normalize truthy values to a DType; falsy values (e.g. None) are
    # stored as-is to mean "no auto-cast".
    if dtype:
      dtype = dtypes.as_dtype(dtype)
    self._thread_local._auto_cast_variable_read_dtype = dtype  # pylint: disable=protected-access
  @tf_contextlib.contextmanager
  def _enable_auto_casting_variables(self, dtype):
    """Scope during which AutoCastVariables are read as `dtype`.

    The previous read dtype is restored on exit, even if the body raises.
    """
    prev_read_dtype = self._auto_cast_variable_read_dtype
    try:
      self._auto_cast_variable_read_dtype = dtype
      yield
    finally:
      self._auto_cast_variable_read_dtype = prev_read_dtype
  def _mutation_lock(self):
    """Returns a lock guarding code that creates and mutates ops."""
    return self._group_lock.group(_MUTATION_LOCK_GROUP)
  def _session_run_lock(self):
    """Returns a lock guarding code for Session.run."""
    return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
  """V1 `tf.device`: returns a scope pinning new ops to the given device.

  Args:
    device_name_or_function: A device name string, or a device function
      (device functions are rejected under eager execution).

  Returns:
    A context manager for the device scope.

  Raises:
    RuntimeError: If eager execution is enabled and a function is given.
  """
  if context.executing_eagerly():
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)
  elif executing_eagerly_outside_functions():
    # Inside a function but eager outside: apply the scope both to the graph
    # being built and (for plain device strings) to the eager context.
    @tf_contextlib.contextmanager
    def combined(device_name_or_function):
      with get_default_graph().device(device_name_or_function):
        if not callable(device_name_or_function):
          with context.device(device_name_or_function):
            yield
        else:
          yield

    return combined(device_name_or_function)
  else:
    return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
  """V2 `tf.device`: like `device`, but device functions are never allowed.

  Raises:
    RuntimeError: If `device_name` is a function.
  """
  if callable(device_name):
    raise RuntimeError("tf.device does not support functions.")
  return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
  """Returns a scope that sets the default resource container (V1)."""
  return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Module-level dispatcher for gradient colocation (eager vs. graph)."""
  if context.executing_eagerly():
    if op is not None:
      # Under eager execution colocation degrades to a plain device scope on
      # the op's device.
      if not hasattr(op, "device"):
        op = internal_convert_to_tensor_or_indexed_slices(op)
      return device(op.device)
    else:
      return NullContextmanager()
  else:
    default_graph = get_default_graph()
    if isinstance(op, EagerTensor):
      # An eager tensor only makes sense here while tracing a function.
      if default_graph.building_function:
        return default_graph.device(op.device)
      else:
        raise ValueError("Encountered an Eager-defined Tensor during graph "
                         "construction, but a function was not being built.")
    return default_graph._colocate_with_for_gradient(
        op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
  """Internal, non-deprecated alias for colocation with `op`."""
  return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
    date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
  """Deprecated public V1 endpoint; delegates to the internal alias."""
  return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
  """Public wrapper: control dependencies on the default graph.

  Under eager execution ordering is already sequential, so any callables in
  `control_inputs` are simply invoked and a no-op scope is returned.
  """
  if context.executing_eagerly():
    if control_inputs:
      # Execute any pending callables.
      for control in control_inputs:
        if callable(control):
          control()
    return NullContextmanager()
  else:
    return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    # When True, get_controller asserts strict LIFO nesting on exit.
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost default, or None when the stack is empty."""
    return self.stack[-1] if len(self.stack) >= 1 else None

  def reset(self):
    self.stack = []

  def is_cleared(self):
    """Check for no default being set."""
    return not self.stack

  @property
  def enforce_nesting(self):
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    self.stack.append(default)
    try:
      yield default
    finally:
      # stack may be empty if reset() was called
      if self.stack:
        if self._enforce_nesting:
          if self.stack[-1] is not default:
            raise AssertionError(
                "Nesting violated for default stack of %s objects" %
                type(default))
          self.stack.pop()
        else:
          # Non-strict mode: remove this default wherever it sits.
          self.stack.remove(default)
# Thread-local stack backing `default_session` / `get_default_session`.
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Returns a context manager making `session` the default session."""
  return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
  """Returns the innermost default session, or None if none is active."""
  return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of Graphs, with a global (lazily created) default."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    self._global_default_graph = None

  def get_default(self):
    """Returns the innermost graph, falling back to the global default."""
    ret = super(_DefaultGraphStack, self).get_default()
    if ret is None:
      ret = self._GetGlobalDefaultGraph()
    return ret

  def _GetGlobalDefaultGraph(self):
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or set
      # provide some other feedback to prevent confusion when a mixture of
      # the global default graph and an explicit graph are combined in the
      # same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    # Also drop the global default so a fresh Graph is created on next use.
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """Pushes `default` and records the switch on the eager context stack."""
    context.context().context_switches.push(default.building_function,
                                            default.as_default,
                                            default._device_function_stack)
    try:
      with super(_DefaultGraphStack,
                 self).get_controller(default) as g, context.graph_mode():
        yield g
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
      context.context().context_switches.pop()
# Thread-local stack backing `get_default_graph` / `Graph.as_default`.
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
  """Returns (outer_context_fn, innermost_nonempty_device_stack).

  `outer_context_fn` is a callable (entered as a context manager) for the
  outermost context that is not building a function.

  Raises:
    RuntimeError: If every context (including the global graph) is building
      a function and no eager context was previously active.
  """
  default_graph = get_default_graph()
  outer_context = None
  innermost_nonempty_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access
  if not _default_graph_stack.stack:
    # If the default graph stack is empty, then we cannot be building a
    # function. Install the global graph (which, in this case, is also the
    # default graph) as the outer context.
    if default_graph.building_function:
      raise RuntimeError("The global graph is building a function.")
    outer_context = default_graph.as_default
  else:
    # Find a context that is not building a function.
    for stack_entry in reversed(context.context().context_switches.stack):
      if not innermost_nonempty_device_stack:
        innermost_nonempty_device_stack = stack_entry.device_stack
      if not stack_entry.is_building_function:
        outer_context = stack_entry.enter_context_fn
        break
    if outer_context is None:
      # As a last resort, obtain the global default graph; this graph doesn't
      # live on the context-switch stack (so it wasn't found above), but it is
      # reachable through the graph stack.
      outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default
  if outer_context is None:
    raise RuntimeError("All graphs are building functions, and no "
                       "eager context was previously active.")
  return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
  """A context manager that lifts ops out of control-flow/function-building
  graphs into the outermost non-function context, preserving the current
  name scope and the innermost non-empty device stack, and pausing gradient
  tape recording for its duration.
  """
  # pylint: enable=g-doc-return-or-yield,line-too-long
  if context.executing_eagerly():
    # Fastpath.
    with tape.stop_recording():
      yield
  else:
    # Retrieve the active name scope: entering an `init_scope` preserves
    # the name scope of the current context.
    scope = get_default_graph().get_name_scope()
    if scope and scope[-1] != "/":
      # Names that end with trailing slashes are treated by `name_scope` as
      # absolute.
      scope = scope + "/"
    outer_context, innermost_nonempty_device_stack = (
        _get_outer_context_and_inner_device_stack())
    outer_graph = None
    outer_device_stack = None
    try:
      with outer_context(), name_scope(
          scope, skip_on_eager=False), control_dependencies(
              None), tape.stop_recording():
        context_manager = NullContextmanager
        context_manager_input = None
        if not context.executing_eagerly():
          # The device stack is preserved when lifting into a graph. Eager
          # execution doesn't implement device stacks and in particular it
          # has no equivalent swap operation, so the stack transplant below
          # only applies on the graph path.
          outer_graph = get_default_graph()
          outer_device_stack = outer_graph._device_function_stack
          outer_graph._device_function_stack = innermost_nonempty_device_stack
        elif innermost_nonempty_device_stack is not None:
          # On the eager path, replay only the innermost concrete (raw
          # string) device, stopping at the first unset device function.
          for device_spec in innermost_nonempty_device_stack.peek_objs():
            if device_spec.function is None:
              break
            if device_spec.raw_string:
              context_manager = context.device
              context_manager_input = device_spec.raw_string
              break
        with context_manager(context_manager_input):
          yield
    finally:
      # If an exception is raised here it may be hiding a related exception
      # in try-block (just above).
      if outer_graph is not None:
        outer_graph._device_function_stack = outer_device_stack
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions():
  """Returns True if executing eagerly, even while building a function."""
  if context.executing_eagerly():
    return True
  else:
    # Check whether the outermost non-function-building context is eager.
    outer_context, _ = _get_outer_context_and_inner_device_stack()
    with outer_context():
      return context.executing_eagerly()
def inside_function():
  """Returns True when the current default graph is building a function."""
  graph = get_default_graph()
  return graph.building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Must be called at program startup, before any TensorFlow graph has been
  created or executed.

  Args:
    config: (Optional) A `tf.ConfigProto` configuring the environment in which
      operations are executed; analogous to a `tf.Session` config.
    device_policy: (Optional) One of the
      `tf.contrib.eager.DEVICE_PLACEMENT_*` policies controlling how ops
      handle inputs on a different device.
    execution_mode: (Optional) `tf.contrib.eager.SYNC` or
      `tf.contrib.eager.ASYNC`.

  Raises:
    ValueError: (from `enable_eager_execution_internal`) if options are
      invalid or eager execution cannot be enabled anymore.
  """
  _api_usage_gauge.get_cell().set(True)
  # No-op when eager execution is already the default mode.
  if context.default_execution_mode != context.EAGER_MODE:
    return enable_eager_execution_internal(
        config=config,
        device_policy=device_policy,
        execution_mode=execution_mode,
        server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
  """Disables eager execution.

  Should be called at program startup, before any graphs, ops, or tensors
  have been created.
  """
  _api_usage_gauge.get_cell().set(False)
  context.default_execution_mode = context.GRAPH_MODE
  c = context.context_safe()
  if c is not None:
    # A context already exists: flip this thread back into graph mode too.
    c._thread_local_data.is_eager = False  # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
                                    device_policy=None,
                                    execution_mode=None,
                                    server_def=None):
  """Enables eager execution for the lifetime of this program.

  Internal variant of `enable_eager_execution` that also accepts a
  `server_def` for remote execution.

  Args:
    config: See `enable_eager_execution`.
    device_policy: See `enable_eager_execution`.
    execution_mode: See `enable_eager_execution`.
    server_def: (Optional.) A tensorflow::ServerDef proto enabling execution
      on remote devices.

  Raises:
    TypeError: if `config` is not a `tf.ConfigProto`.
    ValueError: if an option value is invalid, if a graph has already been
      created, or if the options conflict with an already-active eager
      context.
  """
  if config is not None and not isinstance(config, config_pb2.ConfigProto):
    raise TypeError("config must be a tf.ConfigProto, but got %s" %
                    type(config))
  if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
                           context.DEVICE_PLACEMENT_WARN,
                           context.DEVICE_PLACEMENT_SILENT,
                           context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
    raise ValueError(
        "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
    )
  if execution_mode not in (None, context.SYNC, context.ASYNC):
    raise ValueError(
        "execution_mode must be one of None, tf.contrib.eager.SYNC, "
        "tf.contrib.eager.ASYNC")
  if context.default_execution_mode == context.GRAPH_MODE:
    # Eager must be enabled before any graph is ever materialized.
    graph_mode_has_been_used = (
        _default_graph_stack._global_default_graph is not None)  # pylint: disable=protected-access
    if graph_mode_has_been_used:
      raise ValueError(
          "tf.enable_eager_execution must be called at program startup.")
  context.default_execution_mode = context.EAGER_MODE
  # pylint: disable=protected-access
  with context._context_lock:
    if context._context is None:
      context._set_context_locked(context.Context(
          config=config,
          device_policy=device_policy,
          execution_mode=execution_mode,
          server_def=server_def))
    elif ((config is not None and config is not context._context._config) or
          (device_policy is not None and
           device_policy is not context._context._device_policy) or
          (execution_mode is not None and
           execution_mode is not context._context._execution_mode)):
      raise ValueError(
          "Trying to change the options of an active eager"
          " execution. Context config: %s, specified config:"
          " %s. Context device policy: %s, specified device"
          " policy: %s. Context execution mode: %s, "
          " specified execution mode %s." %
          (context._context._config, config, context._context._device_policy,
           device_policy, context._context._execution_mode, execution_mode))
    else:
      # Context already exists with compatible options: just flip this
      # thread into eager mode.
      context._context._thread_local_data.is_eager = True
  # pylint: enable=protected-access
  # Monkey patch: the context is now guaranteed initialized, so the cheaper
  # accessor can replace the conditional one.
  context.context = context.context_safe
def eager_run(main=None, argv=None):
  """Runs the program with eager execution enabled.

  Enables eager execution, then delegates to `app.run` to parse flags and
  invoke `main`.

  Args:
    main: Main function to run with `argv` (passed through to `app.run`).
    argv: Command-line argument list (passed through to `app.run`).
  """
  enable_eager_execution()
  app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  Raises:
    AssertionError: if called while a nested graph (`Graph.as_default`) is
      still active.
  """
  if _default_graph_stack.is_cleared():
    _default_graph_stack.reset()
    return
  raise AssertionError("Do not use tf.reset_default_graph() to clear "
                       "nested graphs. If you need a cleared graph, "
                       "exit the nesting and create a new graph.")
@tf_export(v1=["get_default_graph"])
def get_default_graph():
  """Returns the default graph for the current thread.

  Returns:
    The innermost `Graph` entered via `Graph.as_default`, or the global
    default graph if none has been entered.
  """
  return _default_graph_stack.get_default()
def has_default_graph():
  """Returns True if at least one graph has been explicitly made default."""
  return bool(_default_graph_stack.stack)
def get_name_scope():
  """Returns the current name scope.

  In eager mode this is the context's scope name without its trailing "/";
  in graph mode it is the default graph's current name scope.
  """
  if context.executing_eagerly():
    return context.context().scope_name.rstrip("/")
  return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  Consistency rules: all graph elements in `op_input_list` must come from a
  single graph; if `graph` is supplied, they must come from `graph`.

  Args:
    op_input_list: Op inputs (any mix of tensors, operations, and
      convertible values).
    graph: (Optional) The graph the inputs are expected to live on.

  Returns:
    The single graph the inputs belong to, or the current default graph.

  Raises:
    TypeError: if `graph` is not a `Graph`.
    ValueError: if inputs come from different graphs, or not from `graph`.
  """
  current_default_graph = get_default_graph()
  if current_default_graph.building_function:
    # Function graphs capture outer tensors automatically, so the inputs'
    # own graphs are irrelevant here.
    return current_default_graph

  op_input_list = tuple(op_input_list)  # Handle generators correctly.
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph, or the first one found in the inputs.
  # 2. If the graph was not specified, the first graph element found wins.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean
    # this up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 3. If we did not find a graph_element, we fall back on the default graph.
  return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
  """Standard names to use for graph collections.

  These string keys index the well-known collections on a `Graph`; library
  code uses them to find variables, summaries, initializers, etc.
  """

  # Variable collections.
  GLOBAL_VARIABLES = "variables"
  LOCAL_VARIABLES = "local_variables"
  METRIC_VARIABLES = "metric_variables"
  MODEL_VARIABLES = "model_variables"
  TRAINABLE_VARIABLES = "trainable_variables"
  # Summaries, queue runners, and table/asset bookkeeping.
  SUMMARIES = "summaries"
  QUEUE_RUNNERS = "queue_runners"
  TABLE_INITIALIZERS = "table_initializer"
  ASSET_FILEPATHS = "asset_filepaths"
  # Training-support collections.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  REGULARIZATION_LOSSES = "regularization_losses"
  CONCATENATED_VARIABLES = "concatenated_variables"
  SAVERS = "savers"
  # Layer-introspection collections (used by e.g. tf.contrib.layers).
  WEIGHTS = "weights"
  BIASES = "biases"
  ACTIVATIONS = "activations"
  UPDATE_OPS = "update_ops"
  LOSSES = "losses"
  SAVEABLE_OBJECTS = "saveable_objects"
  # Resource/initialization collections.
  RESOURCES = "resources"
  LOCAL_RESOURCES = "local_resources"
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"
  # Estimator/eval hooks.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"
  # Control-flow context serialization keys.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"
  # Used to store v2 summary names (internal).
  _SUMMARY_COLLECTION = "_SUMMARY_V2"
  # List of all collections that hold variables (internal).
  _VARIABLE_COLLECTIONS = [
      GLOBAL_VARIABLES,
      LOCAL_VARIABLES,
      METRIC_VARIABLES,
      MODEL_VARIABLES,
      TRAINABLE_VARIABLES,
      MOVING_AVERAGE_VARIABLES,
      CONCATENATED_VARIABLES,
      TRAINABLE_RESOURCE_VARIABLES,
  ]
  # Key for streaming model ports (internal).
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    # Deprecated alias kept for backwards compatibility.
    return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
  """Breaks reference cycles in `graph` so that it can be garbage-collected.

  Args:
    graph: The `Graph` to dismantle; unusable afterwards.
  """
  memory.dismantle_ordered_dict(graph._functions)  # pylint: disable=protected-access
  # Clearing each operation's __dict__ severs its back-references into the
  # graph; finally the graph's own __dict__ is emptied.
  for operation in graph.get_operations():
    operation.__dict__ = {}
  graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
  """Stores `value` in the collection `name` on the default graph.

  Wrapper for `Graph.add_to_collection` on the default graph.
  """
  get_default_graph().add_to_collection(name, value)


@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
  """Stores `value` in each of the collections in `names` on the default
  graph. Wrapper for `Graph.add_to_collections`."""
  get_default_graph().add_to_collections(names, value)


@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
  """Returns the *mutable* list for collection `key` on the default graph.

  Unlike `get_collection`, modifications to the returned list are reflected
  in the graph's collection.
  """
  return get_default_graph().get_collection_ref(key)


@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
  """Returns a copy of the list of values in collection `key` on the default
  graph, optionally filtered by name `scope`."""
  return get_default_graph().get_collection(key, scope)


def get_all_collection_keys():
  """Returns a list of collection names used in the default graph."""
  return get_default_graph().get_all_collection_keys()
def name_scope(name, default_name=None, values=None, skip_on_eager=True):
  """Internal-only entry point for `name_scope*`.

  Returns a context manager appropriate for the current execution mode.

  Args:
    name: The name argument that is passed to the op function.
    default_name: The default name to use if the `name` argument is `None`.
    values: The list of `Tensor` arguments that are passed to the op function.
    skip_on_eager: Indicates to return NullContextmanager if executing eagerly.

  Returns:
    `name_scope*` context manager.
  """
  ctx = context.context()
  in_eager_mode = ctx.executing_eagerly()
  if not in_eager_mode:
    return internal_name_scope_v1(name, default_name, values)

  if skip_on_eager:
    # Callers that only need scoping for graph mode can skip the eager
    # bookkeeping entirely.
    return NullContextmanager()

  name = default_name if name is None else name
  if values:
    # The presence of a graph tensor in `values` overrides the context.
    # NOTE(review): only exact `Tensor` instances (not subclasses) trigger
    # the graph path here — mirrors `_get_graph_from_inputs`.
    graph_value = next((value for value in values if type(value) == Tensor),  # pylint: disable=unidiomatic-typecheck
                       None)
    if graph_value is not None:
      return graph_value.graph.name_scope(name)

  return name_scope_v2(name or "")
class internal_name_scope_v1(object):
  """Graph-only version of `name_scope_v1`; may also switch default graphs."""

  @property
  def name(self):
    # The scope name chosen at construction time.
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op
        function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if not (default_name is None or isinstance(default_name, six.string_types)):
      raise TypeError(
          "`default_name` type (%s) is not a string type. You likely meant to "
          "pass this into the `values` kwarg." % type(default_name))
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    if self._name is None and self._values is not None:
      # Only an error when values were actually provided; name=None alone is
      # a legitimate "reset to top scope" idiom.
      raise ValueError(
          "At least one of name (%s) and default_name (%s) must be provided."
          % (self._name, self._default_name))

    g = get_default_graph()
    if self._values and not g.building_function:
      # Specialize based on the knowledge that `_get_graph_from_inputs()`
      # ignores `inputs` when building a function.
      g_from_inputs = _get_graph_from_inputs(self._values)
      if g_from_inputs is not g:
        # The tensors in `values` live on a different graph: make that graph
        # the default for the duration of the scope.
        g = g_from_inputs
        self._g_manager = g.as_default()
        self._g_manager.__enter__()
      else:
        self._g_manager = None
    else:
      self._g_manager = None

    try:
      self._name_scope = g.name_scope(self._name)
      return self._name_scope.__enter__()
    except:
      # Undo the graph switch before propagating the error.
      if self._g_manager is not None:
        self._g_manager.__exit__(*sys.exc_info())
      raise

  def __exit__(self, *exc_info):
    # Exit in reverse order of entry: name scope first, then graph manager.
    self._name_scope.__exit__(*exc_info)
    if self._g_manager is not None:
      self._g_manager.__exit__(*exc_info)
@tf_export(v1=["name_scope"])
class name_scope_v1(object):
  """A context manager for use when defining a Python op.

  Delegates to the mode-appropriate scope returned by `name_scope()`.
  """

  @property
  def name(self):
    # The name chosen at construction time (falls back to `default_name`).
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op
        function.
    """
    self._name_scope = name_scope(
        name, default_name, values, skip_on_eager=False)
    self._name = default_name if name is None else name

  def __enter__(self):
    return self._name_scope.__enter__()

  def __exit__(self, *exc_info):
    return self._name_scope.__exit__(*exc_info)
def enter_eager_name_scope(ctx, name):
  """Updates the eager context's scope name and returns the new/old names.

  Args:
    ctx: The eager context whose `scope_name` is updated in place.
    name: The scope component to push; falsy resets the scope to "".

  Returns:
    A `(new_scope_name, old_scope_name)` pair.
  """
  previous = ctx.scope_name
  if not name:
    updated = ""
  else:
    # Normalize to a trailing "/" and nest under the previous scope.
    updated = name if name.endswith("/") else name + "/"
    if previous:
      updated = previous + updated
  ctx.scope_name = updated
  return updated, previous
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
  """A context manager for use when defining a Python op.

  Works in both eager and graph modes; `__enter__` returns the full
  (possibly uniquified) scope name.
  """

  def __init__(self, name):
    """Initialize the context manager.

    Args:
      name: The prefix to use on all names created within the name scope.

    Raises:
      ValueError: If name is None, or not a string.
    """
    if name is None or not isinstance(name, six.string_types):
      raise ValueError("name for name_scope must be a string.")
    self._name = name
    # Stack of callables that undo each successful __enter__.
    self._exit_fns = []

  @property
  def name(self):
    return self._name

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.
    """
    ctx = context.context()
    if ctx.executing_eagerly():
      scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
      # Exiting just restores the previous scope name on the context.
      self._exit_fns.append(
          lambda *a: setattr(ctx, "scope_name", old_scope_name))
    else:
      scope = get_default_graph().name_scope(self._name)
      scope_name = scope.__enter__()
      self._exit_fns.append(scope.__exit__)
    return scope_name

  def __exit__(self, type_arg, value_arg, traceback_arg):
    exit_fn = self._exit_fns.pop()
    exit_fn(type_arg, value_arg, traceback_arg)
    return False  # Never suppress exceptions.
def strip_name_scope(name, export_scope):
  """Removes `export_scope` (and its trailing "/") from the start of `name`.

  Args:
    name: A node or tensor name, possibly behind a "^" control marker or a
      "loc:@" colocation prefix.
    export_scope: The scope prefix to strip; falsy leaves `name` untouched.

  Returns:
    The stripped name, or `name` unchanged when stripping is not applicable.
  """
  if not export_scope:
    return name
  if export_scope[-1] == "/":
    export_scope = export_scope[:-1]
  try:
    # Strip the scope only where it prefixes the name, preserving any "^"
    # control-input marker or "loc:@" colocation prefix in group 1.
    pattern = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
    return re.sub(pattern, r"\1\2", compat.as_str(name), count=1)
  except TypeError as e:
    # `name` was not string-like; warn and return it untouched.
    logging.warning(e)
    return name
def prepend_name_scope(name, import_scope):
  """Prepends `import_scope` (with a "/" separator) onto `name`.

  Args:
    name: A node or tensor name, possibly behind a "^" control marker or a
      "loc:@" colocation prefix.
    import_scope: The scope prefix to prepend; falsy leaves `name` untouched.

  Returns:
    The prefixed name, or `name` unchanged when prefixing is not applicable.
  """
  if not import_scope:
    return name
  if import_scope[-1] == "/":
    import_scope = import_scope[:-1]
  try:
    # Insert the scope after any "^" control marker or "loc:@" prefix.
    pattern = r"([\^]|loc:@|^)(.*)"
    return re.sub(pattern, r"\1" + import_scope + r"/\2",
                  compat.as_str(name))
  except TypeError as e:
    # `name` was not string-like; warn and return it untouched.
    logging.warning(e)
    return name
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as `name_scope` above, just with different argument
  order."""
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
# Maps collection name -> (proto_type, to_proto, from_proto) for collections
# that can be serialized into a MetaGraphDef.
_proto_function_registry = registry.Registry("proto functions")


def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for `collection_name`.

  `to_proto` converts a Python object into the corresponding protocol buffer;
  `from_proto` performs the inverse conversion.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf message class, such as `saver_pb2.SaverDef`.
    to_proto: Callable converting a Python object to a protobuf, or None.
    from_proto: Callable converting a protobuf to a Python object, or None.

  Raises:
    TypeError: if `to_proto` or `from_proto` is provided but not callable.
  """
  if to_proto and not callable(to_proto):
    raise TypeError("to_proto must be callable.")
  if from_proto and not callable(from_proto):
    raise TypeError("from_proto must be callable.")

  _proto_function_registry.register((proto_type, to_proto, from_proto),
                                    collection_name)
def _proto_registry_entry(collection_name, index):
  """Returns one element of the registered (proto_type, to_proto, from_proto)
  tuple for `collection_name`, or None when nothing is registered."""
  try:
    return _proto_function_registry.lookup(collection_name)[index]
  except LookupError:
    return None


def get_collection_proto_type(collection_name):
  """Returns the proto message type registered for `collection_name`, or
  None."""
  return _proto_registry_entry(collection_name, 0)


def get_to_proto_function(collection_name):
  """Returns the `to_proto` function registered for `collection_name`, or
  None."""
  return _proto_registry_entry(collection_name, 1)


def get_from_proto_function(collection_name):
  """Returns the `from_proto` function registered for `collection_name`, or
  None."""
  return _proto_registry_entry(collection_name, 2)
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
  """Operation object corresponding to v to use for colocation constraints.

  Args:
    v: The value to colocate with (may be None, an Operation, a resource
      variable, or anything convertible to a tensor).
    graph: The graph the colocation constraint is being added to.

  Returns:
    The `Operation` to colocate with, or None when `v` is None.
  """
  if v is None:
    return None
  if isinstance(v, Operation):
    return v
  # We always want to colocate with the reference op.
  # When 'v' is a ResourceVariable, the reference op is the handle creating op.
  #
  # What this should be is:
  #   if isinstance(v, ResourceVariable):
  #     return v.handle.op
  # However, that would require a circular import dependency.
  # As of October 2018, there were attempts underway to remove
  # colocation constraints altogether. Assuming that will
  # happen soon, perhaps this hack to work around the circular
  # import dependency is acceptable.
  if hasattr(v, "handle") and isinstance(v.handle, Tensor):
    if graph.building_function:
      # Capture the handle into the function graph so the constraint refers
      # to an op inside the function rather than in the outer graph.
      return graph.capture(v.handle).op
    else:
      return v.handle.op
  return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# Operations can never be converted to Tensors; register a handler that
# raises a descriptive TypeError instead of a generic conversion failure.
tensor_conversion_registry.register_tensor_conversion_function(
    Operation, _operation_conversion_error)


# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
    indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_n_to_tensor_or_indexed_slices

# Backwards-compatible alias for the conversion-registry entry point.
register_tensor_conversion_function = \
    tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.


def to_raw_op(f):
  """Make a given op wrapper function `f` raw (keyword-only arguments).

  Args:
    f: An op wrapper function to convert.

  Returns:
    A keyword-only version of a fresh copy of `f`.
  """
  # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
  # due to double-registration.
  f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,
                         f.__closure__)
  return kwarg_only(f)
def raise_from_not_ok_status(e, name):
  """Re-raises a C-API NotOkStatus exception as the matching Python error.

  Args:
    e: The NotOkStatus exception (has `.message` and `.code`).
    name: Optional op name appended to the error message.
  """
  message = e.message + (" name: " + name if name is not None else "")
  # pylint: disable=protected-access
  # `from None` semantics: suppress the chained C-API exception context.
  six.raise_from(core._status_to_exception(e.code, message), None)
  # pylint: enable=protected-access
def add_exit_callback_to_default_func_graph(fn):
  """Registers `fn` to run when the current function graph scope exits.

  Args:
    fn: A zero-argument callable.

  Raises:
    RuntimeError: when no function is currently being built.
  """
  graph = get_default_graph()
  if graph._building_function:  # pylint: disable=protected-access
    graph._add_scope_exit_callback(fn)  # pylint: disable=protected-access
    return
  raise RuntimeError(
      "Cannot add scope exit callbacks when not building a function. "
      "Default graph: {}".format(graph))
def _reconstruct_sequence_inputs(op_def, inputs, attrs):
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
class _TensorIterator(object):
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
next = __next__ # python2.x compatibility.
| true
| true
|
f716e0b7ca7df97d982aaf86ed718db537ad482b
| 49,994
|
py
|
Python
|
research/compression/distillation/resnet.py
|
ragavvenkatesan/models
|
420a88c7af20dae8d79dbc1b4351fef41be361c8
|
[
"Apache-2.0"
] | null | null | null |
research/compression/distillation/resnet.py
|
ragavvenkatesan/models
|
420a88c7af20dae8d79dbc1b4351fef41be361c8
|
[
"Apache-2.0"
] | null | null | null |
research/compression/distillation/resnet.py
|
ragavvenkatesan/models
|
420a88c7af20dae8d79dbc1b4351fef41be361c8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks
(also known as ResNet v2).
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
                           parse_record_fn, num_epochs=1, num_parallel_calls=1):
  """Builds an input pipeline of parsed (image, label) batches.

  Args:
    dataset: A Dataset representing raw records.
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    num_parallel_calls: The number of records that are processed in parallel;
      roughly the number of available CPU cores is a good default.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
  """
  # Prefetch a batch of raw records up front to smooth out the time taken
  # to load input files as we shuffle and process.
  dataset = dataset.prefetch(buffer_size=batch_size)

  if is_training:
    # Shuffle before repeating so that shuffling respects epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  return (dataset
          .repeat(num_epochs)
          .map(lambda value: parse_record_fn(value, is_training),
               num_parallel_calls=num_parallel_calls)
          .batch(batch_size)
          # A final prefetch backgrounds all of the above processing and
          # keeps it out of the critical training path.
          .prefetch(1))
################################################################################
# Functions building the ResNet model.
################################################################################
def batch_norm_relu(inputs, training, data_format):
  """Performs a batch normalization followed by a ReLU."""
  # fused=True gives a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  channel_axis = 1 if data_format == 'channels_first' else 3
  normalized = tf.layers.batch_normalization(
      inputs=inputs, axis=channel_axis, momentum=_BATCH_NORM_DECAY,
      epsilon=_BATCH_NORM_EPSILON, center=True, scale=True,
      training=training, fused=True)
  return tf.nn.relu(normalized)
def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
      Should be a positive integer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    A tensor with the same format as the input with the data either intact
    (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  total_pad = kernel_size - 1
  pad_before = total_pad // 2
  pad_after = total_pad - pad_before
  spatial_pads = [[pad_before, pad_after], [pad_before, pad_after]]
  if data_format == 'channels_first':
    # Spatial dims are the last two axes (NCHW).
    paddings = [[0, 0], [0, 0]] + spatial_pads
  else:
    # Spatial dims are the middle two axes (NHWC).
    paddings = [[0, 0]] + spatial_pads + [[0, 0]]
  return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)

  padding_mode = 'SAME' if strides == 1 else 'VALID'
  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size,
      strides=strides, padding=padding_mode, use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format)
def building_block(inputs, filters, training, projection_shortcut, strides,
                   data_format):
  """Standard two-conv residual block with pre-activation (ResNet v2).

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will
      ultimately downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block.
  """
  residual = batch_norm_relu(inputs, training, data_format)
  # The projection shortcut consumes the pre-activated tensor since it is a
  # 1x1 convolution applied after the first batch norm and ReLU.
  if projection_shortcut is None:
    shortcut = inputs
  else:
    shortcut = projection_shortcut(residual)

  residual = conv2d_fixed_padding(
      inputs=residual, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  residual = batch_norm_relu(residual, training, data_format)
  residual = conv2d_fixed_padding(
      inputs=residual, filters=filters, kernel_size=3, strides=1,
      data_format=data_format)

  return residual + shortcut
def bottleneck_block(inputs, filters, training, projection_shortcut,
                     strides, data_format):
  """1x1 / 3x3 / 1x1 bottleneck residual block with pre-activation (v2).

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the first two convolutions. Note
      that the third and final convolution will use 4 times as many filters.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will
      ultimately downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block.
  """
  residual = batch_norm_relu(inputs, training, data_format)
  # The projection shortcut consumes the pre-activated tensor since it is a
  # 1x1 convolution applied after the first batch norm and ReLU.
  if projection_shortcut is None:
    shortcut = inputs
  else:
    shortcut = projection_shortcut(residual)

  # 1x1 reduce.
  residual = conv2d_fixed_padding(
      inputs=residual, filters=filters, kernel_size=1, strides=1,
      data_format=data_format)
  # 3x3 spatial convolution (carries the block's stride).
  residual = batch_norm_relu(residual, training, data_format)
  residual = conv2d_fixed_padding(
      inputs=residual, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  # 1x1 expand to 4x the filter count.
  residual = batch_norm_relu(residual, training, data_format)
  residual = conv2d_fixed_padding(
      inputs=residual, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format)

  return residual + shortcut
def block_layer(inputs, filters, block_fn, blocks, strides, training, name,
                data_format):
  """Creates one layer of blocks for the ResNet model.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the first convolution of the layer.
    block_fn: The block to use within the model, either `building_block` or
      `bottleneck_block`.
    blocks: The number of blocks contained in the layer.
    strides: The stride to use for the first convolution of the layer. If
      greater than 1, this layer will ultimately downsample the input.
    training: Either True or False, whether we are currently training the
      model. Needed for batch norm.
    name: A string name for the tensor output of the block layer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block layer.
  """
  # Bottleneck blocks end with 4x the number of filters they start with.
  filters_out = filters * 4 if block_fn is bottleneck_block else filters

  def projection_shortcut(shortcut_inputs):
    # 1x1 convolution matching the layer's output width and stride.
    return conv2d_fixed_padding(
        inputs=shortcut_inputs, filters=filters_out, kernel_size=1,
        strides=strides, data_format=data_format)

  # Only the first block downsamples and projects; the remaining blocks use
  # stride 1 and identity shortcuts.
  outputs = block_fn(inputs, filters, training, projection_shortcut, strides,
                     data_format)
  for _ in range(1, blocks):
    outputs = block_fn(outputs, filters, training, None, 1, data_format)

  return tf.identity(outputs, name)
class Model(object):
  """Base class for building the Resnet v2 Model.

  Builds two towers over a shared input -- a 'mentor' and a 'mentee' -- plus
  a distillation ("probe") loss: the sum of mean squared errors between
  corresponding intermediate feature maps (and the final logits) of the two
  towers.
  """

  def __init__(self, resnet_size, num_classes, num_filters, kernel_size,
               conv_stride, first_pool_size, first_pool_stride, probe_pool_size,
               second_pool_size, second_pool_stride, probe_pool_stride,
               block_fn, block_sizes, pool_type, num_probes,
               block_strides, final_size, data_format=None):
    """Creates a model for classifying an image.

    Args:
      resnet_size: A single integer for the size of the ResNet model.
      num_classes: The number of classes used as labels.
      num_filters: The number of filters to use for the first block layer
        of the model. This number is then doubled for each subsequent block
        layer.
      kernel_size: The kernel size to use for convolution.
      conv_stride: stride size for the initial convolutional layer.
      first_pool_size: Pool size to be used for the first pooling layer.
        If none, the first pooling layer is skipped.
      first_pool_stride: stride size for the first pooling layer. Not used
        if first_pool_size is None.
      probe_pool_size: Number to pool the probes by; <= 0 disables probe
        pooling.
      second_pool_size: Pool size to be used for the second pooling layer.
      second_pool_stride: stride size for the final pooling layer.
      probe_pool_stride: stride size for the probe pooling layer.
      block_fn: Which block layer function should be used? Pass in one of
        the two functions defined above: building_block or bottleneck_block.
      block_sizes: A list of two lists (mentor, mentee); each inner list
        holds the number of blocks for each set of block layers.
      pool_type: 'max' or 'mean'; the pooling applied to probes.
      num_probes: How many block layers to probe for the distillation loss.
      block_strides: List of integers representing the desired stride size
        for each of the sets of block layers. Should be same length as
        block_sizes.
      final_size: The expected size of the model after the second pooling.
      data_format: Input format ('channels_last', 'channels_first', or None).
        If set to None, the format is dependent on whether a GPU is
        available.
    """
    self.resnet_size = resnet_size

    if not data_format:
      # Prefer NCHW on CUDA builds for performance; NHWC elsewhere.
      data_format = (
          'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')

    self.data_format = data_format
    self.num_classes = num_classes
    self.num_filters = num_filters
    self.kernel_size = kernel_size
    self.conv_stride = conv_stride
    self.first_pool_size = first_pool_size
    self.first_pool_stride = first_pool_stride
    self.second_pool_size = second_pool_size
    self.second_pool_stride = second_pool_stride
    self.probe_pool_size = probe_pool_size
    self.probe_pool_stride = probe_pool_stride
    self.block_fn = block_fn
    self.block_sizes = block_sizes
    self.block_strides = block_strides
    self.final_size = final_size
    self.pool_type = pool_type
    self.num_probes = num_probes

  def _probe(self, net, layer_index, tag):
    """Builds one probe tensor from the feature map after a block layer.

    Applies the configured pooling (if any) so mentor/mentee probes have a
    reduced, matching spatial extent before the MSE comparison.

    Args:
      net: Feature map output of a block layer.
      layer_index: Zero-based index of the block layer (used in op names).
      tag: 'mentor' or 'mentee'; prefixes the identity op names.

    Returns:
      The probe tensor.
    """
    if self.probe_pool_size <= 0:
      return net
    if self.pool_type == 'max':
      probe = tf.layers.max_pooling2d(
          inputs=net, pool_size=self.probe_pool_size,
          strides=self.probe_pool_stride, padding='SAME',
          data_format=self.data_format)
      # BUG FIX: previously the identity wrapped the *unpooled* tensor,
      # silently discarding the pooling result.
      return tf.identity(probe, tag + '_probe_max_pool_' + str(layer_index))
    if self.pool_type == 'mean':
      probe = tf.layers.average_pooling2d(
          inputs=net, pool_size=self.probe_pool_size,
          strides=self.probe_pool_stride, padding='SAME',
          data_format=self.data_format)
      # Also fixes the mentee op name, which previously said 'max'.
      return tf.identity(probe, tag + '_probe_mean_pool_' + str(layer_index))
    # Unknown pool_type: fall back to the raw feature map (previously this
    # path left the probe variable unbound and crashed).
    return net

  def _build_tower(self, inputs, training, tag, block_sizes):
    """Builds one complete ResNet v2 tower.

    Args:
      inputs: The (possibly transposed) input image batch.
      training: Whether the model is in training mode (for batch norm).
      tag: 'mentor' or 'mentee'; prefixes all identity op names.
      block_sizes: Number of blocks per block layer for this tower.

    Returns:
      A (logits, probes) pair; `probes` contains one tensor per probed
      block layer plus the final logits.
    """
    net = conv2d_fixed_padding(
        inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
        strides=self.conv_stride, data_format=self.data_format)
    net = tf.identity(net, tag + '_initial_conv')

    if self.first_pool_size:
      net = tf.layers.max_pooling2d(
          inputs=net, pool_size=self.first_pool_size,
          strides=self.first_pool_stride, padding='SAME',
          data_format=self.data_format)
      net = tf.identity(net, tag + '_initial_max_pool')

    probes = []
    for i, num_blocks in enumerate(block_sizes):
      # Filter count doubles with each block layer.
      num_filters = self.num_filters * (2**i)
      net = block_layer(
          inputs=net, filters=num_filters, block_fn=self.block_fn,
          blocks=num_blocks, strides=self.block_strides[i],
          training=training, name=tag + '_block_layer{}'.format(i + 1),
          data_format=self.data_format)
      if len(probes) < self.num_probes:
        probes.append(self._probe(net, i, tag))

    net = batch_norm_relu(net, training, self.data_format)
    net = tf.layers.average_pooling2d(
        inputs=net, pool_size=self.second_pool_size,
        strides=self.second_pool_stride, padding='VALID',
        data_format=self.data_format)
    net = tf.identity(net, tag + '_final_avg_pool')
    net = tf.reshape(net, [-1, self.final_size])
    net = tf.layers.dense(inputs=net, units=self.num_classes)
    net = tf.identity(net, tag + '_final_dense')
    # The logits participate in the distillation loss as the last probe.
    probes.append(net)
    return net, probes

  def __call__(self, inputs, training):
    """Add operations to classify a batch of input images.

    Args:
      inputs: A Tensor representing a batch of input images.
      training: A boolean. Set to True to add operations required only when
        training the classifier.

    Returns:
      A (mentor_logits, mentee_logits, probe_cost) tuple; each logits tensor
      has shape [<batch_size>, self.num_classes].
    """
    with tf.variable_scope('input_transforms'):
      if self.data_format == 'channels_first':
        # Convert the inputs from channels_last (NHWC) to channels_first
        # (NCHW). This provides a large performance boost on GPU. See
        # https://www.tensorflow.org/performance/performance_guide#data_formats
        inputs = tf.transpose(inputs, [0, 3, 1, 2])

    with tf.variable_scope('mentor'):
      mentor, mentor_probes = self._build_tower(
          inputs, training, 'mentor', self.block_sizes[0])

    with tf.variable_scope('mentee'):
      mentee, mentee_probes = self._build_tower(
          inputs, training, 'mentee', self.block_sizes[1])

    # Distillation loss: sum of MSEs over corresponding probe pairs (the
    # final logits are compared as the last pair).
    probe_cost = tf.constant(0.)
    for mentor_feat, mentee_feat in zip(mentor_probes, mentee_probes):
      probe_cost = probe_cost + tf.losses.mean_squared_error(
          mentor_feat, mentee_feat)

    return (mentor, mentee, probe_cost)
################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.01 * batch_size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.01.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. Should be the same length as
      boundary_epochs.

  Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step)- and returns the learning rate to be used
    for training the next batch.
  """
  with tf.variable_scope('learning_rate'):
    # Base rate scales linearly with batch size (0.01 when
    # batch_size == batch_denom).
    initial_learning_rate = 0.01 * batch_size / batch_denom
    batches_per_epoch = num_images / batch_size
    # Convert epoch boundaries to global-step boundaries; the rate at each
    # interval is the base rate scaled by the matching entry of decay_rates.
    boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
    vals = [initial_learning_rate * decay for decay in decay_rates]
    def learning_rate_fn(global_step):
      # Builds a graph op: piecewise-constant schedule over global_step.
      global_step = tf.cast(global_step, tf.int32)
      rval = tf.train.piecewise_constant(global_step, boundaries, vals)
      return rval
    return learning_rate_fn
def learning_rate_with_decay_2( initial_learning_rate,
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates):
  """Get a learning rate that decays step-wise as training progresses.

  Variant of `learning_rate_with_decay` that takes the base rate explicitly
  instead of deriving it from the batch size.

  Args:
    initial_learning_rate: the base learning rate before any decay is applied.
    batch_size: the number of examples processed in each training batch.
    batch_denom: unused; kept for signature compatibility with
      `learning_rate_with_decay`.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. Should be the same length as
      boundary_epochs.

  Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step)- and returns the learning rate to be used
    for training the next batch.
  """
  with tf.variable_scope('learning_rate'):
    batches_per_epoch = num_images / batch_size
    # Convert epoch boundaries to global-step boundaries.
    boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
    vals = [initial_learning_rate * decay for decay in decay_rates]
    def learning_rate_fn(global_step):
      # Builds a graph op: piecewise-constant schedule over global_step.
      global_step = tf.cast(global_step, tf.int32)
      rval = tf.train.piecewise_constant(global_step, boundaries, vals)
      return rval
    return learning_rate_fn
def distillation_coeff_fn(intital_distillation, global_step):
  """Exponentially decay the distillation coefficient over training.

  Decays the coefficient smoothly (non-staircase) by a factor of 0.55
  every 100000 steps.

  Args:
    intital_distillation: starting value of the distillation coefficient.
    global_step: current training step tensor (or int).

  Returns:
    A scalar tensor holding the decayed coefficient.
  """
  step = tf.cast(global_step, tf.int32)
  return tf.train.exponential_decay(
      intital_distillation,
      step,
      decay_steps=100000,
      decay_rate=0.55,
      staircase=False)
def _build_optimizer(name, kind, learning_rate, momentum):
  """Construct the optimizer selected by `kind` for sub-network `name`.

  Keeps the original variable-scope names ('<name>_<kind>_optimizer') so
  checkpoints remain compatible.

  Args:
    name: 'mentor', 'mentee' or 'finetune' -- used only for scoping.
    kind: 'momentum', 'adam' or 'adadelta'.
    learning_rate: scalar tensor with the learning rate.
    momentum: momentum term (used only when kind == 'momentum').

  Returns:
    A tf.train.Optimizer instance.

  Raises:
    ValueError: for an unrecognized `kind` (previously this fell through
      silently and crashed later with a NameError).
  """
  with tf.variable_scope(name + '_' + kind + '_optimizer'):
    if kind == 'momentum':
      return tf.train.MomentumOptimizer(
          learning_rate=learning_rate,
          momentum=momentum)
    elif kind == 'adam':
      return tf.train.AdamOptimizer(learning_rate=learning_rate)
    elif kind == 'adadelta':
      return tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
  raise ValueError('Unknown optimizer: ' + str(kind))


def resnet_model_fn(features, labels, mode, model_class, trainee,
                    distillation_coeff, probes_coeff, resnet_size, num_probes,
                    weight_decay_coeff, learning_rate_fn_mentor,
                    learning_rate_fn_mentee, learning_rate_fn_finetune,
                    momentum, data_format, pool_probes, pool_type,
                    temperature=1, optimizer='momentum',
                    loss_filter_fn=None):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    trainee: A string either `'mentee'`, `'mentor'` or `'finetune'`.
    distillation_coeff: Weight for distillation.
    probes_coeff: weight for probes.
    resnet_size: A list of two integers for the size of the ResNet model for
      mentor followed by mentee.
    num_probes: How many equally spaced probes do we need.
    weight_decay_coeff: weight decay rate used to regularize learned variables.
    learning_rate_fn_mentor: function that returns the current learning rate
      given the current global_step
    learning_rate_fn_mentee: function that returns the current learning rate
      given the current global_step
    learning_rate_fn_finetune: function that returns the current learning rate
      given the current global_step
    momentum: momentum term used for optimization.
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    pool_probes: Downsampling for probes.
    pool_type: 'max' or 'mean'.
    temperature: A value of temperature to use for distillation. Defaults to 1
      so that it will remain backward compatible.
    optimizer: sequence of three optimizer names (mentor, mentee, finetune);
      'adam', 'adadelta' and 'momentum' are options.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.

  Raises:
    ValueError: if an optimizer name is not recognized (TRAIN mode only).
  """
  with tf.variable_scope('inputs'):
    # Generate a summary node for the images
    tf.summary.image('images', features, max_outputs=6)

  model = model_class(resnet_size=resnet_size,
                      pool_probes=pool_probes,
                      pool_type=pool_type,
                      num_probes=num_probes,
                      data_format=data_format)

  # The model returns both networks' logits plus the summed probe
  # (intermediate-feature matching) cost between them.
  logits_mentor, logits_mentee, probe_cost = model(
      features, mode == tf.estimator.ModeKeys.TRAIN)

  predictions_mentor = {
      'classes': tf.argmax(logits_mentor, axis=1),
      'probabilities': tf.nn.softmax(logits_mentor,
                                     name='softmax_tensor_mentor'),
  }

  predictions_mentee = {
      'classes': tf.argmax(logits_mentee, axis=1),
      'probabilities': tf.nn.softmax(logits_mentee,
                                     name='softmax_tensor_mentee'),
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    if trainee == 'mentor':
      return tf.estimator.EstimatorSpec(mode=mode,
                                        predictions=predictions_mentor)
    elif trainee == 'mentee' or trainee == 'finetune':
      return tf.estimator.EstimatorSpec(mode=mode,
                                        predictions=predictions_mentee)

  with tf.variable_scope('distillery'):
    # Soft targets: mentor logits softened by the temperature; the mentee is
    # trained to match them (Hinton-style knowledge distillation).
    temperature_softmax_mentor = tf.nn.softmax(
        tf.div(logits_mentor, temperature),
        name='softmax_temperature_tensor_mentor')
    distillation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.div(logits_mentee, temperature),
        labels=temperature_softmax_mentor))
    tf.identity(distillation_loss, name='distillation_loss')
    tf.summary.scalar('distillation_loss', distillation_loss)
    tf.summary.scalar('scaled_distillation_loss',
                      distillation_coeff * distillation_loss)

  with tf.variable_scope('cross_entropy'):
    # Calculate loss, which includes softmax cross entropy and L2
    # regularization.
    cross_entropy_mentor = tf.losses.softmax_cross_entropy(
        logits=logits_mentor, onehot_labels=labels)
    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy_mentor, name='cross_entropy_mentor')
    tf.summary.scalar('cross_entropy_mentor', cross_entropy_mentor)

    cross_entropy_mentee = tf.losses.softmax_cross_entropy(
        logits=logits_mentee, onehot_labels=labels)
    tf.identity(cross_entropy_mentee, name='cross_entropy_mentee')
    tf.summary.scalar('cross_entropy_mentee', cross_entropy_mentee)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  if not loss_filter_fn:
    def loss_filter_fn(name):
      return 'batch_normalization' not in name

  mentor_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope='mentor')
  mentee_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope='mentee')

  with tf.variable_scope('regularizers'):
    # NOTE: weight_decay_coeff is folded into l2_mentor / l2_mentee here,
    # so the losses below must add them WITHOUT scaling again.
    if weight_decay_coeff > 0:
      l2_mentor = weight_decay_coeff * tf.add_n(
          [tf.nn.l2_loss(v) for v in mentor_variables
           if loss_filter_fn(v.name)])
      l2_mentee = weight_decay_coeff * tf.add_n(
          [tf.nn.l2_loss(v) for v in mentee_variables
           if loss_filter_fn(v.name)])
    else:
      l2_mentor = tf.constant(0.)
      l2_mentee = tf.constant(0.)

  if mode == tf.estimator.ModeKeys.TRAIN:
    with tf.variable_scope('learning_rates'):
      global_step = tf.train.get_or_create_global_step()
      learning_rate_mentor = learning_rate_fn_mentor(global_step)
      learning_rate_mentee = learning_rate_fn_mentee(global_step)
      learning_rate_finetune = learning_rate_fn_finetune(global_step)
      # Named identities so LoggingTensorHook can find them by
      # 'learning_rates/<name>'.
      tf.identity(learning_rate_mentor, name='learning_rate_mentor')
      tf.summary.scalar('learning_rate_mentor', learning_rate_mentor)
      tf.identity(learning_rate_mentee, name='learning_rate_mentee')
      tf.summary.scalar('learning_rate_mentee', learning_rate_mentee)
      tf.identity(learning_rate_finetune, name='learning_rate_finetune')
      tf.summary.scalar('learning_rate_finetune', learning_rate_finetune)

    with tf.variable_scope('mentor_cumulative_loss'):
      # Add weight decay to the loss (l2_mentor is already scaled).
      loss_mentor = cross_entropy_mentor + l2_mentor
      tf.summary.scalar('objective', loss_mentor)

    with tf.variable_scope('mentee_cumulative_loss'):
      distillation_coeff_decayed = distillation_coeff_fn(distillation_coeff,
                                                         global_step)
      probe_scale = probes_coeff * distillation_coeff_decayed
      tf.identity(probe_cost, name='probe_cost')
      tf.summary.scalar('probe_loss', probe_cost)
      tf.summary.scalar('scaled_probe_loss', probe_scale * probe_cost)
      # BUG FIX: previously tf.identity wrapped the constant
      # `distillation_coeff`, so the hook logging
      # 'mentee_cumulative_loss/distillation_coeff_decayed' showed an
      # undecayed value. Log the decayed tensor instead.
      tf.identity(distillation_coeff_decayed,
                  name='distillation_coeff_decayed')
      tf.summary.scalar('coeff', distillation_coeff_decayed)
      loss_mentee = cross_entropy_mentee + l2_mentee + \
                    distillation_coeff_decayed * distillation_loss + \
                    probe_scale * probe_cost
      tf.summary.scalar('objective', loss_mentee)

    with tf.variable_scope('mentee_finetune'):
      # Finetuning drops the distillation and probe terms.
      loss_finetune = cross_entropy_mentee + l2_mentee
      tf.summary.scalar('objective', loss_finetune)

    # optimizer is a sequence of three names: (mentor, mentee, finetune).
    optimizer_mentor = _build_optimizer('mentor', optimizer[0],
                                        learning_rate_mentor, momentum)
    optimizer_mentee = _build_optimizer('mentee', optimizer[1],
                                        learning_rate_mentee, momentum)
    optimizer_finetune = _build_optimizer('finetune', optimizer[2],
                                          learning_rate_finetune, momentum)

    # Batch norm requires update ops to be added as a dependency to train_op
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      with tf.variable_scope('optimizers'):
        # Each phase only updates its own sub-network's variables.
        train_op_mentor = optimizer_mentor.minimize(
            loss_mentor, global_step, var_list=mentor_variables)
        train_op_mentee = optimizer_mentee.minimize(
            loss_mentee, global_step, var_list=mentee_variables)
        train_op_finetune = optimizer_finetune.minimize(
            loss_finetune, global_step, var_list=mentee_variables)
  else:
    # BUG FIX: the eval branch previously computed
    # `cross_entropy + weight_decay_coeff * l2_*`, but l2_* already includes
    # weight_decay_coeff (see 'regularizers' above), so weight decay was
    # applied twice and eval losses disagreed with training losses.
    with tf.variable_scope('mentor_cumulative_loss'):
      loss_mentor = cross_entropy_mentor + l2_mentor
    with tf.variable_scope('mentee_cumulative_loss'):
      loss_mentee = cross_entropy_mentee + l2_mentee
    with tf.variable_scope('mentee_finetune'):
      loss_finetune = cross_entropy_mentee + l2_mentee
    train_op_mentor = None
    train_op_mentee = None
    train_op_finetune = None

  with tf.variable_scope('metrics'):
    accuracy_mentor = tf.metrics.accuracy(
        tf.argmax(labels, axis=1), predictions_mentor['classes'])
    accuracy_mentee = tf.metrics.accuracy(
        tf.argmax(labels, axis=1), predictions_mentee['classes'])
    metrics = {'accuracy_mentor': accuracy_mentor,
               'accuracy_mentee': accuracy_mentee}

    # Create a tensor named train_accuracy for logging purposes
    tf.identity(accuracy_mentor[1], name='train_accuracy_mentor')
    tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1])
    tf.identity(accuracy_mentee[1], name='train_accuracy_mentee')
    tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1])

  # Save everything (both networks) regardless of which phase is training.
  saver = tf.train.Saver(var_list=tf.global_variables())

  if trainee == 'mentor':
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions_mentor,
        loss=loss_mentor,
        train_op=train_op_mentor,
        scaffold=tf.train.Scaffold(saver=saver),
        eval_metric_ops=metrics)
  elif trainee == 'mentee':
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions_mentee,
        loss=loss_mentee,
        train_op=train_op_mentee,
        scaffold=tf.train.Scaffold(saver=saver),
        eval_metric_ops=metrics)
  elif trainee == 'finetune':
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions_mentee,
        loss=loss_finetune,
        train_op=train_op_finetune,
        scaffold=tf.train.Scaffold(saver=saver),
        eval_metric_ops=metrics)
  raise ValueError('Unknown trainee: ' + str(trainee))
def resnet_main(flags, model_function, input_function):
  """Train and evaluate the mentor, the mentee, then finetune the mentee.

  Runs three sequential phases over the same model_dir, each as its own
  tf.estimator.Estimator: (1) train the mentor network, (2) train the
  mentee with distillation from the mentor, (3) finetune the mentee on
  its own. Each phase alternates `flags.epochs_per_eval` epochs of
  training with one evaluation pass.

  The three phases previously duplicated the ~25-line params dict and the
  train/evaluate loop verbatim; they are factored into nested helpers
  here with identical behavior.

  Args:
    flags: parsed argument namespace (see ResnetArgParser).
    model_function: a model_fn suitable for tf.estimator.
    input_function: callable(is_training, data_dir, batch_size, num_epochs,
      num_parallel_calls) returning the input pipeline.
  """
  # Using the Winograd non-fused algorithms provides a small performance boost.
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

  # Set up a RunConfig to only save checkpoints once per training cycle.
  run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)

  def _make_estimator(trainee):
    # All three phases share every hyper-parameter; only 'trainee' differs.
    return tf.estimator.Estimator(
        model_fn=model_function, model_dir=flags.model_dir,
        config=run_config,
        params={
            'resnet_size': [flags.resnet_size_mentor,
                            flags.resnet_size_mentee],
            'data_format': flags.data_format,
            'batch_size': flags.batch_size,
            'distillation_coeff': flags.distillation_coeff,
            'probes_coeff': flags.probes_coeff,
            'weight_decay_coeff': flags.weight_decay_coeff,
            'optimizer': [flags.mentor_optimizer,
                          flags.mentee_optimizer,
                          flags.finetune_optimizer],
            'temperature': flags.temperature,
            'num_probes': flags.num_probes,
            'pool_probes': flags.pool_probes,
            'train_epochs_mentor': flags.train_epochs_mentor,
            'train_epochs_mentee': flags.train_epochs_mentee,
            'train_epochs_finetune': flags.train_epochs_finetune,
            'initial_learning_rate_mentor':
                flags.initial_learning_rate_mentor,
            'initial_learning_rate_mentee':
                flags.initial_learning_rate_mentee,
            'initial_learning_rate_finetune':
                flags.initial_learning_rate_finetune,
            'pool_type': flags.pool_type,
            'trainee': trainee
        })

  def _input_fn_train():
    return input_function(True, flags.data_dir, flags.batch_size,
                          flags.epochs_per_eval, flags.num_parallel_calls)

  def _input_fn_eval():
    # One epoch is enough for a full evaluation pass.
    return input_function(False, flags.data_dir, flags.batch_size,
                          1, flags.num_parallel_calls)

  def _train_and_evaluate(estimator, phase, train_epochs, tensors_to_log):
    # Alternate epochs_per_eval epochs of training with one evaluation.
    cycles = train_epochs // flags.epochs_per_eval
    for i in range(cycles):
      logging_hook = tf.train.LoggingTensorHook(
          tensors=tensors_to_log, every_n_iter=100)
      print(' *********************** ' )
      print(' Starting a ' + phase + ' cycle. [' + str(i) + '/'
            + str(cycles) + ']')
      print(' *********************** ' )
      estimator.train(input_fn=_input_fn_train, hooks=[logging_hook])

      print('Starting to evaluate.')
      # Evaluate the model and print results
      eval_results = estimator.evaluate(input_fn=_input_fn_eval)
      print(eval_results)

  mentor = _make_estimator('mentor')
  _train_and_evaluate(mentor, 'mentor training', flags.train_epochs_mentor, {
      'learning_rate': 'learning_rates/learning_rate_mentor',
      'cross_entropy': 'cross_entropy/cross_entropy_mentor',
      'train_accuracy': 'metrics/train_accuracy_mentor'
  })

  mentee = _make_estimator('mentee')
  _train_and_evaluate(mentee, 'mentee training', flags.train_epochs_mentee, {
      'learning_rate': 'learning_rates/learning_rate_mentee',
      'cross_entropy': 'cross_entropy/cross_entropy_mentee',
      'train_accuracy': 'metrics/train_accuracy_mentee',
      'distillation_loss': 'distillery/distillation_loss',
      'distillation_coeff': 'mentee_cumulative_loss/distillation_coeff_decayed'
  })

  finetune = _make_estimator('finetune')
  _train_and_evaluate(finetune, 'mentee finetune',
                      flags.train_epochs_finetune, {
      'learning_rate': 'learning_rates/learning_rate_mentee',
      'cross_entropy': 'cross_entropy/cross_entropy_mentee',
      'train_accuracy': 'metrics/train_accuracy_mentee',
  })
class ResnetArgParser(argparse.ArgumentParser):
  """Arguments for configuring and running a Resnet Model.

  Declares every flag consumed by resnet_main: data/model paths, the two
  network sizes (mentor and mentee), per-phase epoch counts and learning
  rates, optimizer choices, and the distillation/probe hyper-parameters.
  """

  def __init__(self, resnet_size_choices=None):
    super(ResnetArgParser, self).__init__()
    # --- Data pipeline and model placement ---
    self.add_argument(
        '--data_dir', type=str, default='./resnet_data',
        help='The directory where the input data is stored.')

    self.add_argument(
        '--num_parallel_calls', type=int, default=5,
        help='The number of records that are processed in parallel '
        'during input processing. This can be optimized per data set but '
        'for generally homogeneous data sets, should be approximately the '
        'number of available CPU cores.')

    self.add_argument(
        '--model_dir', type=str, default='./resnet_model',
        help='The directory where the model will be stored.')

    # --- Network sizes (mentor is the larger teacher network) ---
    self.add_argument(
        '--resnet_size_mentor', type=int, default=50,
        choices=resnet_size_choices,
        help='The size of the ResNet Mentor model to use.')

    self.add_argument(
        '--resnet_size_mentee', type=int, default=10,
        choices=resnet_size_choices,
        help='The size of the ResNet Mentee model to use.')

    # --- Per-phase training schedule ---
    self.add_argument(
        '--train_epochs_mentor', type=int, default=100,
        help='The number of epochs to use for training.')

    self.add_argument(
        '--train_epochs_mentee', type=int, default=100,
        help='The number of epochs to use for training.')

    self.add_argument(
        '--train_epochs_finetune', type=int, default=100,
        help='The number of epochs to use for training.')

    self.add_argument(
        '--epochs_per_eval', type=int, default=1,
        help='The number of training epochs to run between evaluations.')

    self.add_argument(
        '--batch_size', type=int, default=32,
        help='Batch size for training and evaluation.')

    # --- Per-phase optimizers ('momentum', 'adam' or 'adadelta') ---
    self.add_argument(
        '--mentor_optimizer', type=str, default='momentum',
        help='Optimizer for training and evaluation.')

    self.add_argument(
        '--mentee_optimizer', type=str, default='momentum',
        help='Optimizer for training and evaluation.')

    self.add_argument(
        '--finetune_optimizer', type=str, default='momentum',
        help='Optimizer for training and evaluation.')

    self.add_argument(
        '--data_format', type=str, default=None,
        choices=['channels_first', 'channels_last'],
        help='A flag to override the data format used in the model. '
             'channels_first provides a performance boost on GPU but '
             'is not always compatible with CPU. If left unspecified, '
             'the data format will be chosen automatically based on '
             'whether TensorFlow was built for CPU or GPU.')

    # --- Distillation and probe hyper-parameters ---
    self.add_argument(
        '--distillation_coeff', type=float, default=0.01,
        help='Coefficient of distillation to be applied from parent to'
             'child. This is only useful when performing distillaiton.')

    self.add_argument(
        '--probes_coeff', type=float, default=0.0001,
        help='Coefficient of weight to be applied from parent to'
             'child. This is only useful when performing mentoring.')

    self.add_argument(
        '--weight_decay_coeff', type=float, default=0.0002,
        help='Coefficient of weight to be applied from to the'
             'weight decay regularizer.')

    self.add_argument(
        '--temperature', type=float, default=3,
        help='Temperature to be used for the softmax layer')

    self.add_argument(
        '--num_probes', type=int, default=0,
        help='Number of probes to be used')

    self.add_argument(
        '--pool_probes', type=int, default=2,
        help='Maxpool probes by')

    # --- Per-phase initial learning rates ---
    self.add_argument(
        '--initial_learning_rate_mentor', type=float, default=0.001,
        help='Set initial learning rate for mentor')

    self.add_argument(
        '--initial_learning_rate_mentee', type=float, default=0.001,
        help='Set initial learning rate for mentee')

    self.add_argument(
        '--initial_learning_rate_finetune', type=float, default=0.001,
        help='Set initial learning rate finetune')

    self.add_argument(
        '--pool_type', type=str, default='max',
        help='Pool type for probes.')
| 44.399645
| 139
| 0.67008
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
er_mentee = tf.train.MomentumOptimizer(
learning_rate=learning_rate_mentee,
momentum=momentum)
elif optimizer[1] == 'adam':
with tf.variable_scope('mentee_adam_optimizer'):
optimizer_mentee = tf.train.AdamOptimizer(
learning_rate=learning_rate_mentee)
elif optimizer[1] == 'adadelta':
with tf.variable_scope('mentee_adadelta_optimizer'):
optimizer_mentee = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate_mentee)
if optimizer[2] == 'momentum':
with tf.variable_scope('finetune_momentum_optimizer'):
optimizer_finetune = tf.train.MomentumOptimizer(
learning_rate=learning_rate_finetune,
momentum=momentum)
elif optimizer[2] == 'adam':
with tf.variable_scope('finetune_adam_optimizer'):
optimizer_finetune = tf.train.AdamOptimizer(
learning_rate=learning_rate_finetune)
elif optimizer[2] == 'adadelta':
with tf.variable_scope('finetune_adadelta_optimizer'):
optimizer_finetune = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate_finetune)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
with tf.variable_scope('optimizers'):
train_op_mentor = optimizer_mentor.minimize(loss_mentor,
global_step,
var_list = mentor_variables)
train_op_mentee = optimizer_mentee.minimize(loss_mentee,
global_step,
var_list = mentee_variables)
train_op_finetune = optimizer_finetune.minimize(loss_finetune,
global_step,
var_list = mentee_variables)
else:
with tf.variable_scope('mentor_cumulative_loss'):
loss_mentor = cross_entropy_mentor + weight_decay_coeff * l2_mentor
with tf.variable_scope('mentee_cumulative_loss'):
loss_mentee = cross_entropy_mentee + weight_decay_coeff * l2_mentee
with tf.variable_scope('mentee_finetune'):
loss_finetune = cross_entropy_mentee + weight_decay_coeff * l2_mentee
train_op_mentor = None
train_op_mentee = None
train_op_finetune = None
with tf.variable_scope('metrics'):
accuracy_mentor = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions_mentor['classes'])
accuracy_mentee = tf.metrics.accuracy(
tf.argmax(labels, axis=1), predictions_mentee['classes'])
metrics = {'accuracy_mentor': accuracy_mentor,
'accuracy_mentee': accuracy_mentee}
tf.identity(accuracy_mentor[1], name='train_accuracy_mentor')
tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1])
tf.identity(accuracy_mentee[1], name='train_accuracy_mentee')
tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1])
saver=tf.train.Saver(var_list = tf.global_variables())
if trainee == 'mentor':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentor,
loss=loss_mentor,
train_op=train_op_mentor,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
elif trainee == 'mentee':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentee,
loss=loss_mentee,
train_op=train_op_mentee,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
elif trainee == 'finetune':
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_mentee,
loss=loss_finetune,
train_op=train_op_finetune,
scaffold=tf.train.Scaffold(saver=saver),
eval_metric_ops=metrics)
def resnet_main(flags, model_function, input_function):
  """Train and evaluate the mentor, the mentee, then finetune the mentee.

  Runs three sequential phases over the same model_dir, each as its own
  tf.estimator.Estimator: (1) train the mentor network, (2) train the
  mentee with distillation from the mentor, (3) finetune the mentee on
  its own. Each phase alternates `flags.epochs_per_eval` epochs of
  training with one evaluation pass.

  The three phases previously duplicated the ~25-line params dict and the
  train/evaluate loop verbatim; they are factored into nested helpers
  here with identical behavior.

  Args:
    flags: parsed argument namespace (see ResnetArgParser).
    model_function: a model_fn suitable for tf.estimator.
    input_function: callable(is_training, data_dir, batch_size, num_epochs,
      num_parallel_calls) returning the input pipeline.
  """
  # Using the Winograd non-fused algorithms provides a small performance boost.
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

  # Set up a RunConfig to only save checkpoints once per training cycle.
  run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)

  def _make_estimator(trainee):
    # All three phases share every hyper-parameter; only 'trainee' differs.
    return tf.estimator.Estimator(
        model_fn=model_function, model_dir=flags.model_dir,
        config=run_config,
        params={
            'resnet_size': [flags.resnet_size_mentor,
                            flags.resnet_size_mentee],
            'data_format': flags.data_format,
            'batch_size': flags.batch_size,
            'distillation_coeff': flags.distillation_coeff,
            'probes_coeff': flags.probes_coeff,
            'weight_decay_coeff': flags.weight_decay_coeff,
            'optimizer': [flags.mentor_optimizer,
                          flags.mentee_optimizer,
                          flags.finetune_optimizer],
            'temperature': flags.temperature,
            'num_probes': flags.num_probes,
            'pool_probes': flags.pool_probes,
            'train_epochs_mentor': flags.train_epochs_mentor,
            'train_epochs_mentee': flags.train_epochs_mentee,
            'train_epochs_finetune': flags.train_epochs_finetune,
            'initial_learning_rate_mentor':
                flags.initial_learning_rate_mentor,
            'initial_learning_rate_mentee':
                flags.initial_learning_rate_mentee,
            'initial_learning_rate_finetune':
                flags.initial_learning_rate_finetune,
            'pool_type': flags.pool_type,
            'trainee': trainee
        })

  def _input_fn_train():
    return input_function(True, flags.data_dir, flags.batch_size,
                          flags.epochs_per_eval, flags.num_parallel_calls)

  def _input_fn_eval():
    # One epoch is enough for a full evaluation pass.
    return input_function(False, flags.data_dir, flags.batch_size,
                          1, flags.num_parallel_calls)

  def _train_and_evaluate(estimator, phase, train_epochs, tensors_to_log):
    # Alternate epochs_per_eval epochs of training with one evaluation.
    cycles = train_epochs // flags.epochs_per_eval
    for i in range(cycles):
      logging_hook = tf.train.LoggingTensorHook(
          tensors=tensors_to_log, every_n_iter=100)
      print(' *********************** ' )
      print(' Starting a ' + phase + ' cycle. [' + str(i) + '/'
            + str(cycles) + ']')
      print(' *********************** ' )
      estimator.train(input_fn=_input_fn_train, hooks=[logging_hook])

      print('Starting to evaluate.')
      # Evaluate the model and print results
      eval_results = estimator.evaluate(input_fn=_input_fn_eval)
      print(eval_results)

  mentor = _make_estimator('mentor')
  _train_and_evaluate(mentor, 'mentor training', flags.train_epochs_mentor, {
      'learning_rate': 'learning_rates/learning_rate_mentor',
      'cross_entropy': 'cross_entropy/cross_entropy_mentor',
      'train_accuracy': 'metrics/train_accuracy_mentor'
  })

  mentee = _make_estimator('mentee')
  _train_and_evaluate(mentee, 'mentee training', flags.train_epochs_mentee, {
      'learning_rate': 'learning_rates/learning_rate_mentee',
      'cross_entropy': 'cross_entropy/cross_entropy_mentee',
      'train_accuracy': 'metrics/train_accuracy_mentee',
      'distillation_loss': 'distillery/distillation_loss',
      'distillation_coeff': 'mentee_cumulative_loss/distillation_coeff_decayed'
  })

  finetune = _make_estimator('finetune')
  _train_and_evaluate(finetune, 'mentee finetune',
                      flags.train_epochs_finetune, {
      'learning_rate': 'learning_rates/learning_rate_mentee',
      'cross_entropy': 'cross_entropy/cross_entropy_mentee',
      'train_accuracy': 'metrics/train_accuracy_mentee',
  })
class ResnetArgParser(argparse.ArgumentParser):
    """Argument parser pre-populated with every mentor/mentee ResNet option.

    Parameters
    ----------
    resnet_size_choices : sequence of int, optional
        When given, restricts the legal values of the
        ``--resnet_size_mentor`` and ``--resnet_size_mentee`` flags.
    """

    def __init__(self, resnet_size_choices=None):
        super(ResnetArgParser, self).__init__()
        self.add_argument(
            '--data_dir', type=str, default='./resnet_data',
            help='The directory where the input data is stored.')
        self.add_argument(
            '--num_parallel_calls', type=int, default=5,
            help='The number of records that are processed in parallel '
            'during input processing. This can be optimized per data set but '
            'for generally homogeneous data sets, should be approximately the '
            'number of available CPU cores.')
        self.add_argument(
            '--model_dir', type=str, default='./resnet_model',
            help='The directory where the model will be stored.')
        self.add_argument(
            '--resnet_size_mentor', type=int, default=50,
            choices=resnet_size_choices,
            help='The size of the ResNet Mentor model to use.')
        self.add_argument(
            '--resnet_size_mentee', type=int, default=10,
            choices=resnet_size_choices,
            help='The size of the ResNet Mentee model to use.')
        self.add_argument(
            '--train_epochs_mentor', type=int, default=100,
            help='The number of epochs to use for training.')
        self.add_argument(
            '--train_epochs_mentee', type=int, default=100,
            help='The number of epochs to use for training.')
        self.add_argument(
            '--train_epochs_finetune', type=int, default=100,
            help='The number of epochs to use for training.')
        self.add_argument(
            '--epochs_per_eval', type=int, default=1,
            help='The number of training epochs to run between evaluations.')
        self.add_argument(
            '--batch_size', type=int, default=32,
            help='Batch size for training and evaluation.')
        self.add_argument(
            '--mentor_optimizer', type=str, default='momentum',
            help='Optimizer for training and evaluation.')
        self.add_argument(
            '--mentee_optimizer', type=str, default='momentum',
            help='Optimizer for training and evaluation.')
        self.add_argument(
            '--finetune_optimizer', type=str, default='momentum',
            help='Optimizer for training and evaluation.')
        self.add_argument(
            '--data_format', type=str, default=None,
            choices=['channels_first', 'channels_last'],
            help='A flag to override the data format used in the model. '
            'channels_first provides a performance boost on GPU but '
            'is not always compatible with CPU. If left unspecified, '
            'the data format will be chosen automatically based on '
            'whether TensorFlow was built for CPU or GPU.')
        # BUG FIX: the two help strings below lost the space between their
        # concatenated parts ('...parent tochild...') and misspelled
        # 'distillation'; only the help text changed, flags/defaults did not.
        self.add_argument(
            '--distillation_coeff', type=float, default=0.01,
            help='Coefficient of distillation to be applied from parent to '
            'child. This is only useful when performing distillation.')
        self.add_argument(
            '--probes_coeff', type=float, default=0.0001,
            help='Coefficient of weight to be applied from parent to '
            'child. This is only useful when performing mentoring.')
        self.add_argument(
            '--weight_decay_coeff', type=float, default=0.0002,
            help='Coefficient of weight to be applied to the '
            'weight decay regularizer.')
        self.add_argument(
            '--temperature', type=float, default=3,
            help='Temperature to be used for the softmax layer')
        self.add_argument(
            '--num_probes', type=int, default=0,
            help='Number of probes to be used')
        self.add_argument(
            '--pool_probes', type=int, default=2,
            help='Maxpool probes by')
        self.add_argument(
            '--initial_learning_rate_mentor', type=float, default=0.001,
            help='Set initial learning rate for mentor')
        self.add_argument(
            '--initial_learning_rate_mentee', type=float, default=0.001,
            help='Set initial learning rate for mentee')
        self.add_argument(
            '--initial_learning_rate_finetune', type=float, default=0.001,
            help='Set initial learning rate finetune')
        self.add_argument(
            '--pool_type', type=str, default='max',
            help='Pool type for probes.')
| true
| true
|
f716e0e1798b4361d576daa1b6e3bf179cfdaf7c
| 5,362
|
py
|
Python
|
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
|
TanJay/openthread
|
ffd28ebd4d874fbc71f556ced86efc306e6a2d4b
|
[
"BSD-3-Clause"
] | 1
|
2018-12-31T08:12:49.000Z
|
2018-12-31T08:12:49.000Z
|
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
|
syin2/openthread
|
a9f42768ec221380f42bfd311bc68e784b2163a6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
|
syin2/openthread
|
a9f42768ec221380f42bfd311bc68e784b2163a6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
# Node ids of the five simulated devices (one per topology role).
LEADER1 = 1
ROUTER1 = 2
LEADER2 = 3
ROUTER2 = 4
MED = 5

# Active Operational Dataset of the first network (newer timestamp).
DATASET1_TIMESTAMP = 20
DATASET1_CHANNEL = 11
DATASET1_PANID = 0xface

# Active Operational Dataset of the second network (older timestamp,
# different channel).
# NOTE(review): DATASET2_PANID is never used -- setUp passes
# DATASET1_PANID to every node; confirm that is intentional.
DATASET2_TIMESTAMP = 10
DATASET2_CHANNEL = 12
DATASET2_PANID = 0xafce
class Cert_9_2_12_Announce(unittest.TestCase):
    """Thread certification case 9.2.12: channel migration via Announce.

    Two networks are formed on different channels (11 and 12); after
    LEADER1 sends an Announce, the second network's devices are expected
    to join the first network and become reachable from LEADER1.
    """

    def setUp(self):
        """Create the 5-node topology and configure per-node datasets.

        Whitelists form a linear chain:
        LEADER1 - ROUTER1 - LEADER2 - ROUTER2 - MED.
        LEADER1/ROUTER1 use DATASET1's channel (11); LEADER2/ROUTER2/MED
        use DATASET2's channel (12).  All nodes share DATASET1_PANID.
        """
        self.nodes = {}
        for i in range(1,6):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
        self.nodes[LEADER1].set_mode('rsdn')
        self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER1].enable_whitelist()

        self.nodes[ROUTER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
        self.nodes[ROUTER1].set_mode('rsdn')
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64())
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()
        self.nodes[ROUTER1].set_router_selection_jitter(1)

        self.nodes[LEADER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
        self.nodes[LEADER2].set_mode('rsdn')
        self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[LEADER2].enable_whitelist()
        self.nodes[LEADER2].set_router_selection_jitter(1)

        self.nodes[ROUTER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
        self.nodes[ROUTER2].set_mode('rsdn')
        self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER2].get_addr64())
        self.nodes[ROUTER2].add_whitelist(self.nodes[MED].get_addr64())
        self.nodes[ROUTER2].enable_whitelist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)

        # MED: 'rsn' (no 'd') -- a minimal end device, attaches as a child.
        self.nodes[MED].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
        self.nodes[MED].set_mode('rsn')
        self.nodes[MED].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[MED].enable_whitelist()

    def tearDown(self):
        """Stop and discard every simulated node."""
        for node in list(self.nodes.values()):
            node.stop()
        del self.nodes

    def test(self):
        """Bring up both networks, announce, and verify migration."""
        # Network 1: LEADER1 (also the commissioner) and ROUTER1.
        self.nodes[LEADER1].start()
        self.nodes[LEADER1].set_state('leader')
        self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
        self.nodes[LEADER1].commissioner_start()
        time.sleep(3)

        self.nodes[ROUTER1].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        # Network 2: LEADER2, ROUTER2 and the MED child.
        self.nodes[LEADER2].start()
        self.nodes[LEADER2].set_state('leader')
        self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')

        self.nodes[ROUTER2].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')

        self.nodes[MED].start()
        time.sleep(5)
        self.assertEqual(self.nodes[MED].get_state(), 'child')

        # Pick ROUTER1's first non-link-local address as the announce target.
        ipaddrs = self.nodes[ROUTER1].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break

        self.nodes[LEADER1].announce_begin(0x1000, 1, 1000, ipaddr)
        time.sleep(30)

        # After the announce the network-2 devices must have attached to
        # network 1: both former leaders are now plain routers.
        self.assertEqual(self.nodes[LEADER2].get_state(), 'router')
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
        self.assertEqual(self.nodes[MED].get_state(), 'child')

        # MED must be reachable from LEADER1 on every non-link-local address.
        ipaddrs = self.nodes[MED].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                self.assertTrue(self.nodes[LEADER1].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| 40.621212
| 114
| 0.70179
|
import time
import unittest
import node
LEADER1 = 1
ROUTER1 = 2
LEADER2 = 3
ROUTER2 = 4
MED = 5
DATASET1_TIMESTAMP = 20
DATASET1_CHANNEL = 11
DATASET1_PANID = 0xface
DATASET2_TIMESTAMP = 10
DATASET2_CHANNEL = 12
DATASET2_PANID = 0xafce
class Cert_9_2_12_Announce(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i)
self.nodes[LEADER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
self.nodes[LEADER1].set_mode('rsdn')
self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER1].enable_whitelist()
self.nodes[ROUTER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[LEADER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[LEADER2].set_mode('rsdn')
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[LEADER2].enable_whitelist()
self.nodes[LEADER2].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[ROUTER2].set_mode('rsdn')
self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER2].add_whitelist(self.nodes[MED].get_addr64())
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
self.nodes[MED].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID)
self.nodes[MED].set_mode('rsn')
self.nodes[MED].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[MED].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER1].start()
self.nodes[LEADER1].set_state('leader')
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[LEADER1].commissioner_start()
time.sleep(3)
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[LEADER2].start()
self.nodes[LEADER2].set_state('leader')
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[MED].start()
time.sleep(5)
self.assertEqual(self.nodes[MED].get_state(), 'child')
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.nodes[LEADER1].announce_begin(0x1000, 1, 1000, ipaddr)
time.sleep(30)
self.assertEqual(self.nodes[LEADER2].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.assertEqual(self.nodes[MED].get_state(), 'child')
ipaddrs = self.nodes[MED].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
self.assertTrue(self.nodes[LEADER1].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| true
| true
|
f716e10b6e71953481b66f47f8b43c293e3fae0b
| 321
|
py
|
Python
|
wrench/labelmodel/__init__.py
|
rpryzant/wrench
|
3668c359aeff18724e927a207a85da17f2ead823
|
[
"Apache-2.0"
] | 1
|
2021-11-24T04:01:08.000Z
|
2021-11-24T04:01:08.000Z
|
wrench/labelmodel/__init__.py
|
yinkaiw/wrench
|
f20135eb9b1d51b5bad92b3a910efd92235df356
|
[
"Apache-2.0"
] | null | null | null |
wrench/labelmodel/__init__.py
|
yinkaiw/wrench
|
f20135eb9b1d51b5bad92b3a910efd92235df356
|
[
"Apache-2.0"
] | null | null | null |
from .dawid_skene import DawidSkene
from .flyingsquid import FlyingSquid
from .generative_model import GenerativeModel
from .gold import GoldCondProb
from .majority_voting import MajorityVoting, MajorityWeightedVoting
from .metal import MeTaL
from .naive_bayes import NaiveBayesModel
from .snorkel import Snorkel
| 35.666667
| 68
| 0.844237
|
from .dawid_skene import DawidSkene
from .flyingsquid import FlyingSquid
from .generative_model import GenerativeModel
from .gold import GoldCondProb
from .majority_voting import MajorityVoting, MajorityWeightedVoting
from .metal import MeTaL
from .naive_bayes import NaiveBayesModel
from .snorkel import Snorkel
| true
| true
|
f716e2ca2dbea9c8c4a6ac6e99b6f76798d9cf6c
| 3,733
|
py
|
Python
|
python/bot_discord.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | 1
|
2020-10-06T01:20:07.000Z
|
2020-10-06T01:20:07.000Z
|
python/bot_discord.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
python/bot_discord.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
"""Discord music bot. Created by poomipat01."""
import asyncio
import discord
import youtube_dl
import os
from discord.ext import commands
def read_token():
    """Return the bot token: the first line of ``token.ini``, stripped."""
    with open("token.ini", 'r') as token_file:
        first_line = token_file.readline()
    return first_line.strip()
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''

# Discord bot token, supplied via the environment (never hard-coded).
TOKEN = os.environ['TOKEN']

# youtube-dl settings: best available audio, filesystem-safe filenames,
# single video only (no playlist expansion).
ytdl_format_options = {
    'format': 'bestaudio/best',
    'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0'  # bind to ipv4 since ipv6 addresses cause issues sometimes
}

# ffmpeg: -vn drops any video stream, audio only.
ffmpeg_options = {
    'options': '-vn'
}

# Shared downloader instance used by YTDLSource.from_url.
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
    """Volume-adjustable audio source backed by a youtube-dl download.

    The last downloaded file name is stored on the *class* attribute
    ``filename`` (set by :meth:`from_url`) so that ``Music.ensure_voice``
    can delete it before the next download.
    """

    def __init__(self, source, *, data, volume=1):
        super().__init__(source, volume)
        self.data = data                 # raw youtube-dl info dict
        self.title = data.get('title')
        self.url = data.get('url')

    @classmethod
    async def from_url(cls, url, *, loop=None, stream=False):
        """Build a source from *url*; download the file unless *stream*.

        IDIOM FIX: the classmethod's first parameter was named ``self``;
        it receives the class, so it is now ``cls`` (behavior unchanged).
        """
        loop = loop or asyncio.get_event_loop()
        # youtube-dl is blocking -- run it in the default executor.
        data = await loop.run_in_executor(
            None, lambda: ytdl.extract_info(url, download=not stream))

        if 'entries' in data:
            # The URL was a playlist: take its first item.
            data = data['entries'][0]

        filename = data['url'] if stream else ytdl.prepare_filename(data)
        # Deliberately stored on the class, not the instance: callers read
        # YTDLSource.filename for cleanup.
        cls.filename = filename
        return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
    """Voice commands for the bot: join, ping, play, volume, stop."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def join(self, ctx, *, channel: discord.VoiceChannel):
        """Joins a voice channel"""
        if ctx.voice_client is not None:
            # Already connected somewhere: just move to the requested channel.
            return await ctx.voice_client.move_to(channel)
        await channel.connect()

    @commands.command()
    async def ping(self,ctx):
        """Report the gateway latency in milliseconds."""
        # NOTE(review): reads the module-level `bot`, not `self.bot` --
        # works, but is inconsistent with the rest of the cog.
        await ctx.send(f'ping : {round(bot.latency * 1000)}ms')

    @commands.command()
    async def play(self, ctx, *, url):
        """Download *url* with youtube-dl and play it in the voice channel."""
        async with ctx.typing():
            player = await YTDLSource.from_url(url=url, loop=self.bot.loop)
            ctx.voice_client.play(player)
        await ctx.send('Now playing: {}'.format(player.title))

    @commands.command()
    async def volume(self, ctx, volume: int):
        """Changes the player's volume"""
        if ctx.voice_client is None:
            return await ctx.send("Not connected to a voice channel.")
        # The command takes a percentage; the source expects 0.0-1.0.
        ctx.voice_client.source.volume = volume / 100
        await ctx.send("Changed volume to {}%".format(volume))

    @commands.command()
    async def stop(self, ctx):
        """Stops and disconnects the bot from voice"""
        await ctx.voice_client.disconnect()

    @play.before_invoke
    async def ensure_voice(self, ctx):
        """Pre-invoke hook for `play`: connect to voice, or stop and clean up.

        If not yet in a voice channel, joins the caller's channel (or
        rejects the command).  If already playing, stops playback and
        deletes the previously downloaded file (YTDLSource.filename).
        """
        if ctx.voice_client is None:
            if ctx.author.voice:
                await ctx.author.voice.channel.connect()
            else:
                await ctx.send("You are not connected to a voice channel.")
                raise commands.CommandError("Author not connected to a voice channel.")
        elif ctx.voice_client.is_playing():
            ctx.voice_client.stop()
            filename = YTDLSource.filename
            if os.path.exists(filename):
                os.remove(filename)
# Commands are triggered by the "$" prefix or by mentioning the bot.
bot = commands.Bot(command_prefix=commands.when_mentioned_or("$"))

@bot.event
async def on_ready():
    """Set the bot's presence once the gateway connection is ready."""
    await bot.change_presence(status=discord.Status.online, activity=discord.Game('$help for information'))
    print("Bot online!!!")

bot.add_cog(Music(bot))
bot.run(TOKEN)
| 30.104839
| 107
| 0.639432
|
import asyncio
import discord
import youtube_dl
import os
from discord.ext import commands
def read_token():
with open("token.ini",'r') as f:
lines = f.readline()
return lines.strip()
youtube_dl.utils.bug_reports_message = lambda: ''
TOKEN = os.environ['TOKEN']
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=1):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(self, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
self.filename = filename
return self(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def join(self, ctx, *, channel: discord.VoiceChannel):
if ctx.voice_client is not None:
return await ctx.voice_client.move_to(channel)
await channel.connect()
@commands.command()
async def ping(self,ctx):
await ctx.send(f'ping : {round(bot.latency * 1000)}ms')
@commands.command()
async def play(self, ctx, *, url):
async with ctx.typing():
player = await YTDLSource.from_url(url=url, loop=self.bot.loop)
ctx.voice_client.play(player)
await ctx.send('Now playing: {}'.format(player.title))
@commands.command()
async def volume(self, ctx, volume: int):
if ctx.voice_client is None:
return await ctx.send("Not connected to a voice channel.")
ctx.voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume))
@commands.command()
async def stop(self, ctx):
await ctx.voice_client.disconnect()
@play.before_invoke
async def ensure_voice(self, ctx):
if ctx.voice_client is None:
if ctx.author.voice:
await ctx.author.voice.channel.connect()
else:
await ctx.send("You are not connected to a voice channel.")
raise commands.CommandError("Author not connected to a voice channel.")
elif ctx.voice_client.is_playing():
ctx.voice_client.stop()
filename = YTDLSource.filename
if os.path.exists(filename):
os.remove(filename)
bot = commands.Bot(command_prefix=commands.when_mentioned_or("$"))
@bot.event
async def on_ready():
await bot.change_presence(status=discord.Status.online, activity=discord.Game('$help for information'))
print("Bot online!!!")
bot.add_cog(Music(bot))
bot.run(TOKEN)
| true
| true
|
f716e5b3df3c3c98ce55161f28ad6090b87813a0
| 602
|
py
|
Python
|
quickstart/python/sms/example-2/send_notifications.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 234
|
2016-01-27T03:04:38.000Z
|
2022-02-25T20:13:43.000Z
|
quickstart/python/sms/example-2/send_notifications.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 351
|
2016-04-06T16:55:33.000Z
|
2022-03-10T18:42:36.000Z
|
quickstart/python/sms/example-2/send_notifications.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 494
|
2016-03-30T15:28:20.000Z
|
2022-03-28T19:39:36.000Z
|
# /usr/bin/env python
# Download the twilio-python library from twilio.com/docs/libraries/python
import os

from twilio.rest import Client

# Find these values at https://twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
# BUG FIX: the auth token was a hard-coded placeholder string; read it from
# the environment, consistent with account_sid and the security note above.
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

# Send one MMS with two attached images (twilio-python 6.x API path).
message = client.api.account.messages.create(
    to="+12316851234",
    from_="+15555555555",
    body="Hello there!",
    media_url=['https://demo.twilio.com/owl.png',
               'https://demo.twilio.com/logo.png'])
| 33.444444
| 74
| 0.727575
|
import os
from twilio.rest import Client
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = "YYYYYYYYYYYYYYYYYY"
client = Client(account_sid, auth_token)
message = client.api.account.messages.create(
to="+12316851234",
from_="+15555555555",
body="Hello there!",
media_url=['https://demo.twilio.com/owl.png',
'https://demo.twilio.com/logo.png'])
| true
| true
|
f716e7003ce379bfc28bec594685939ffeb73fea
| 1,069
|
py
|
Python
|
award/migrations/0003_rating.py
|
EmmanuelMuchiri/Awards
|
d786689a6f5f32532d005ef6a50eed4600ba5ecc
|
[
"MIT"
] | null | null | null |
award/migrations/0003_rating.py
|
EmmanuelMuchiri/Awards
|
d786689a6f5f32532d005ef6a50eed4600ba5ecc
|
[
"MIT"
] | 5
|
2020-06-05T22:45:28.000Z
|
2021-09-08T01:16:58.000Z
|
award/migrations/0003_rating.py
|
EmmanuelMuchiri/Awards
|
d786689a6f5f32532d005ef6a50eed4600ba5ecc
|
[
"MIT"
] | 3
|
2019-09-09T08:16:01.000Z
|
2019-11-25T11:37:58.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-07 12:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Rating model: per-profile scores for a project.

    design/usability/creativity hold individual integer scores and
    overall_rating presumably their aggregate (computed elsewhere --
    confirm); all default to 0.  Deleting the related Profile or Project
    cascades to its ratings.
    """

    dependencies = [
        ('award', '0002_project'),
    ]

    operations = [
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('design', models.IntegerField(blank=True, default=0)),
                ('usability', models.IntegerField(blank=True, default=0)),
                ('creativity', models.IntegerField(blank=True, default=0)),
                ('overall_rating', models.IntegerField(blank=True, default=0)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Profile')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Project')),
            ],
        ),
    ]
| 36.862069
| 114
| 0.615529
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('award', '0002_project'),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design', models.IntegerField(blank=True, default=0)),
('usability', models.IntegerField(blank=True, default=0)),
('creativity', models.IntegerField(blank=True, default=0)),
('overall_rating', models.IntegerField(blank=True, default=0)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Profile')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Project')),
],
),
]
| true
| true
|
f716e8fadfc4a02237cc5c211b47dc9372ec31ac
| 4,293
|
py
|
Python
|
salamanca/cli.py
|
coroa/salamanca
|
29da72cc40dd511c81bfdcb71ac956a24de1148b
|
[
"Apache-2.0"
] | null | null | null |
salamanca/cli.py
|
coroa/salamanca
|
29da72cc40dd511c81bfdcb71ac956a24de1148b
|
[
"Apache-2.0"
] | null | null | null |
salamanca/cli.py
|
coroa/salamanca
|
29da72cc40dd511c81bfdcb71ac956a24de1148b
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
from salamanca import data
from salamanca import currency
COMMANDS = {}
#
# Download wb data
#
def download_wb_cli(parser):
    """Attach the ``download_wb`` command's options to *parser*."""
    parser.add_argument('--log', action="store_true",
                        help='Print log output during download.')
    parser.add_argument('--overwrite', action="store_true",
                        help='Overwrite local files if they exist.')
def download_wb(log=False, overwrite=False, **kwargs):
    """Download ISO metadata and every configured World Bank indicator.

    Parameters
    ----------
    log : bool, optional
        if True, raise the root logger to DEBUG so progress is visible
    overwrite : bool, optional
        passed to the data layer: re-download files that already exist
    kwargs : dict
        ignored; absorbs the full argparse namespace
    """
    if log:
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
    wb = data.WorldBank()
    wb.iso_metadata(overwrite=overwrite)
    for ind in data.INDICATORS_WB:
        wb.query(ind, overwrite=overwrite)
COMMANDS['download_wb'] = (
"""Download national World Bank data to your machine""",
download_wb_cli,
download_wb,
)
#
# Currency Exchange
#
def exchange_cli(parser):
    """Attach the ``exchange`` command's options to *parser*."""
    parser.add_argument('-x', '--amt', default=1.0,
                        help='quantity of currency (default: 1.0)')
    parser.add_argument('-u', '--units', default='MER',
                        help="units in which to do conversion [MER or PPP] (default: MER)")
    parser.add_argument('-m', '--meth', default='deflator',
                        help="method to use to do conversion [deflator or cpi] (default: deflator)")
    # -f/-t are mandatory, so list them under their own group in --help.
    required = parser.add_argument_group('required arguments')
    from_help = """
    ISO: 3-letter ISO code for the origin country, YEAR: origin year
    """
    required.add_argument('-f', '--from', help=from_help, nargs=2,
                          metavar=('ISO', 'YEAR'), required=True)
    to_help = """
    ISO: 3-letter ISO code for the destination country, YEAR: destination year
    """
    required.add_argument('-t', '--to', help=to_help, nargs=2,
                          metavar=('ISO', 'YEAR'), required=True)
def exchange(**kwargs):
    """Convert an amount of currency between two country/year pairs.

    Expects the keyword arguments produced by ``exchange_cli``
    ('amt', 'from', 'to', 'units', 'meth').  Prints and returns the
    converted amount.
    """
    amt = kwargs['amt']
    fromiso, fromyr = kwargs['from']
    toiso, toyr = kwargs['to']
    units = kwargs['units']
    inflation_method = kwargs['meth']
    xlator = currency.Translator()
    ret = xlator.exchange(amt,
                          fromiso=fromiso, fromyr=fromyr,
                          toiso=toiso, toyr=toyr,
                          units=units, inflation_method=inflation_method)
    print(ret)
    return ret
COMMANDS['exchange'] = (
"""Exchange currency from one country/year to another.""",
exchange_cli,
exchange,
)
def to_ppp_cli(parser):
    """Attach the ``to_ppp`` command's options to *parser*."""
    parser.add_argument('-x', '--amt', type=float, default=1.0,
                        help='quantity of currency in MER (default: 1.0)')
    parser.add_argument('--iso', help='3-letter ISO code for the country')
    parser.add_argument('--year', type=int, help='year of conversion')
def to_ppp(**kwargs):
    """Convert an MER amount to PPP for a given country and year.

    Expects the keyword arguments produced by ``to_ppp_cli``
    ('amt', 'iso', 'year').  Prints and returns the converted amount.
    """
    amt = kwargs['amt']
    iso = kwargs['iso']
    year = kwargs['year']
    xlator = currency.Translator()
    ret = amt * xlator.mer_to_ppp(iso, year)
    print(ret)
    return ret
COMMANDS['to_ppp'] = (
"""Exchange currency in MER to PPP.""",
to_ppp_cli,
to_ppp,
)
def to_mer_cli(parser):
    """Attach the ``to_mer`` command's options to *parser*."""
    parser.add_argument('-x', '--amt', type=float, default=1.0,
                        help='quantity of currency in PPP (default: 1.0)')
    parser.add_argument('--iso', help='3-letter ISO code for the country')
    parser.add_argument('--year', type=int, help='year of conversion')
def to_mer(**kwargs):
    """Convert a PPP amount to MER for a given country and year.

    Expects the keyword arguments produced by ``to_mer_cli``
    ('amt', 'iso', 'year').  Prints and returns the converted amount.
    """
    amt = kwargs['amt']
    iso = kwargs['iso']
    # int() is redundant when argparse supplied the value (type=int), but
    # keeps direct calls that pass the year as a string working.
    year = int(kwargs['year'])
    xlator = currency.Translator()
    ret = amt / xlator.mer_to_ppp(iso, year)
    print(ret)
    return ret
COMMANDS['to_mer'] = (
"""Exchange currency in PPP to MER.""",
to_mer_cli,
to_mer,
)
def main():
    """Entry point for the salamanca command-line interface.

    Builds one sub-parser per entry in COMMANDS (each command registers
    its own options via its *_cli helper) and dispatches to the
    command's implementation with the parsed arguments.
    """
    descr = """
    Main CLI for salamanca.
    """
    parser = argparse.ArgumentParser(
        description=descr,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    subparsers = parser.add_subparsers(dest='command')
    for cmd in COMMANDS:
        cli_help = COMMANDS[cmd][0]
        cli_func = COMMANDS[cmd][1]
        subparser = subparsers.add_parser(
            cmd,
            help=cli_help,
        )
        cli_func(subparser)

    args = parser.parse_args()
    cmd = args.command
    if cmd is None:
        # BUG FIX: on Python 3 subcommands are optional by default, so a
        # bare invocation left args.command = None and crashed on the
        # COMMANDS lookup below; show the help text instead.
        parser.print_help()
        return
    cmd_func = COMMANDS[cmd][2]
    cmd_func(**vars(args))
if __name__ == '__main__':
main()
| 25.553571
| 81
| 0.623107
|
import argparse
import logging
from salamanca import data
from salamanca import currency
COMMANDS = {}
def download_wb_cli(parser):
log = 'Print log output during download.'
parser.add_argument('--log', help=log, action="store_true")
overwrite = 'Overwrite local files if they exist.'
parser.add_argument('--overwrite', help=overwrite, action="store_true")
def download_wb(log=False, overwrite=False, **kwargs):
if log:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
wb = data.WorldBank()
wb.iso_metadata(overwrite=overwrite)
for ind in data.INDICATORS_WB:
wb.query(ind, overwrite=overwrite)
COMMANDS['download_wb'] = (
"""Download national World Bank data to your machine""",
download_wb_cli,
download_wb,
)
def exchange_cli(parser):
amt = 'quantity of currency (default: 1.0)'
parser.add_argument('-x', '--amt', help=amt, default=1.0)
units = "units in which to do conversion [MER or PPP] (default: MER)"
parser.add_argument('-u', '--units', help=units, default='MER')
meth = "method to use to do conversion [deflator or cpi] (default: deflator)"
parser.add_argument('-m', '--meth', help=meth, default='deflator')
required = parser.add_argument_group('required arguments')
_from = """
ISO: 3-letter ISO code for the origin country, YEAR: origin year
"""
required.add_argument('-f', '--from', help=_from,
nargs=2, metavar=('ISO', 'YEAR'), required=True)
_to = """
ISO: 3-letter ISO code for the destination country, YEAR: destination year
"""
required.add_argument('-t', '--to', help=_to,
nargs=2, metavar=('ISO', 'YEAR'), required=True)
def exchange(**kwargs):
amt = kwargs['amt']
fromiso, fromyr = kwargs['from']
toiso, toyr = kwargs['to']
units = kwargs['units']
inflation_method = kwargs['meth']
xlator = currency.Translator()
ret = xlator.exchange(amt,
fromiso=fromiso, fromyr=fromyr,
toiso=toiso, toyr=toyr,
units=units, inflation_method=inflation_method)
print(ret)
return ret
COMMANDS['exchange'] = (
"""Exchange currency from one country/year to another.""",
exchange_cli,
exchange,
)
def to_ppp_cli(parser):
amt = 'quantity of currency in MER (default: 1.0)'
parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0)
iso = '3-letter ISO code for the country'
parser.add_argument('--iso', help=iso)
year = 'year of conversion'
parser.add_argument('--year', type=int, help=year)
def to_ppp(**kwargs):
amt = kwargs['amt']
iso = kwargs['iso']
year = kwargs['year']
xlator = currency.Translator()
ret = amt * xlator.mer_to_ppp(iso, year)
print(ret)
return ret
COMMANDS['to_ppp'] = (
"""Exchange currency in MER to PPP.""",
to_ppp_cli,
to_ppp,
)
def to_mer_cli(parser):
amt = 'quantity of currency in PPP (default: 1.0)'
parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0)
iso = '3-letter ISO code for the country'
parser.add_argument('--iso', help=iso)
year = 'year of conversion'
parser.add_argument('--year', type=int, help=year)
def to_mer(**kwargs):
amt = kwargs['amt']
iso = kwargs['iso']
year = int(kwargs['year'])
xlator = currency.Translator()
ret = amt / xlator.mer_to_ppp(iso, year)
print(ret)
return ret
COMMANDS['to_mer'] = (
"""Exchange currency in PPP to MER.""",
to_mer_cli,
to_mer,
)
def main():
    """Entry point for the salamanca CLI.

    Builds one argparse subparser per registered command in ``COMMANDS``
    and dispatches the parsed arguments to that command's implementation.
    """
    descr = """
    Main CLI for salamanca.
    """
    parser = argparse.ArgumentParser(
        description=descr,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    subparsers = parser.add_subparsers(dest='command')
    for cmd in COMMANDS:
        cli_help = COMMANDS[cmd][0]
        cli_func = COMMANDS[cmd][1]
        subparser = subparsers.add_parser(
            cmd,
            help=cli_help,
        )
        cli_func(subparser)
    args = parser.parse_args()
    cmd = args.command
    # Bug fix: subparsers are optional in Python 3, so invoking the CLI
    # with no command left args.command as None and COMMANDS[None] raised
    # a KeyError. Show usage and exit non-zero instead.
    if cmd is None:
        parser.print_help()
        parser.exit(1)
    cmd_func = COMMANDS[cmd][2]
    cmd_func(**vars(args))
if __name__ == '__main__':
    main()
| true
| true
|
f716ea6ba7a5521a99b2eb2280b6736549b4ed5d
| 1,648
|
py
|
Python
|
src/tests/unit/common/test_css.py
|
td00/pretalx
|
aff450de9420fca167e04345fa24ee7140fae819
|
[
"Apache-2.0"
] | null | null | null |
src/tests/unit/common/test_css.py
|
td00/pretalx
|
aff450de9420fca167e04345fa24ee7140fae819
|
[
"Apache-2.0"
] | null | null | null |
src/tests/unit/common/test_css.py
|
td00/pretalx
|
aff450de9420fca167e04345fa24ee7140fae819
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from django.core.exceptions import ValidationError
from pretalx.common.css import validate_css
from pretalx.event.models import Event
@pytest.fixture
def valid_css():
    """Return a CSS snippet that ``validate_css`` should accept as-is."""
    return '''
body {
    background-color: #000;
    display: none;
}
.some-descriptor {
    border-style: dotted dashed solid double;
    BORDER-color: red green blue yellow;
}
#best-descriptor {
    border: 5px solid red;
}
'''
@pytest.fixture
def invalid_css(valid_css):
    """Return the valid CSS plus a rule (external url()) that must be rejected."""
    return valid_css + '''
a.other-descriptor {
    content: url("https://malicious.site.com");
}
'''
@pytest.fixture
def some_object():
    """Return an arbitrary plain object with no CSS semantics."""
    class Foo:
        pass
    return Foo()
def test_valid_css(valid_css):
    """validate_css returns acceptable input unchanged."""
    assert validate_css(valid_css) == valid_css
def test_invalid_css(invalid_css):
    """validate_css raises ValidationError on a disallowed rule."""
    with pytest.raises(ValidationError):
        validate_css(invalid_css)
@pytest.mark.django_db
def test_regenerate_css(event):
    """With a primary color set, regenerate_css stores per-app CSS settings."""
    from pretalx.common.tasks import regenerate_css
    event.primary_color = '#00ff00'
    event.save()
    regenerate_css(event.pk)
    # Reload by pk: the task operates on its own instance of the event.
    event = Event.objects.get(pk=event.pk)
    for local_app in ['agenda', 'cfp', 'orga']:
        assert event.settings.get(f'{local_app}_css_file')
        assert event.settings.get(f'{local_app}_css_checksum')
@pytest.mark.django_db
def test_regenerate_css_no_color(event):
    """Without a primary color, regenerate_css leaves no per-app CSS settings."""
    from pretalx.common.tasks import regenerate_css
    event.primary_color = None
    event.save()
    regenerate_css(event.pk)
    event = Event.objects.get(pk=event.pk)
    for local_app in ['agenda', 'cfp', 'orga']:
        assert not event.settings.get(f'{local_app}_css_file')
        assert not event.settings.get(f'{local_app}_css_checksum')
| 22.888889
| 66
| 0.711772
|
import pytest
from django.core.exceptions import ValidationError
from pretalx.common.css import validate_css
from pretalx.event.models import Event
@pytest.fixture
def valid_css():
return '''
body {
background-color: #000;
display: none;
}
.some-descriptor {
border-style: dotted dashed solid double;
BORDER-color: red green blue yellow;
}
#best-descriptor {
border: 5px solid red;
}
'''
@pytest.fixture
def invalid_css(valid_css):
return valid_css + '''
a.other-descriptor {
content: url("https://malicious.site.com");
}
'''
@pytest.fixture
def some_object():
class Foo:
pass
return Foo()
def test_valid_css(valid_css):
assert validate_css(valid_css) == valid_css
def test_invalid_css(invalid_css):
with pytest.raises(ValidationError):
validate_css(invalid_css)
@pytest.mark.django_db
def test_regenerate_css(event):
from pretalx.common.tasks import regenerate_css
event.primary_color = '#00ff00'
event.save()
regenerate_css(event.pk)
event = Event.objects.get(pk=event.pk)
for local_app in ['agenda', 'cfp', 'orga']:
assert event.settings.get(f'{local_app}_css_file')
assert event.settings.get(f'{local_app}_css_checksum')
@pytest.mark.django_db
def test_regenerate_css_no_color(event):
from pretalx.common.tasks import regenerate_css
event.primary_color = None
event.save()
regenerate_css(event.pk)
event = Event.objects.get(pk=event.pk)
for local_app in ['agenda', 'cfp', 'orga']:
assert not event.settings.get(f'{local_app}_css_file')
assert not event.settings.get(f'{local_app}_css_checksum')
| true
| true
|
f716ec2ee533f960f1e83f092c89ca170e08c6c2
| 679
|
py
|
Python
|
bin/django-admin.py
|
hkolstee/bachelor-project
|
5d26632c2d920327248efdabf2acc53781264dc2
|
[
"MIT"
] | null | null | null |
bin/django-admin.py
|
hkolstee/bachelor-project
|
5d26632c2d920327248efdabf2acc53781264dc2
|
[
"MIT"
] | null | null | null |
bin/django-admin.py
|
hkolstee/bachelor-project
|
5d26632c2d920327248efdabf2acc53781264dc2
|
[
"MIT"
] | null | null | null |
#!/home/hkolstee/bproject/virtenv/bin/python
# Compatibility shim for the deprecated `django-admin.py` entry point.
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # On Django >= 4.0 the warning class no longer exists: fail loudly
    # and point users at the supported `django-admin` command.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn, then behave exactly like `django-admin`.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| 30.863636
| 80
| 0.726068
|
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| true
| true
|
f716ecc7b5dee823c09a28bd380274cc0c0c6946
| 2,069
|
py
|
Python
|
ros/src/twist_controller/twist_controller.py
|
gradient100/Capstone
|
9a4a5fdc22a994b1cfdbef19b66fcb4c5b3b562e
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
gradient100/Capstone
|
9a4a5fdc22a994b1cfdbef19b66fcb4c5b3b562e
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
gradient100/Capstone
|
9a4a5fdc22a994b1cfdbef19b66fcb4c5b3b562e
|
[
"MIT"
] | null | null | null |
import rospy
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858  # not referenced in this module's visible code
ONE_MPH = 0.44704    # not referenced in this module's visible code
class Controller(object):
    """Drive-by-wire controller producing throttle, brake and steering.

    Combines a yaw controller for steering, a PID on velocity error for
    throttle, and a low-pass filter to smooth the measured velocity.
    """
    def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
        # 0.1 here is the minimum speed passed to the yaw controller.
        self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
        # PID gains for the throttle loop.
        kp = 0.3
        ki = 0.1
        kd = 0.
        tMin = 0. # Min throttle
        tMax = 0.2 # Max throttle
        self.throttle_controller = PID(kp, ki, kd, tMin, tMax)
        tau = 0.5 # 1/(2pi*tau) = cutoff frequency
        ts = 0.02 # Sample time
        self.vel_lpf = LowPassFilter(tau, ts)
        self.vehicle_mass = vehicle_mass
        self.fuel_capacity = fuel_capacity
        self.brake_deadband = brake_deadband
        self.decel_limit = decel_limit
        self.accel_limit = accel_limit
        self.wheel_radius = wheel_radius
        self.last_time = rospy.get_time()
    def control(self, current_vel, dbw_enabled, targ_linear_vel, targ_angular_vel):
        """Return (throttle, brake, steering) for the current timestep.

        When drive-by-wire is disabled the PID is reset (so no stale
        error integrates) and zeros are returned.
        """
        if not dbw_enabled:
            self.throttle_controller.reset()
            return 0., 0., 0.
        # Smooth the noisy velocity measurement before using it.
        current_vel = self.vel_lpf.filt(current_vel)
        steering = self.yaw_controller.get_steering(targ_linear_vel, targ_angular_vel, current_vel)
        vel_diff = current_vel - targ_linear_vel
        self.last_vel = current_vel
        # Wall-clock step size for the PID and the deceleration estimate.
        current_time = rospy.get_time()
        sample_time = current_time-self.last_time
        self.last_time = current_time
        throttle = self.throttle_controller.step(-vel_diff, sample_time) # -vel_diff because vel_diff is neg if currently going slower than desired speed. So, we want to throttle, but pid controller is expecting a positive diff
        brake = 0.
        if targ_linear_vel == 0. and current_vel < 0.1:
            # Hold the vehicle at a stop with a constant brake torque.
            throttle = 0.
            brake = 700. #N*m
        elif throttle < .1 and vel_diff > 0:
            # Faster than desired with little throttle requested: brake
            # with torque = decel * mass * wheel radius, capped by decel_limit.
            throttle = 0.
            #decel = min(abs(vel_diff), abs(self.decel_limit))
            decel = min(abs(1.0*vel_diff/sample_time), abs(self.decel_limit))
            brake = abs(decel)*self.vehicle_mass*self.wheel_radius
            #brake = min(abs(decel)*self.vehicle_mass*self.wheel_radius,700)
        return throttle, brake, steering
| 32.84127
| 221
| 0.760754
|
import rospy
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit, accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.
tMin = 0.
tMax = 0.2
self.throttle_controller = PID(kp, ki, kd, tMin, tMax)
tau = 0.5
ts = 0.02
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, targ_linear_vel, targ_angular_vel):
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(targ_linear_vel, targ_angular_vel, current_vel)
vel_diff = current_vel - targ_linear_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time-self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(-vel_diff, sample_time)
brake = 0.
if targ_linear_vel == 0. and current_vel < 0.1:
throttle = 0.
brake = 700.
elif throttle < .1 and vel_diff > 0:
throttle = 0.
decel = min(abs(1.0*vel_diff/sample_time), abs(self.decel_limit))
brake = abs(decel)*self.vehicle_mass*self.wheel_radius
return throttle, brake, steering
| false
| true
|
f716ece959d936bd66bf82233eb4b71aa5c73834
| 2,091
|
py
|
Python
|
quant/utils/dingding.py
|
tianhm/TheNextQuant
|
a0d062fe8160088118b128d757d01b396c129680
|
[
"MIT"
] | 1
|
2020-03-24T02:19:20.000Z
|
2020-03-24T02:19:20.000Z
|
quant/utils/dingding.py
|
tianhm/TheNextQuant
|
a0d062fe8160088118b128d757d01b396c129680
|
[
"MIT"
] | null | null | null |
quant/utils/dingding.py
|
tianhm/TheNextQuant
|
a0d062fe8160088118b128d757d01b396c129680
|
[
"MIT"
] | 5
|
2019-08-12T09:40:27.000Z
|
2022-01-26T07:36:24.000Z
|
# -*- coding:utf-8 -*-
"""
钉钉机器人接口
Author: HuangTao
Date: 2018/08/04
Update: 2018/12/24 1. 增加markdown格式消息推送;
"""
from quant.utils import logger
from quant.utils.http_client import AsyncHttpRequests
class DingTalk:
    """Minimal client for the DingTalk group-robot webhook API."""

    BASE_URL = 'https://oapi.dingtalk.com/robot/send?access_token='

    @staticmethod
    def _at_field(phones, is_at_all):
        """Build the optional ``at`` section of a robot message payload.

        @param phones list of member phone numbers to @-mention, or None
        @param is_at_all whether to @-mention everyone
        @return dict for the ``at`` key, or None when no mention is needed

        Note: ``phones`` takes precedence over ``is_at_all``, matching
        the original per-method logic where the phones branch ran last.
        """
        if phones:
            assert isinstance(phones, list)
            return {'atMobiles': phones}
        if is_at_all:
            return {'isAtAll': True}
        return None

    @classmethod
    async def _post(cls, access_token, body):
        """POST *body* to the webhook for *access_token* and log the result."""
        url = cls.BASE_URL + access_token
        headers = {'Content-Type': 'application/json'}
        result = await AsyncHttpRequests.post(url, data=body, headers=headers)
        logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)

    @classmethod
    async def send_text_msg(cls, access_token, content, phones=None, is_at_all=False):
        """Send a plain-text message.

        @param access_token DingTalk robot access token
        @param content message text
        @param phones list of member phone numbers to @-mention
        @param is_at_all whether to @-mention everyone (default False)
        """
        body = {
            'msgtype': 'text',
            'text': {
                'content': content
            }
        }
        at = cls._at_field(phones, is_at_all)
        if at is not None:
            body['at'] = at
        await cls._post(access_token, body)

    @classmethod
    async def send_markdown_msg(cls, access_token, title, text, phones=None, is_at_all=False):
        """Send a markdown-formatted message.

        @param access_token DingTalk robot access token
        @param title summary shown in the conversation preview
        @param text markdown-formatted message body
        @param phones list of member phone numbers to @-mention
        @param is_at_all whether to @-mention everyone (default False)
        """
        body = {
            'msgtype': 'markdown',
            'markdown': {
                'title': title,
                'text': text
            }
        }
        at = cls._at_field(phones, is_at_all)
        if at is not None:
            body['at'] = at
        await cls._post(access_token, body)
| 30.304348
| 94
| 0.568149
|
from quant.utils import logger
from quant.utils.http_client import AsyncHttpRequests
class DingTalk:
BASE_URL = 'https://oapi.dingtalk.com/robot/send?access_token='
@classmethod
async def send_text_msg(cls, access_token, content, phones=None, is_at_all=False):
body = {
'msgtype': 'text',
'text': {
'content': content
}
}
if is_at_all:
body['at'] = {'isAtAll': True}
if phones:
assert isinstance(phones, list)
body['at'] = {'atMobiles': phones}
url = cls.BASE_URL + access_token
headers = {'Content-Type': 'application/json'}
result = await AsyncHttpRequests.post(url, data=body, headers=headers)
logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
@classmethod
async def send_markdown_msg(cls, access_token, title, text, phones=None, is_at_all=False):
body = {
'msgtype': 'markdown',
'markdown': {
'title': title,
'text': text
}
}
if is_at_all:
body['at'] = {'isAtAll': True}
if phones:
assert isinstance(phones, list)
body['at'] = {'atMobiles': phones}
url = cls.BASE_URL + access_token
headers = {'Content-Type': 'application/json'}
result = await AsyncHttpRequests.post(url, data=body, headers=headers)
logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
| true
| true
|
f716ed7302b9f41211156c88b591db55e8d6fd9c
| 5,726
|
py
|
Python
|
threedi_api_client/openapi/models/inline_response20068.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
threedi_api_client/openapi/models/inline_response20068.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | 16
|
2021-05-31T09:52:04.000Z
|
2022-03-14T16:07:19.000Z
|
threedi_api_client/openapi/models/inline_response20068.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
3Di API
3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501
The version of the OpenAPI document: v3
Contact: info@nelen-schuurmans.nl
Generated by: https://openapi-generator.tech
"""
import logging
import pprint
import re # noqa: F401
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class InlineResponse20068(object):
    """Paginated list response wrapping ThreediModelTask results.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> OpenAPI type string (drives to_dict serialization)
    openapi_types = {
        'count': 'int',
        'next': 'str',
        'previous': 'str',
        'results': 'list[ThreediModelTask]'
    }
    # attribute name -> JSON key in the API payload
    attribute_map = {
        'count': 'count',
        'next': 'next',
        'previous': 'previous',
        'results': 'results'
    }
    def __init__(self, count=None, next=None, previous=None, results=None, local_vars_configuration=None):  # noqa: E501
        """InlineResponse20068 - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._count = None
        self._next = None
        self._previous = None
        self._results = None
        self.discriminator = None
        # Assign via the property setters so client-side validation runs.
        self.count = count
        self.next = next
        self.previous = previous
        self.results = results
    @property
    def count(self):
        """Gets the count of this InlineResponse20068.  # noqa: E501

        :return: The count of this InlineResponse20068.  # noqa: E501
        :rtype: int
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this InlineResponse20068.

        :param count: The count of this InlineResponse20068.  # noqa: E501
        :type: int
        """
        # `count` is required: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and count is None:  # noqa: E501
            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501
        self._count = count
    @property
    def next(self):
        """Gets the next of this InlineResponse20068.  # noqa: E501

        :return: The next of this InlineResponse20068.  # noqa: E501
        :rtype: str
        """
        return self._next
    @next.setter
    def next(self, next):
        """Sets the next of this InlineResponse20068.

        :param next: The next of this InlineResponse20068.  # noqa: E501
        :type: str
        """
        self._next = next
    @property
    def previous(self):
        """Gets the previous of this InlineResponse20068.  # noqa: E501

        :return: The previous of this InlineResponse20068.  # noqa: E501
        :rtype: str
        """
        return self._previous
    @previous.setter
    def previous(self, previous):
        """Sets the previous of this InlineResponse20068.

        :param previous: The previous of this InlineResponse20068.  # noqa: E501
        :type: str
        """
        self._previous = previous
    @property
    def results(self):
        """Gets the results of this InlineResponse20068.  # noqa: E501

        :return: The results of this InlineResponse20068.  # noqa: E501
        :rtype: list[ThreediModelTask]
        """
        return self._results
    @results.setter
    def results(self, results):
        """Sets the results of this InlineResponse20068.

        :param results: The results of this InlineResponse20068.  # noqa: E501
        :type: list[ThreediModelTask]
        """
        # `results` is required: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and results is None:  # noqa: E501
            raise ValueError("Invalid value for `results`, must not be `None`")  # noqa: E501
        self._results = results
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineResponse20068):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InlineResponse20068):
            return True
        return self.to_dict() != other.to_dict()
| 28.346535
| 166
| 0.59029
|
import logging
import pprint
import re
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class InlineResponse20068(object):
openapi_types = {
'count': 'int',
'next': 'str',
'previous': 'str',
'results': 'list[ThreediModelTask]'
}
attribute_map = {
'count': 'count',
'next': 'next',
'previous': 'previous',
'results': 'results'
}
def __init__(self, count=None, next=None, previous=None, results=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._next = None
self._previous = None
self._results = None
self.discriminator = None
self.count = count
self.next = next
self.previous = previous
self.results = results
@property
def count(self):
return self._count
@count.setter
def count(self, count):
if self.local_vars_configuration.client_side_validation and count is None:
raise ValueError("Invalid value for `count`, must not be `None`")
self._count = count
@property
def next(self):
return self._next
@next.setter
def next(self, next):
self._next = next
@property
def previous(self):
return self._previous
@previous.setter
def previous(self, previous):
self._previous = previous
@property
def results(self):
return self._results
@results.setter
def results(self, results):
if self.local_vars_configuration.client_side_validation and results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, InlineResponse20068):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, InlineResponse20068):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f716eee3eb58697b715967ba75f76f3d236c3384
| 3,353
|
py
|
Python
|
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
minigrid_basics/examples/rw_four_directions.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Example that uses Gym-Minigrid, a custom environment, and custom actions.
Gym-Minigrid has a larger action space that is not standard in reinforcement
learning. By default, the actions are {rotate left, rotate right, forward, pick
up object, drop object, toggle/activate object, done}. This example uses a class
overridden to have the standard 4 directional actions: {left, right, up, down}.
Here we have a random agent interacting with the environment. In this case, we
also use a custom environment, which is likely what one will do in their
research. We are writing the agent observations to the disk just as a simple way
to get some feedback of what is going on.
Sample run:
```
python -m minigrid_basics.examples.rw_four_directions \
--gin_bindings="MonMiniGridEnv.stochasticity=0.1"
```
"""
import os
from absl import app
from absl import flags
import gin
import gym
import gym_minigrid # pylint: disable=unused-import
from gym_minigrid.wrappers import RGBImgObsWrapper
import matplotlib.pylab as plt
import tensorflow as tf
from minigrid_basics.custom_wrappers import tabular_wrapper # pylint: disable=unused-import
from minigrid_basics.envs import mon_minigrid
FLAGS = flags.FLAGS
flags.DEFINE_string('file_path', '/tmp/rw_four_directions',
                    'Path in which we will save the observations.')
flags.DEFINE_multi_string(
    'gin_bindings', [],
    'Gin bindings to override default parameter values '
    '(e.g. "MonMiniGridEnv.stochasticity=0.1").')
def main(argv):
  """Runs a random agent in the four-rooms MiniGrid env, saving frames to disk."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  gin.parse_config_files_and_bindings(
      [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],
      bindings=FLAGS.gin_bindings,
      skip_unknown=False)
  env_id = mon_minigrid.register_environment()
  env = gym.make(env_id)
  env = RGBImgObsWrapper(env)  # Get pixel observations
  # Get tabular observation and drop the 'mission' field:
  env = tabular_wrapper.TabularWrapper(env, get_rgb=True)
  env.reset()
  num_frames = 0
  max_num_frames = 500
  if not tf.io.gfile.exists(FLAGS.file_path):
    tf.io.gfile.makedirs(FLAGS.file_path)
  undisc_return = 0
  while num_frames < max_num_frames:
    # Act randomly
    obs, reward, done, _ = env.step(env.action_space.sample())
    undisc_return += reward
    num_frames += 1
    print('t:', num_frames, ' s:', obs['state'])
    # Draw environment frame just for simple visualization
    plt.imshow(obs['image'])
    path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))
    plt.savefig(path)
    plt.clf()
    if done:
      break
  print('Undiscounted return: %.2f' % undisc_return)
  env.close()
if __name__ == '__main__':
  app.run(main)
| 31.632075
| 92
| 0.737548
|
import os
from absl import app
from absl import flags
import gin
import gym
import gym_minigrid
from gym_minigrid.wrappers import RGBImgObsWrapper
import matplotlib.pylab as plt
import tensorflow as tf
from minigrid_basics.custom_wrappers import tabular_wrapper
from minigrid_basics.envs import mon_minigrid
FLAGS = flags.FLAGS
flags.DEFINE_string('file_path', '/tmp/rw_four_directions',
'Path in which we will save the observations.')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override default parameter values '
'(e.g. "MonMiniGridEnv.stochasticity=0.1").')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gin.parse_config_files_and_bindings(
[os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],
bindings=FLAGS.gin_bindings,
skip_unknown=False)
env_id = mon_minigrid.register_environment()
env = gym.make(env_id)
env = RGBImgObsWrapper(env)
env = tabular_wrapper.TabularWrapper(env, get_rgb=True)
env.reset()
num_frames = 0
max_num_frames = 500
if not tf.io.gfile.exists(FLAGS.file_path):
tf.io.gfile.makedirs(FLAGS.file_path)
undisc_return = 0
while num_frames < max_num_frames:
obs, reward, done, _ = env.step(env.action_space.sample())
undisc_return += reward
num_frames += 1
print('t:', num_frames, ' s:', obs['state'])
plt.imshow(obs['image'])
path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))
plt.savefig(path)
plt.clf()
if done:
break
print('Undiscounted return: %.2f' % undisc_return)
env.close()
if __name__ == '__main__':
app.run(main)
| true
| true
|
f716ef059d911c1ddacddb527c6766ef71f00589
| 3,623
|
py
|
Python
|
scripts/run_mots_depth_inference.py
|
VladimirYugay/diw
|
d1a760f1911e9d09fbe038abffc3aa76d384f86a
|
[
"MIT"
] | 1
|
2021-09-14T21:24:56.000Z
|
2021-09-14T21:24:56.000Z
|
scripts/run_mots_depth_inference.py
|
VladimirYugay/diw
|
d1a760f1911e9d09fbe038abffc3aa76d384f86a
|
[
"MIT"
] | null | null | null |
scripts/run_mots_depth_inference.py
|
VladimirYugay/diw
|
d1a760f1911e9d09fbe038abffc3aa76d384f86a
|
[
"MIT"
] | null | null | null |
""" Script for running depth inference assuming MOTS dataset structure """
import logging
import os
import sys
from pathlib import Path, PurePath
import click
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from IPython.core import ultratb
from PIL import Image
import diw
from diw.model import Model, get_vars_to_save_and_restore
sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1)
_logger = logging.getLogger(__name__)
def load_image(img_file):
    """Read *img_file* from disk as an RGB PIL image (values in [0, 255])."""
    img = Image.open(img_file)
    return img.convert("RGB")
def resize_img(img, img_shape):
    """Return *img* resized to *img_shape* (Lanczos resampling), as RGB."""
    resized = img.resize(img_shape, Image.LANCZOS)
    return resized.convert("RGB")
def plot_image(image, image_type="RGB"):
    """Display *image* with matplotlib.

    Non-RGB images (e.g. depth maps) are drawn with the reversed
    'plasma' colormap.  Returns the ``plt`` module for chaining.
    """
    plt.figure()
    if image_type == "RGB":
        cmap = None
    else:
        cmap = plt.cm.get_cmap("plasma").reversed()
    plt.imshow(image, cmap=cmap)
    plt.show()  # display it
    return plt
@click.command()
@click.option(
    "--checkpoint_dir",
    "checkpoint_dir",
    default="./data/checkpoints/test",
    type=click.Path(exists=True),
    help="Path to the model checkpoint",
)
@click.option(
    "--data_dir",
    "data_dir",
    default="./data/test/mots_data",
    type=click.Path(exists=True),
    help="Path to MOTS data",
)
@click.option(
    "--save_img",
    "save_img",
    flag_value=True,
    help="Flag to whether save the image of the depth (besides numpy array)",
)
@click.version_option(diw.__version__)
def main(data_dir, checkpoint_dir, save_img):
    """Run depth inference over every MOTS sequence under *data_dir*.

    For each image of each sequence, the predicted depth map is saved as a
    .npy file under a per-sequence subdirectory named after the checkpoint;
    with --save_img, a rendered depth image is saved alongside.
    """
    if save_img:
        plt.figure()
    height, width = 128, 416  # model input resolution
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"  # to fix CUDA bug
    inference_model = Model(
        is_training=False, batch_size=1, img_height=height, img_width=width
    )
    checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    vars_to_restore = get_vars_to_save_and_restore(checkpoint)
    saver = tf.train.Saver(vars_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        sequence_paths = [p for p in Path(data_dir).glob("*") if p.is_dir()]
        for seq_path in sequence_paths:
            model_name = PurePath(checkpoint_dir).parts[-1]
            (seq_path / model_name).mkdir(parents=True, exist_ok=True)
            if save_img:
                (seq_path / (model_name + "_depth_images")).mkdir(
                    parents=True, exist_ok=True
                )
            # MOTS layout: frames live under <sequence>/img1/.
            img_paths = sorted(
                [p for p in (seq_path / "img1").glob("*") if p.is_file()],
                key=lambda path: str(path),
            )
            for img_path in img_paths:
                img_name = img_path.parts[-1].split(".")[0]
                print("Processing sequence: {}, image: {}".format(seq_path, img_name))
                image = load_image(str(img_path))
                image = resize_img(image, (width, height))
                image = np.array(image)
                image = image[None, ...]  # add leading batch axis
                depth = inference_model.inference_depth(image, sess)
                depth = depth[0, :, :, 0]  # drop batch and channel axes
                np.save(str(seq_path / model_name / img_name), depth)
                if save_img:
                    plt.imshow(depth, plt.cm.get_cmap("plasma").reversed())
                    plt.savefig(
                        str(seq_path / (model_name + "_depth_images"))
                        + "/"
                        + (img_name + ".png")
                    )
                    plt.clf()
if __name__ == "__main__":
    main()
| 32.348214
| 86
| 0.606956
|
import logging
import os
import sys
from pathlib import Path, PurePath
import click
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from IPython.core import ultratb
from PIL import Image
import diw
from diw.model import Model, get_vars_to_save_and_restore
sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1)
_logger = logging.getLogger(__name__)
def load_image(img_file):
return Image.open(img_file).convert("RGB")
def resize_img(img, img_shape):
return img.resize(img_shape, Image.LANCZOS).convert("RGB")
def plot_image(image, image_type="RGB"):
plt.figure()
color_map = None
if image_type != "RGB":
color_map = plt.cm.get_cmap("plasma").reversed()
plt.imshow(image, cmap=color_map)
plt.show()
return plt
@click.command()
@click.option(
    "--checkpoint_dir",
    "checkpoint_dir",
    default="./data/checkpoints/test",
    type=click.Path(exists=True),
    help="Path to the model checkpoint",
)
@click.option(
    "--data_dir",
    "data_dir",
    default="./data/test/mots_data",
    type=click.Path(exists=True),
    help="Path to MOTS data",
)
@click.option(
    "--save_img",
    "save_img",
    flag_value=True,
    help="Flag to whether save the image of the depth (besides numpy array)",
)
@click.version_option(diw.__version__)
def main(data_dir, checkpoint_dir, save_img):
    """Predict a depth map for every frame of every MOTS sequence.

    For each sequence sub-directory of *data_dir*, every file in its
    ``img1`` folder is resized, run through the restored depth model, and
    the prediction is saved as a ``.npy`` array under
    ``<sequence>/<checkpoint_name>/`` (plus an optional ``.png``
    visualisation when ``--save_img`` is given).
    """
    if save_img:
        plt.figure()
    # Input resolution fed to the network (rows, cols).
    height, width = 128, 416
    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
    inference_model = Model(
        is_training=False, batch_size=1, img_height=height, img_width=width
    )
    # Restore the newest checkpoint's weights into the inference graph.
    checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    vars_to_restore = get_vars_to_save_and_restore(checkpoint)
    saver = tf.train.Saver(vars_to_restore)
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        # Each sequence lives in its own sub-directory of data_dir.
        sequence_paths = [p for p in Path(data_dir).glob("*") if p.is_dir()]
        for seq_path in sequence_paths:
            # Output folders are named after the checkpoint directory.
            model_name = PurePath(checkpoint_dir).parts[-1]
            (seq_path / model_name).mkdir(parents=True, exist_ok=True)
            if save_img:
                (seq_path / (model_name + "_depth_images")).mkdir(
                    parents=True, exist_ok=True
                )
            # Deterministic frame order (lexicographic by full path).
            img_paths = sorted(
                [p for p in (seq_path / "img1").glob("*") if p.is_file()],
                key=lambda path: str(path),
            )
            for img_path in img_paths:
                # Frame name without its file extension.
                img_name = img_path.parts[-1].split(".")[0]
                print("Processing sequence: {}, image: {}".format(seq_path, img_name))
                image = load_image(str(img_path))
                image = resize_img(image, (width, height))
                image = np.array(image)
                # Prepend the batch dimension (batch_size == 1).
                image = image[None, ...]
                depth = inference_model.inference_depth(image, sess)
                # Drop batch and channel dimensions -> (height, width).
                depth = depth[0, :, :, 0]
                np.save(str(seq_path / model_name / img_name), depth)
                if save_img:
                    # Colour-mapped visualisation of the depth map.
                    plt.imshow(depth, plt.cm.get_cmap("plasma").reversed())
                    plt.savefig(
                        str(seq_path / (model_name + "_depth_images"))
                        + "/"
                        + (img_name + ".png")
                    )
                    plt.clf()
if __name__ == "__main__":
main()
| true
| true
|
f716f00b794214b6366e86f868a33212f28fca85
| 2,250
|
py
|
Python
|
custom_components/nintendo_wishlist/__init__.py
|
custom-components/sensor.nintendo_wishlis
|
6709a5c1b6e323494e7449fa1ac24e61100fc302
|
[
"Apache-2.0"
] | 13
|
2020-05-07T21:31:51.000Z
|
2022-02-09T01:53:53.000Z
|
custom_components/nintendo_wishlist/__init__.py
|
custom-components/sensor.nintendo_wishlis
|
6709a5c1b6e323494e7449fa1ac24e61100fc302
|
[
"Apache-2.0"
] | 19
|
2019-07-24T08:10:06.000Z
|
2022-02-05T04:09:34.000Z
|
custom_components/nintendo_wishlist/__init__.py
|
custom-components/sensor.nintendo_wishlis
|
6709a5c1b6e323494e7449fa1ac24e61100fc302
|
[
"Apache-2.0"
] | 5
|
2019-12-13T17:48:52.000Z
|
2020-07-06T07:45:31.000Z
|
"""Nintendo Wishlist integration."""
import logging
import voluptuous as vol
from homeassistant import core
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_COUNTRY, CONF_WISHLIST, DEFAULT_SCAN_INTERVAL, DOMAIN
from .eshop import Country, EShop
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_WISHLIST): cv.ensure_list,
vol.Required(CONF_COUNTRY): cv.enum(Country),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.All(cv.time_period, cv.positive_timedelta),
}
)
},
# The full HA configurations gets passed to `async_setup` so we need to allow
# extra keys.
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: core.HomeAssistant, config: dict) -> bool:
    """Initialise the Nintendo wishlist integration.

    ``config`` is the complete dict parsed from ``configuration.yaml``;
    only the ``DOMAIN`` section is consumed here.

    :returns: ``True`` to indicate that initialization was successful.
    """
    domain_conf = config[DOMAIN]
    session = async_get_clientsession(hass)
    eshop = EShop(
        domain_conf[CONF_COUNTRY].name, session, domain_conf[CONF_WISHLIST]
    )
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        # Name of the data, used for logging.
        name=DOMAIN,
        update_method=eshop.fetch_on_sale,
        # Only polled while entities are subscribed.
        update_interval=domain_conf[CONF_SCAN_INTERVAL],
    )
    # Prime the coordinator so entities have data as soon as they subscribe.
    await coordinator.async_refresh()
    hass.data[DOMAIN] = {"conf": domain_conf, "coordinator": coordinator}
    for platform in ("sensor", "binary_sensor"):
        hass.async_create_task(
            async_load_platform(hass, platform, DOMAIN, {}, domain_conf)
        )
    return True
| 33.58209
| 88
| 0.699556
|
import logging
import voluptuous as vol
from homeassistant import core
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_COUNTRY, CONF_WISHLIST, DEFAULT_SCAN_INTERVAL, DOMAIN
from .eshop import Country, EShop
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_WISHLIST): cv.ensure_list,
vol.Required(CONF_COUNTRY): cv.enum(Country),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.All(cv.time_period, cv.positive_timedelta),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: core.HomeAssistant, config: dict) -> bool:
conf = config[DOMAIN]
country = conf[CONF_COUNTRY].name
wishlist = conf[CONF_WISHLIST]
scan_interval = conf[CONF_SCAN_INTERVAL]
eshop = EShop(country, async_get_clientsession(hass), wishlist)
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=eshop.fetch_on_sale,
update_interval=scan_interval,
)
await coordinator.async_refresh()
hass.data[DOMAIN] = {
"conf": conf,
"coordinator": coordinator,
}
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, conf))
hass.async_create_task(async_load_platform(hass, "binary_sensor", DOMAIN, {}, conf))
return True
| true
| true
|
f716f19491be0a8291d501ec3e6e2ae018304842
| 5,052
|
py
|
Python
|
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
|
JaspervanRooijen/covid-apps-observer
|
59f6049a493c80797d83fd24e4a4789a14f3110e
|
[
"MIT"
] | null | null | null |
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
|
JaspervanRooijen/covid-apps-observer
|
59f6049a493c80797d83fd24e4a4789a14f3110e
|
[
"MIT"
] | null | null | null |
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
|
JaspervanRooijen/covid-apps-observer
|
59f6049a493c80797d83fd24e4a4789a14f3110e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of Androwarn.
#
# Copyright (C) 2012, 2019, Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# Androwarn is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androwarn is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androwarn. If not, see <http://www.gnu.org/licenses/>.
# Global imports
import logging
# Androwarn modules import
from lib.warn.util.util import *
# Logguer
log = logging.getLogger('log')
def detect_telephony_gsm_GsmCellLocation(x):
    """Flag reads of GSM cell-location identifiers (LAC / Cell ID).

    @param x : a Analysis instance
    @rtype : a list of warning strings, one per detected method call
    """
    monitored_methods = [
        ("getLac()", "This application reads the Location Area Code value"),
        ("getCid()", "This application reads the Cell ID value"),
    ]
    return structural_analysis_search_method_bulk(
        'Landroid/telephony/gsm/GsmCellLocation', monitored_methods, x)
def detect_Telephony_Manager_Leakages(x):
    """Flag TelephonyManager getters that leak device/subscriber identifiers.

    @param x : a Analysis instance
    @rtype : a list of warning strings, one per detected method call
    """
    monitored_methods = [
        ("getCallState()", "This application reads the phone's current state"),
        ("getCellLocation()", "This application reads the current location of the device"),
        ("getDataActivity()", "This application reads the type of activity on a data connection"),
        ("getDataState()", "This application reads the current data connection state"),
        ("getDeviceId()", "This application reads the unique device ID, i.e the IMEI for GSM and the MEID or ESN for CDMA phones"),
        ("getDeviceSoftwareVersion()", "This application reads the software version number for the device, for example, the IMEI/SV for GSM phones"),
        ("getLine1Number()", "This application reads the phone number string for line 1, for example, the MSISDN for a GSM phone"),
        ("getNeighboringCellInfo()", "This application reads the neighboring cell information of the device"),
        ("getNetworkCountryIso()", "This application reads the ISO country code equivalent of the current registered operator's MCC (Mobile Country Code)"),
        ("getNetworkOperator()", "This application reads the numeric name (MCC+MNC) of current registered operator"),
        ("getNetworkOperatorName()", "This application reads the operator name"),
        ("getNetworkType()", "This application reads the radio technology (network type) currently in use on the device for data transmission"),
        ("getPhoneType()", "This application reads the device phone type value"),
        ("getSimCountryIso()", "This application reads the ISO country code equivalent for the SIM provider's country code"),
        ("getSimOperator()", "This application reads the MCC+MNC of the provider of the SIM"),
        ("getSimOperatorName()", "This application reads the Service Provider Name (SPN)"),
        ("getSimSerialNumber()", "This application reads the SIM's serial number"),
        ("getSimState()", "This application reads the constant indicating the state of the device SIM card"),
        ("getSubscriberId()", "This application reads the unique subscriber ID, for example, the IMSI for a GSM phone"),
        ("getVoiceMailAlphaTag()", "This application reads the alphabetic identifier associated with the voice mail number"),
        ("getVoiceMailNumber()", "This application reads the voice mail number"),
    ]
    return structural_analysis_search_method_bulk(
        'Landroid/telephony/TelephonyManager', monitored_methods, x)
def gather_telephony_identifiers_leakage(x) :
    """Aggregate every telephony-identifier leakage finding for *x*.

    @param x : a Analysis instance
    @rtype : a list of warning strings for this category
    """
    return (detect_Telephony_Manager_Leakages(x)
            + detect_telephony_gsm_GsmCellLocation(x))
| 54.322581
| 195
| 0.66825
|
import logging
from lib.warn.util.util import *
log = logging.getLogger('log')
def detect_telephony_gsm_GsmCellLocation(x):
method_listing = [
("getLac()", "This application reads the Location Area Code value"),
("getCid()", "This application reads the Cell ID value")
]
class_name = 'Landroid/telephony/gsm/GsmCellLocation'
return structural_analysis_search_method_bulk(class_name, method_listing, x)
def detect_Telephony_Manager_Leakages(x) :
method_listing = [
("getCallState()", "This application reads the phone's current state"),
("getCellLocation()", "This application reads the current location of the device"),
("getDataActivity()", "This application reads the type of activity on a data connection"),
("getDataState()", "This application reads the current data connection state"),
("getDeviceId()", "This application reads the unique device ID, i.e the IMEI for GSM and the MEID or ESN for CDMA phones"),
("getDeviceSoftwareVersion()", "This application reads the software version number for the device, for example, the IMEI/SV for GSM phones"),
("getLine1Number()", "This application reads the phone number string for line 1, for example, the MSISDN for a GSM phone"),
("getNeighboringCellInfo()", "This application reads the neighboring cell information of the device"),
("getNetworkCountryIso()", "This application reads the ISO country code equivalent of the current registered operator's MCC (Mobile Country Code)"),
("getNetworkOperator()", "This application reads the numeric name (MCC+MNC) of current registered operator"),
("getNetworkOperatorName()", "This application reads the operator name"),
("getNetworkType()", "This application reads the radio technology (network type) currently in use on the device for data transmission"),
("getPhoneType()", "This application reads the device phone type value"),
("getSimCountryIso()", "This application reads the ISO country code equivalent for the SIM provider's country code"),
("getSimOperator()", "This application reads the MCC+MNC of the provider of the SIM"),
("getSimOperatorName()", "This application reads the Service Provider Name (SPN)"),
("getSimSerialNumber()", "This application reads the SIM's serial number"),
("getSimState()", "This application reads the constant indicating the state of the device SIM card"),
("getSubscriberId()", "This application reads the unique subscriber ID, for example, the IMSI for a GSM phone"),
("getVoiceMailAlphaTag()", "This application reads the alphabetic identifier associated with the voice mail number"),
("getVoiceMailNumber()", "This application reads the voice mail number")
]
class_name = 'Landroid/telephony/TelephonyManager'
return structural_analysis_search_method_bulk(class_name, method_listing, x)
def gather_telephony_identifiers_leakage(x) :
result = []
result.extend( detect_Telephony_Manager_Leakages(x) )
result.extend( detect_telephony_gsm_GsmCellLocation(x) )
return result
| true
| true
|
f716f2899ec4b9277a6cd89d3948a14978b46dc3
| 4,763
|
py
|
Python
|
izi_shipping/packers.py
|
izi-ecommerce/izi-shipping
|
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
|
[
"BSD-3-Clause"
] | null | null | null |
izi_shipping/packers.py
|
izi-ecommerce/izi-shipping
|
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
|
[
"BSD-3-Clause"
] | null | null | null |
izi_shipping/packers.py
|
izi-ecommerce/izi-shipping
|
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal as D
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from izi.core import loading
Scale = loading.get_class('shipping.scales', 'Scale')
weight_precision = getattr(
settings, 'IZI_SHIPPING_WEIGHT_PRECISION', D('0.000'))
volume_precision = getattr(
settings, 'IZI_SHIPPING_VOLUME_PRECISION', D('0.000'))
# per product defaults
# 0.1m x 0.1m x 0.1m
DEFAULT_BOX = getattr(settings, 'IZI_SHIPPING_DEFAULT_BOX', {'width': float('0.1'),
'height': float('0.1'),
'length': float('0.1')})
# 1 Kg
DEFAULT_WEIGHT = getattr(settings, 'IZI_SHIPPING_DEFAULT_WEIGHT', 1)
# basket volue * VOLUME_RATIO = estimated container(s) volume
# very simple method
VOLUME_RATIO = getattr(settings, 'IZI_SHIPPING_VOLUME_RATIO', D('1.3'))
class Box(object):
    """Axis-aligned cuboid with a decimal volume property."""
    height = 0
    width = 0
    length = 0

    def __init__(self, h, w, l):
        self.height = h
        self.width = w
        self.length = l

    @property
    def volume(self):
        # Quantized to the project-wide IZI_SHIPPING_VOLUME_PRECISION.
        raw = self.height * self.width * self.length
        return D(raw).quantize(volume_precision)
class Container(Box):
    """A Box carrying a display name (a shipping container)."""
    name = ''

    def __init__(self, h, w, l, name):
        super(Container, self).__init__(h, w, l)
        self.name = name
class ProductBox(Box):
    """
    'Packs' given product to the virtual box and scale it.
    Takes size and weight from product attributes (if present);
    otherwise falls back to the module-level defaults.
    """
    # Product weight as resolved by Scale (attribute value or default).
    weight = 0
    def __init__(self,
                 product,
                 size_codes=('width', 'height', 'length'),
                 weight_code='weight',
                 default_weight=DEFAULT_WEIGHT):
        self.attributes = size_codes
        attr_vals = {}
        scale = Scale(attribute_code=weight_code,
                      default_weight=default_weight)
        try:
            for attr in self.attributes:
                attr_vals[attr] = product.attribute_values.get(
                    attribute__code=attr).value
        except ObjectDoesNotExist:
            # If ANY size attribute is missing, every value gathered so far
            # is discarded and the whole DEFAULT_BOX is used instead.
            attr_vals = DEFAULT_BOX
        self.weight = scale.weigh_product(product)
        # Copy the resolved dimensions onto this instance (height/width/length).
        for attr in attr_vals.keys():
            setattr(self, attr, attr_vals[attr])
class Packer(object):
    """Estimate the container set required to ship a basket.

    Choosing a minimal container set for items with height/width/length
    attributes is the Bin Packing Problem; this class sidesteps it with a
    simple total-volume approximation.
    """

    def __init__(self, containers, **kwargs):
        self.containers = containers
        self.attributes = kwargs.get(
            'attribute_codes', ('width', 'height', 'length'))
        self.weight_code = kwargs.get('weight_code', 'weight')
        self.default_weight = kwargs.get('default_weight', DEFAULT_WEIGHT)

    def get_default_container(self, volume):
        """Build a _virtual_ cube container (not stored in the DB) whose
        volume is sufficient for the given basket volume."""
        side = float(volume) ** (1 / 3.0)
        return Container(side, side, side, _('virtual volume (%s)') % volume)

    def box_product(self, product):
        """Wrap *product* in a ProductBox using this packer's settings."""
        return ProductBox(
            product, self.attributes, self.weight_code, self.default_weight)

    def pack_basket(self, basket):
        """Return ``[{'weight': Decimal, 'container': Container}]``.

        Sums volume and weight over all basket lines, inflates the volume
        by VOLUME_RATIO, then picks the first stored container that is big
        enough, falling back to a virtual cube container when none fits.
        """
        total_volume = 0
        total_weight = 0
        for line in basket.lines.all():
            product_box = self.box_product(line.product)
            total_volume += product_box.volume * line.quantity
            total_weight += product_box.weight * line.quantity
        total_volume = total_volume * VOLUME_RATIO
        # Computed columns cannot appear in the queryset WHERE clause, so
        # the volume filter happens in Python instead of SQL.
        fitting = [c for c in self.containers.all()
                   if c.volume >= total_volume]
        if len(fitting) > 0:
            container = fitting[0]
            # TODO: count container's weight - add it to model
        else:
            container = self.get_default_container(total_volume)
        return [{'weight': D(total_weight).quantize(weight_precision),
                 'container': container}]
| 35.81203
| 107
| 0.617888
|
from decimal import Decimal as D
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from izi.core import loading
Scale = loading.get_class('shipping.scales', 'Scale')
weight_precision = getattr(
settings, 'IZI_SHIPPING_WEIGHT_PRECISION', D('0.000'))
volume_precision = getattr(
settings, 'IZI_SHIPPING_VOLUME_PRECISION', D('0.000'))
DEFAULT_BOX = getattr(settings, 'IZI_SHIPPING_DEFAULT_BOX', {'width': float('0.1'),
'height': float('0.1'),
'length': float('0.1')})
DEFAULT_WEIGHT = getattr(settings, 'IZI_SHIPPING_DEFAULT_WEIGHT', 1)
VOLUME_RATIO = getattr(settings, 'IZI_SHIPPING_VOLUME_RATIO', D('1.3'))
class Box(object):
height = 0
width = 0
length = 0
def __init__(self, h, w, l):
self.height, self.width, self.length = h, w, l
@property
def volume(self):
return D(self.height*self.width*self.length).quantize(volume_precision)
class Container(Box):
name = ''
def __init__(self, h, w, l, name):
self.name = name
super(Container, self).__init__(h, w, l)
class ProductBox(Box):
weight = 0
def __init__(self,
product,
size_codes=('width', 'height', 'length'),
weight_code='weight',
default_weight=DEFAULT_WEIGHT):
self.attributes = size_codes
attr_vals = {}
scale = Scale(attribute_code=weight_code,
default_weight=default_weight)
try:
for attr in self.attributes:
attr_vals[attr] = product.attribute_values.get(
attribute__code=attr).value
except ObjectDoesNotExist:
attr_vals = DEFAULT_BOX
self.weight = scale.weigh_product(product)
for attr in attr_vals.keys():
setattr(self, attr, attr_vals[attr])
class Packer(object):
def __init__(self, containers, **kwargs):
self.containers = containers
self.attributes = kwargs.get(
'attribute_codes', ('width', 'height', 'length'))
self.weight_code = kwargs.get('weight_code', 'weight')
self.default_weight = kwargs.get('default_weight', DEFAULT_WEIGHT)
def get_default_container(self, volume):
side = float(volume) ** (1 / 3.0)
return Container(side, side, side, _('virtual volume (%s)') % volume)
def box_product(self, product):
return ProductBox(product, self.attributes, self.weight_code, self.default_weight)
def pack_basket(self, basket):
volume = 0
weight = 0
box = container = matched = None
for line in basket.lines.all():
box = self.box_product(line.product)
volume += box.volume * line.quantity
weight += box.weight * line.quantity
del box
volume = volume * VOLUME_RATIO
# we will filter containers as python list
# container = self.containers.extra(select={'volume': 'height*width*lenght'})\
# .extra(order_by=['volume'])\
# .extra(where=['"volume">%s'], params=[volume])[0]
# select containers which volumes greater than summarized basket volume
matched = [c for c in self.containers.all() if c.volume >= volume]
if len(matched) > 0:
container = matched[0]
# TODO: count container's weight - add it to model
else:
container = self.get_default_container(volume)
return [{'weight': D(weight).quantize(weight_precision), 'container': container}]
| true
| true
|
f716f32b4db4b79169801db929ce86099f51f34b
| 564
|
py
|
Python
|
hw4/part3/table_printer.py
|
jonescarissa/csc221
|
1052b4cf9f3aab86c063c1b3845895a590bc2083
|
[
"CC0-1.0"
] | null | null | null |
hw4/part3/table_printer.py
|
jonescarissa/csc221
|
1052b4cf9f3aab86c063c1b3845895a590bc2083
|
[
"CC0-1.0"
] | null | null | null |
hw4/part3/table_printer.py
|
jonescarissa/csc221
|
1052b4cf9f3aab86c063c1b3845895a590bc2083
|
[
"CC0-1.0"
] | 1
|
2021-09-02T03:55:17.000Z
|
2021-09-02T03:55:17.000Z
|
''' Table Printer practice project
Author: Carissa Jones
'''
tableData = [['I', 'out', 'chair.'],
['just', 'of', 'Im'],
['fell', 'my', 'fine.']]
def printTable(tableData):
    '''Given a list of lists of strings, tableData, display it as a
    well-organized table with each column right-justified.

    tableData is stored column-major: tableData[y] holds the strings of
    column y, and tableData[y][x] is the cell at row x of that column.
    '''
    # Bug fix: the widths were never computed before, leaving colWidths at
    # all zeros so rjust(0) never padded anything. Each column's width is
    # the length of its longest entry.
    colWidths = [0] * len(tableData)
    for y in range(len(tableData)):
        colWidths[y] = max(len(item) for item in tableData[y])
    # Print row by row (x indexes rows, y indexes columns).
    for x in range(len(tableData[0])):
        for y in range(len(colWidths)):
            print(tableData[y][x].rjust(colWidths[y]), end=' ')
        print(end='\n')
printTable(tableData)
| 23.5
| 69
| 0.56383
|
tableData = [['I', 'out', 'chair.'],
['just', 'of', 'Im'],
['fell', 'my', 'fine.']]
def printTable(tableData):
    '''Given a list of lists of strings, tableData, display it as a
    well-organized table with each column right-justified.

    tableData is stored column-major: tableData[y] holds the strings of
    column y, and tableData[y][x] is the cell at row x of that column.
    '''
    # Bug fix: the widths were never computed before, leaving colWidths at
    # all zeros so rjust(0) never padded anything. Each column's width is
    # the length of its longest entry.
    colWidths = [0] * len(tableData)
    for y in range(len(tableData)):
        colWidths[y] = max(len(item) for item in tableData[y])
    # Print row by row (x indexes rows, y indexes columns).
    for x in range(len(tableData[0])):
        for y in range(len(colWidths)):
            print(tableData[y][x].rjust(colWidths[y]), end=' ')
        print(end='\n')
printTable(tableData)
| true
| true
|
f716f342dfdde7c09452213431f486564cd316b0
| 20,293
|
py
|
Python
|
blender/arm/lightmapper/utility/encoding.py
|
onelsonic/armory
|
55cfead0844923d419d75bf4bd677ebed714b4b5
|
[
"Zlib"
] | 2,583
|
2016-07-27T08:25:47.000Z
|
2022-03-31T10:42:17.000Z
|
blender/arm/lightmapper/utility/encoding.py
|
onelsonic/armory
|
55cfead0844923d419d75bf4bd677ebed714b4b5
|
[
"Zlib"
] | 2,122
|
2016-07-31T14:20:04.000Z
|
2022-03-31T20:44:14.000Z
|
blender/arm/lightmapper/utility/encoding.py
|
onelsonic/armory
|
55cfead0844923d419d75bf4bd677ebed714b4b5
|
[
"Zlib"
] | 451
|
2016-08-12T05:52:58.000Z
|
2022-03-31T01:33:07.000Z
|
import bpy, math, os, gpu, bgl
import numpy as np
from . import utility
from fractions import Fraction
from gpu_extras.batch import batch_for_shader
def encodeLogLuvGPU(image, outDir, quality):
    """Encode a Blender HDR image into 8-bit LogLuv and save it as a PNG.

    Draws *image* through a LogLuv-encoding fragment shader into an
    offscreen buffer, copies the result into a Blender image named
    ``<name>_encoded`` and writes that image to *outDir* as PNG.

    :param image: source ``bpy.types.Image`` to encode.
    :param outDir: directory the encoded PNG is written to.
    :param quality: value assigned to the scene's render image quality
        setting before saving.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Offscreen render target matching the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image
    # Pass-through vertex shader for the quad drawn below.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
        //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
        //gl_Position.z = 1.0;
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
    }
    '''
    # Fragment shader: converts each sampled texel to LogLuv encoding
    # (the inverse transform is included but unused by main()).
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
    vec4 LinearToLogLuv( in vec4 value ) {
        vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
        Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
        vec4 vResult;
        vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
        float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
        vResult.w = fract( Le );
        vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
        return vResult;
        //return vec4(Xp_Y_XYZp,1);
    }
    const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
    vec4 LogLuvToLinear( in vec4 value ) {
        float Le = value.z * 255.0 + value.w;
        vec3 Xp_Y_XYZp;
        Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
        Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
        Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
        vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
        //return vec4( max( vRGB, 0.0 ), 1.0 );
        return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 );
    }
    void main()
    {
        //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454)));
        fragColor = LinearToLogLuv(texture(image, texCoord_interp));
        //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp)));
    }
    '''
    # Corner coordinates of the quad that covers the offscreen viewport.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    # NOTE(review): the last vertex uses off_x in its y term; harmless here
    # because off_x == off_y, but looks like a typo — confirm.
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))
    # Force the source into the Linear colorspace before sampling.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Strip a trailing .exr / .hdr extension from the target image name
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing '<name>_encoded' image, or create an 8-bit RGBA one.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
                name = image_name + '_encoded',
                width = input_image.size[0],
                height = input_image.size[1],
                alpha = True,
                float_buffer = False
                )
    # Compile the shader pair and build a textured quad batch.
    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )
    # gl_load() returns a truthy error when the GL texture cannot be created.
    if image.gl_load():
        raise Exception()
    # Draw the quad into the offscreen buffer and read the pixels back.
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()
    # Convert the byte buffer (0-255) to the float pixels (0-1) Blender expects.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    # Save the LogLuv-encoded result to <outDir>/<name>_encoded.png.
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
    #Todo - Find a way to save
    #bpy.ops.image.save_all_modified()
def encodeImageRGBDGPU(image, maxRange, outDir, quality):
    """Encode a Blender HDR image into RGBD and save it as a PNG.

    Draws *image* through an RGBD-encoding fragment shader into an
    offscreen buffer, copies the result into a Blender image named
    ``<name>_encoded`` and writes that image to *outDir* as PNG.

    :param image: source ``bpy.types.Image`` to encode.
    :param maxRange: NOTE(review): currently unused — the shader hardcodes
        ``rgbdMaxRange = 255.0``; confirm whether it should be a uniform.
    :param outDir: directory the encoded PNG is written to.
    :param quality: value assigned to the scene's render image quality
        setting before saving.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Offscreen render target matching the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image
    # Pass-through vertex shader for the quad drawn below.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
        //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
        //gl_Position.z = 1.0;
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
    }
    '''
    # Fragment shader: converts each sampled texel to RGBD encoding
    # (the fromRGBD decoder is included but unused by main()).
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx
    const float PI = 3.1415926535897932384626433832795;
    const float HALF_MIN = 5.96046448e-08; // Smallest positive half.
    const float LinearEncodePowerApprox = 2.2;
    const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
    const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722);
    const float Epsilon = 0.0000001;
    #define saturate(x) clamp(x, 0.0, 1.0)
    float maxEps(float x) {
        return max(x, Epsilon);
    }
    float toLinearSpace(float color)
    {
        return pow(color, LinearEncodePowerApprox);
    }
    vec3 toLinearSpace(vec3 color)
    {
        return pow(color, vec3(LinearEncodePowerApprox));
    }
    vec4 toLinearSpace(vec4 color)
    {
        return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a);
    }
    vec3 toGammaSpace(vec3 color)
    {
        return pow(color, vec3(GammaEncodePowerApprox));
    }
    vec4 toGammaSpace(vec4 color)
    {
        return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a);
    }
    float toGammaSpace(float color)
    {
        return pow(color, GammaEncodePowerApprox);
    }
    float square(float value)
    {
        return value * value;
    }
    // Check if configurable value is needed.
    const float rgbdMaxRange = 255.0;
    vec4 toRGBD(vec3 color) {
        float maxRGB = maxEps(max(color.r, max(color.g, color.b)));
        float D = max(rgbdMaxRange / maxRGB, 1.);
        D = clamp(floor(D) / 255.0, 0., 1.);
        vec3 rgb = color.rgb * D;
        // Helps with png quantization.
        rgb = toGammaSpace(rgb);
        return vec4(rgb, D);
    }
    vec3 fromRGBD(vec4 rgbd) {
        // Helps with png quantization.
        rgbd.rgb = toLinearSpace(rgbd.rgb);
        // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a);
        return rgbd.rgb / rgbd.a;
    }
    void main()
    {
        fragColor = toRGBD(texture(image, texCoord_interp).rgb);
    }
    '''
    # Corner coordinates of the quad that covers the offscreen viewport.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    # NOTE(review): the last vertex uses off_x in its y term; harmless here
    # because off_x == off_y, but looks like a typo — confirm.
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))
    # Force the source into the Linear colorspace before sampling.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Strip a trailing .exr / .hdr extension from the target image name
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing '<name>_encoded' image, or create an 8-bit RGBA one.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
                name = image_name + '_encoded',
                width = input_image.size[0],
                height = input_image.size[1],
                alpha = True,
                float_buffer = False
                )
    # Compile the shader pair and build a textured quad batch.
    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )
    # gl_load() returns a truthy error when the GL texture cannot be created.
    if image.gl_load():
        raise Exception()
    # Draw the quad into the offscreen buffer and read the pixels back.
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()
    # Convert the byte buffer (0-255) to the float pixels (0-1) Blender expects.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    # Save the RGBD-encoded result to <outDir>/<name>_encoded.png.
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
    #Todo - Find a way to save
    #bpy.ops.image.save_all_modified()
#TODO - FINISH THIS
def encodeImageRGBMGPU(image, maxRange, outDir, quality):
    """Encode an HDR image into 8-bit RGBM on the GPU and save it as a PNG.

    The source image is drawn through a fragment shader that packs HDR RGB
    into LDR RGB plus a shared multiplier stored in alpha (RGBM).  The
    shader bakes in a max range of 6, matching encodeImageRGBMCPU called
    with maxRange=6.  The result is stored in a "<name>_encoded" datablock
    and written to ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    maxRange: unused by this GPU path (the shader bakes in 6); kept for
        signature parity with the CPU encoders.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Offscreen framebuffer at the source resolution; encoding happens by
    # drawing a screen-covering quad into it.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image

    # Pass-through vertex shader; positions are supplied pre-projected.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
    }
    '''

    # NOTE: the previous toRGBM referenced an undeclared variable `D`, had a
    # bare `return`, and carried unreachable code copied from toRGBD, so the
    # shader could not compile.  Rewritten as standard RGBM encoding.
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    //Helpers adapted from: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx
    const float Epsilon = 0.0000001;
    const float LinearEncodePowerApprox = 2.2;
    const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
    #define saturate(x) clamp(x, 0.0, 1.0)
    float maxEps(float x) {
        return max(x, Epsilon);
    }
    vec3 toGammaSpace(vec3 color)
    {
        return pow(color, vec3(GammaEncodePowerApprox));
    }
    // RGBM with a fixed max range of 6: alpha stores the shared multiplier,
    // quantized UP to the next 1/255 step so decoding (rgb * a * 6) never
    // overshoots; rgb stores the color divided by that multiplier.
    vec4 toRGBM(vec3 color) {
        vec4 rgbm;
        color *= 1.0 / 6.0;
        rgbm.a = saturate( max( max( color.r, color.g ), max( color.b, 1e-6 ) ) );
        rgbm.a = ceil( rgbm.a * 255.0 ) / 255.0;
        rgbm.rgb = color / rgbm.a;
        return rgbm;
    }
    void main()
    {
        fragColor = toRGBM(texture(image, texCoord_interp).rgb);
    }
    '''

    # Screen-space quad big enough to cover the framebuffer.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name=image_name + '_encoded',
            width=input_image.size[0],
            height=input_image.size[1],
            alpha=True,
            float_buffer=False
        )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    # Upload the source image to an OpenGL texture.
    if image.gl_load():
        raise Exception("Could not load image into OpenGL: " + image.name)

    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the encoded result back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    # Bytes back to [0, 1] floats for the target datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
def encodeImageRGBMCPU(image, maxRange, outDir, quality):
    """Encode an HDR image into 8-bit RGBM on the CPU and save it as a PNG.

    Each pixel's RGB is divided by maxRange; the largest channel, rounded UP
    to the next 1/255 step, is stored in alpha as the shared multiplier M,
    and RGB is divided by M.  The result is stored in a "<name>_encoded"
    datablock and written to ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    maxRange: HDR range mapped to [0, 1] before packing.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name=image_name + '_encoded',
            width=input_image.size[0],
            height=input_image.size[1],
            alpha=True,
            float_buffer=False
        )

    # Pixels are a flat RGBA float sequence, four values per pixel.
    num_pixels = len(input_image.pixels)
    result_pixel = list(input_image.pixels)
    for i in range(0, num_pixels, 4):
        for j in range(3):
            result_pixel[i + j] *= 1.0 / maxRange
        # Shared multiplier: the largest channel, quantized up so the
        # decode (rgb * a * maxRange) never overshoots the original.
        result_pixel[i + 3] = saturate(max(result_pixel[i], result_pixel[i + 1], result_pixel[i + 2], 1e-6))
        result_pixel[i + 3] = math.ceil(result_pixel[i + 3] * 255.0) / 255.0
        for j in range(3):
            result_pixel[i + j] /= result_pixel[i + 3]

    target_image.pixels = result_pixel
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
def saturate(num, floats=True):
    """Clamp *num* into [0, 1] (floats=True) or [0, 255] (floats=False)."""
    upper_bound = 1 if floats else 255
    if num <= 0:
        return 0
    if num > upper_bound:
        return upper_bound
    return num
def maxEps(x):
    """Return x bounded below by a small epsilon (1e-6), e.g. to guard divisions."""
    epsilon = 1e-6
    return epsilon if x < epsilon else x
def encodeImageRGBDCPU(image, maxRange, outDir, quality):
    """Encode an HDR image into 8-bit RGBD on the CPU and save it as a PNG.

    For each pixel a shared divisor D = clamp(floor(255/maxChannel)/255) is
    stored in alpha, and gamma-corrected RGB*D is stored in the color
    channels (the gamma helps with PNG quantization).  The result is stored
    in a "<name>_encoded" datablock and written to
    ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    maxRange: unused by this encoder; kept for signature parity with the
        other encoders.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name=image_name + '_encoded',
            width=input_image.size[0],
            height=input_image.size[1],
            alpha=True,
            float_buffer=False
        )

    num_pixels = len(input_image.pixels)
    result_pixel = list(input_image.pixels)

    rgbd_max_range = 255.0
    inv_gamma = 1 / 2.2  # hoisted; applied to help PNG quantization

    # Pixels are a flat RGBA float sequence, four values per pixel.
    for i in range(0, num_pixels, 4):
        max_rgb = maxEps(max(result_pixel[i], result_pixel[i + 1], result_pixel[i + 2]))
        D = max(rgbd_max_range / max_rgb, 1.0)
        # D >= 1 here, so only the upper clamp is needed.
        D = min(math.floor(D) / 255.0, 1.0)
        result_pixel[i] = math.pow(result_pixel[i] * D, inv_gamma)
        result_pixel[i + 1] = math.pow(result_pixel[i + 1] * D, inv_gamma)
        result_pixel[i + 2] = math.pow(result_pixel[i + 2] * D, inv_gamma)
        result_pixel[i + 3] = D

    target_image.pixels = result_pixel
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
| 31.51087
| 123
| 0.588824
|
import bpy, math, os, gpu, bgl
import numpy as np
from . import utility
from fractions import Fraction
from gpu_extras.batch import batch_for_shader
def encodeLogLuvGPU(image, outDir, quality):
    """Encode an HDR image into 8-bit LogLuv on the GPU and save it as a PNG.

    The image is drawn through a LogLuv-packing fragment shader into an
    offscreen framebuffer, read back, stored in a "<name>_encoded"
    datablock, and saved to ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Offscreen framebuffer at the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image

    # Pass-through vertex shader; positions are supplied pre-projected.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
      //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
      //gl_Position.z = 1.0;
      gl_Position = vec4(pos.xy, 100, 100);
      texCoord_interp = texCoord;
    }
    '''

    # Fragment shader packing linear RGB into LogLuv; the inverse transform
    # is included for reference/debugging.
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
    vec4 LinearToLogLuv( in vec4 value ) {
        vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
        Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
        vec4 vResult;
        vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
        float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
        vResult.w = fract( Le );
        vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
        return vResult;
        //return vec4(Xp_Y_XYZp,1);
    }
    const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
    vec4 LogLuvToLinear( in vec4 value ) {
        float Le = value.z * 255.0 + value.w;
        vec3 Xp_Y_XYZp;
        Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
        Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
        Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
        vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
        //return vec4( max( vRGB, 0.0 ), 1.0 );
        return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 );
    }
    void main()
    {
        //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454)));
        fragColor = LinearToLogLuv(texture(image, texCoord_interp));
        //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp)));
    }
    '''

    # Screen-space quad large enough to cover the framebuffer.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    # Upload the source image to an OpenGL texture.
    if image.gl_load():
        raise Exception()

    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the encoded framebuffer back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    # Bytes back to [0, 1] floats for the target datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
def encodeImageRGBDGPU(image, maxRange, outDir, quality):
    """Encode an HDR image into 8-bit RGBD on the GPU and save it as a PNG.

    The image is drawn through an RGBD-packing fragment shader (shared
    divisor in alpha, gamma-corrected RGB) into an offscreen framebuffer,
    read back, and saved to ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    maxRange: unused by this GPU path (the shader uses a fixed 255 range);
        kept for signature parity with the CPU encoders.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Offscreen framebuffer at the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image

    # Pass-through vertex shader; positions are supplied pre-projected.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
        //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
        //gl_Position.z = 1.0;
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
    }
    '''

    # Fragment shader: RGBD packing with helpers from Babylon.js.
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx
    const float PI = 3.1415926535897932384626433832795;
    const float HALF_MIN = 5.96046448e-08; // Smallest positive half.
    const float LinearEncodePowerApprox = 2.2;
    const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
    const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722);
    const float Epsilon = 0.0000001;
    #define saturate(x) clamp(x, 0.0, 1.0)
    float maxEps(float x) {
        return max(x, Epsilon);
    }
    float toLinearSpace(float color)
    {
        return pow(color, LinearEncodePowerApprox);
    }
    vec3 toLinearSpace(vec3 color)
    {
        return pow(color, vec3(LinearEncodePowerApprox));
    }
    vec4 toLinearSpace(vec4 color)
    {
        return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a);
    }
    vec3 toGammaSpace(vec3 color)
    {
        return pow(color, vec3(GammaEncodePowerApprox));
    }
    vec4 toGammaSpace(vec4 color)
    {
        return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a);
    }
    float toGammaSpace(float color)
    {
        return pow(color, GammaEncodePowerApprox);
    }
    float square(float value)
    {
        return value * value;
    }
    // Check if configurable value is needed.
    const float rgbdMaxRange = 255.0;
    vec4 toRGBD(vec3 color) {
        float maxRGB = maxEps(max(color.r, max(color.g, color.b)));
        float D = max(rgbdMaxRange / maxRGB, 1.);
        D = clamp(floor(D) / 255.0, 0., 1.);
        vec3 rgb = color.rgb * D;
        // Helps with png quantization.
        rgb = toGammaSpace(rgb);
        return vec4(rgb, D);
    }
    vec3 fromRGBD(vec4 rgbd) {
        // Helps with png quantization.
        rgbd.rgb = toLinearSpace(rgbd.rgb);
        // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a);
        return rgbd.rgb / rgbd.a;
    }
    void main()
    {
        fragColor = toRGBD(texture(image, texCoord_interp).rgb);
    }
    '''

    # Screen-space quad large enough to cover the framebuffer.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    # Upload the source image to an OpenGL texture.
    if image.gl_load():
        raise Exception()

    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the encoded framebuffer back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    # Bytes back to [0, 1] floats for the target datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
def encodeImageRGBMGPU(image, maxRange, outDir, quality):
    """Encode an HDR image into 8-bit RGBM on the GPU and save it as a PNG.

    The source image is drawn through a fragment shader that packs HDR RGB
    into LDR RGB plus a shared multiplier stored in alpha (RGBM).  The
    shader bakes in a max range of 6, matching encodeImageRGBMCPU called
    with maxRange=6.  The result is stored in a "<name>_encoded" datablock
    and written to ``outDir/<name>_encoded.png``.

    image: Blender image datablock to encode.
    maxRange: unused by this GPU path (the shader bakes in 6); kept for
        signature parity with the CPU encoders.
    outDir: directory for the encoded PNG.
    quality: PNG quality pushed into the scene render settings.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name

    # Offscreen framebuffer at the source resolution; encoding happens by
    # drawing a screen-covering quad into it.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image

    # Pass-through vertex shader; positions are supplied pre-projected.
    vertex_shader = '''
    uniform mat4 ModelViewProjectionMatrix;
    in vec2 texCoord;
    in vec2 pos;
    out vec2 texCoord_interp;
    void main()
    {
        gl_Position = vec4(pos.xy, 100, 100);
        texCoord_interp = texCoord;
    }
    '''

    # NOTE: the previous toRGBM referenced an undeclared variable `D`, had a
    # bare `return`, and carried unreachable code copied from toRGBD, so the
    # shader could not compile.  Rewritten as standard RGBM encoding.
    fragment_shader = '''
    in vec2 texCoord_interp;
    out vec4 fragColor;
    uniform sampler2D image;
    //Helpers adapted from: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx
    const float Epsilon = 0.0000001;
    const float LinearEncodePowerApprox = 2.2;
    const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
    #define saturate(x) clamp(x, 0.0, 1.0)
    float maxEps(float x) {
        return max(x, Epsilon);
    }
    vec3 toGammaSpace(vec3 color)
    {
        return pow(color, vec3(GammaEncodePowerApprox));
    }
    // RGBM with a fixed max range of 6: alpha stores the shared multiplier,
    // quantized UP to the next 1/255 step so decoding (rgb * a * 6) never
    // overshoots; rgb stores the color divided by that multiplier.
    vec4 toRGBM(vec3 color) {
        vec4 rgbm;
        color *= 1.0 / 6.0;
        rgbm.a = saturate( max( max( color.r, color.g ), max( color.b, 1e-6 ) ) );
        rgbm.a = ceil( rgbm.a * 255.0 ) / 255.0;
        rgbm.rgb = color / rgbm.a;
        return rgbm;
    }
    void main()
    {
        fragColor = toRGBM(texture(image, texCoord_interp).rgb);
    }
    '''

    # Screen-space quad big enough to cover the framebuffer.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))

    # Encode from linear values.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'

    # Strip a trailing .exr/.hdr extension from the target name.
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]

    # Reuse an existing "<name>_encoded" datablock when present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name=image_name + '_encoded',
            width=input_image.size[0],
            height=input_image.size[1],
            alpha=True,
            float_buffer=False
        )

    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )

    # Upload the source image to an OpenGL texture.
    if image.gl_load():
        raise Exception("Could not load image into OpenGL: " + image.name)

    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the encoded result back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()

    # Bytes back to [0, 1] floats for the target datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image

    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)

    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
def encodeImageRGBMCPU(image, maxRange, outDir, quality):
    """Pack an HDR image into 8-bit RGBM (range maxRange) and save it as a PNG."""
    src = bpy.data.images[image.name]
    base_name = src.name

    # Work on linear pixel values.
    if src.colorspace_settings.name != 'Linear':
        src.colorspace_settings.name = 'Linear'

    # Drop a trailing .exr/.hdr extension from the output name.
    if base_name.endswith('.exr') or base_name.endswith('.hdr'):
        base_name = base_name[:-4]

    encoded_name = base_name + '_encoded'
    dst = bpy.data.images.get(encoded_name)
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(encoded_name)
    if not dst:
        dst = bpy.data.images.new(
            name=encoded_name,
            width=src.size[0],
            height=src.size[1],
            alpha=True,
            float_buffer=False
        )

    # Flat RGBA sequence: process one pixel (four floats) at a time.
    pixels = list(src.pixels)
    scale = 1.0 / maxRange
    for base in range(0, len(pixels), 4):
        r = pixels[base] * scale
        g = pixels[base + 1] * scale
        b = pixels[base + 2] * scale
        # Shared multiplier, rounded up to the next 1/255 step.
        m = saturate(max(r, g, b, 1e-6))
        m = math.ceil(m * 255.0) / 255.0
        pixels[base] = r / m
        pixels[base + 1] = g / m
        pixels[base + 2] = b / m
        pixels[base + 3] = m

    dst.pixels = pixels
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(dst.name)

    dst.filepath_raw = outDir + "/" + dst.name + ".png"
    dst.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    dst.save()
def saturate(num, floats=True):
    """Clamp *num* into [0, 1] (floats=True) or [0, 255] (floats=False)."""
    limit = 1 if floats else 255
    if num <= 0:
        return 0
    return limit if num > limit else num
def maxEps(x):
    """Return x, but never smaller than 1e-6 (guards later divisions)."""
    floor_value = 1e-6
    return x if x >= floor_value else floor_value
def encodeImageRGBDCPU(image, maxRange, outDir, quality):
    """Pack an HDR image into 8-bit RGBD (gamma-corrected RGB, divisor in alpha) and save as PNG."""
    src = bpy.data.images[image.name]
    base_name = src.name

    # Work on linear pixel values.
    if src.colorspace_settings.name != 'Linear':
        src.colorspace_settings.name = 'Linear'

    # Drop a trailing .exr/.hdr extension from the output name.
    if base_name.endswith('.exr') or base_name.endswith('.hdr'):
        base_name = base_name[:-4]

    dst = bpy.data.images.get(base_name + '_encoded')
    if not dst:
        dst = bpy.data.images.new(
            name=base_name + '_encoded',
            width=src.size[0],
            height=src.size[1],
            alpha=True,
            float_buffer=False
        )

    # Flat RGBA sequence: process one pixel (four floats) at a time.
    pixels = list(src.pixels)
    for base in range(0, len(pixels), 4):
        largest = maxEps(max(pixels[base], pixels[base + 1], pixels[base + 2]))
        divisor = max(255.0 / largest, 1.0)
        divisor = np.clip(math.floor(divisor) / 255.0, 0.0, 1.0)
        # Gamma-correct the scaled channels to help PNG quantization.
        for offset in range(3):
            pixels[base + offset] = math.pow(pixels[base + offset] * divisor, 1 / 2.2)
        pixels[base + 3] = divisor

    dst.pixels = pixels
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(dst.name)

    dst.filepath_raw = outDir + "/" + dst.name + ".png"
    dst.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    dst.save()
| true
| true
|
f716f3a29096b6b3d1684fa66a9f8119736e670b
| 468
|
py
|
Python
|
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 1
|
2019-03-30T18:14:25.000Z
|
2019-03-30T18:14:25.000Z
|
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 4
|
2016-05-06T17:19:30.000Z
|
2019-03-15T01:51:24.000Z
|
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 4
|
2016-10-18T22:49:08.000Z
|
2019-09-17T11:20:51.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-14 23:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Channel.thumbnail an optional TextField (blank and NULL allowed)."""

    # Must be applied after the channel-language migration.
    dependencies = [
        ('contentcuration', '0014_channel_language'),
    ]

    operations = [
        # Relax the thumbnail field: optional in forms (blank=True) and in
        # the database (null=True).
        migrations.AlterField(
            model_name='channel',
            name='thumbnail',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 22.285714
| 58
| 0.623932
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema change: Channel.thumbnail becomes an optional text field."""

    dependencies = [('contentcuration', '0014_channel_language')]

    operations = [
        migrations.AlterField(
            model_name='channel',
            name='thumbnail',
            field=models.TextField(null=True, blank=True),
        ),
    ]
| true
| true
|
f716f3b18a7254d8f31a8cfae36f39274adaf28e
| 815
|
py
|
Python
|
src/code/sort_clique_data_by_size.py
|
Buddyboy201/clique_analysis
|
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
|
[
"MIT"
] | null | null | null |
src/code/sort_clique_data_by_size.py
|
Buddyboy201/clique_analysis
|
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
|
[
"MIT"
] | null | null | null |
src/code/sort_clique_data_by_size.py
|
Buddyboy201/clique_analysis
|
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
|
[
"MIT"
] | null | null | null |
import json
#top 50 all cliques in category
#top 50 size n till n=2 in category
def update_ref(data, ref):
    """Group (clique, count) pairs by clique size into *ref*.

    data: iterable of (clique_str, count) pairs, where clique_str is the
        string form of a tuple/list literal (e.g. "(1, 2, 3)").
    ref: dict mapping clique size -> list of (clique, count); updated in
        place and also returned.
    """
    # literal_eval only accepts Python literals, unlike eval which would
    # execute arbitrary expressions from the data file.
    from ast import literal_eval

    for clique_str, count in data:
        clique = literal_eval(clique_str)
        ref.setdefault(len(clique), []).append((clique, count))
    return ref
# Read the flat clique counts, regroup each category by clique size, and
# write the result back out.
with open("clique_data.json", "r") as data_file:
    data = json.load(data_file)

categories = ("total", "water", "interface", "hydrophobic")
size_ref = {category: update_ref(data[category], {}) for category in categories}

with open("size_sorted_clique_data.json", "w") as dump_file:
    json.dump(size_ref, dump_file)
| 32.6
| 73
| 0.628221
|
import json
def update_ref(data, ref):
    """Group (clique, count) pairs by clique size into *ref*.

    data: iterable of (clique_str, count) pairs, where clique_str is the
        string form of a tuple/list literal (e.g. "(1, 2, 3)").
    ref: dict mapping clique size -> list of (clique, count); updated in
        place and also returned.
    """
    # literal_eval only accepts Python literals, unlike eval which would
    # execute arbitrary expressions from the data file.
    from ast import literal_eval

    for clique_str, count in data:
        clique = literal_eval(clique_str)
        ref.setdefault(len(clique), []).append((clique, count))
    return ref
# Read the flat clique counts, regroup each category by clique size, and
# write the result back out.
with open("clique_data.json", "r") as data_file:
    data = json.load(data_file)

categories = ("total", "water", "interface", "hydrophobic")
size_ref = {category: update_ref(data[category], {}) for category in categories}

with open("size_sorted_clique_data.json", "w") as dump_file:
    json.dump(size_ref, dump_file)
| true
| true
|
f716f3fadbdd20212933b188540902a9450b91e8
| 6,260
|
py
|
Python
|
pimsviewer/dimension.py
|
soft-matter/pimsviewer
|
9263ece121a58a0504c6e4d319ec6e18d1bb460a
|
[
"BSD-3-Clause"
] | 9
|
2018-06-26T06:49:34.000Z
|
2022-03-01T19:54:56.000Z
|
pimsviewer/dimension.py
|
soft-matter/pimsviewer
|
9263ece121a58a0504c6e4d319ec6e18d1bb460a
|
[
"BSD-3-Clause"
] | 14
|
2017-03-02T17:34:08.000Z
|
2020-06-23T15:09:23.000Z
|
pimsviewer/dimension.py
|
soft-matter/pimsviewer
|
9263ece121a58a0504c6e4d319ec6e18d1bb460a
|
[
"BSD-3-Clause"
] | 6
|
2017-03-02T18:36:20.000Z
|
2020-11-22T23:27:14.000Z
|
import os
import numpy as np
from PyQt5 import uic
from PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal
from PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap
from PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog)
class Dimension(QWidget):
    """Qt widget controlling one dimension (axis) of a multi-dimensional image.

    Provides a position slider, play/pause with an adjustable frame rate
    and, for mergeable dimensions, a merge toggle.  Emits ``play_event``
    whenever the position or merge state changes.
    """

    # Backing fields for the properties defined below.
    _playing = False
    _size = 0
    _position = 0
    _mergeable = False
    _merge = False
    _playable = False
    _fps = 5.0
    _max_playback_fps = 5.0  # UI refresh cap; faster fps values skip frames

    # Emitted when position or merge state changes; payload is this widget.
    play_event = pyqtSignal(QWidget)

    def __init__(self, name, size=0):
        """Create the widget for axis *name* with *size* positions."""
        super(Dimension, self).__init__()
        self.name = name
        self._size = size

        # Load the UI layout shipped next to this module.
        dirname = os.path.dirname(os.path.realpath(__file__))
        uic.loadUi(os.path.join(dirname, 'dimension.ui'), self)

        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.click_event)

        # Timer that advances the position during playback.
        self.playTimer = QTimer()
        self.playTimer.timeout.connect(self.play_tick)

        self.posButton.pressed.connect(self.update_position_from_btn)

        self.slider.setMaximum(self.size-1)
        self.slider.valueChanged.connect(self.update_position_from_slider)

        self.mergeButton.clicked.connect(self.update_merge)
        if not self.mergeable:
            self.mergeButton.hide()
        self._merge = self.mergeButton.isChecked()

        # Assigning fps configures the timer interval and the fps label.
        self.fps = self._fps
        self.fpsButton.pressed.connect(self.fps_changed)

        self.hide()

    def merge_image_over_dimension(self, image):
        """Collapse this dimension out of *image* by summing over its axis.

        The axis is located by matching its length to this dimension's
        size, which is ambiguous when two axes share that size.
        """
        # problem here: could be two axes with same size
        # TODO: think of a clever fix for this
        try:
            ix = image.shape.index(self._size)
        except ValueError:
            # No axis of this length: nothing to merge.
            return image

        if self.name != 'c':
            # I don't know what to do, sum over axis
            image = np.sum(image, axis=ix)

        return image

    def enable(self):
        """Enable and show the controls (no-op for non-playable dimensions)."""
        if not self.playable:
            return

        self.setEnabled(True)
        self.playButton.setEnabled(True)
        self.posButton.setEnabled(True)
        self.slider.setEnabled(True)
        self.fpsButton.setEnabled(True)
        if self.mergeable:
            self.mergeButton.setEnabled(True)
            self.mergeButton.show()
        self.show()

    def disable(self):
        """Disable all controls."""
        self.setEnabled(False)
        self.playButton.setEnabled(False)
        self.posButton.setEnabled(False)
        self.slider.setEnabled(False)
        self.fpsButton.setEnabled(False)
        self.mergeButton.setEnabled(False)

    def fps_changed(self):
        """Ask the user for a new playback frame rate via a dialog."""
        fps, ok = QInputDialog.getDouble(self, "Playback framerate", "New playback framerate", self.fps)
        if ok:
            self.fps = fps

    def click_event(self):
        """Toggle playback when the play button is clicked."""
        if not self.playable:
            return

        if not self.playing:
            self.playing = True
        else:
            self.playing = False

    def play_tick(self):
        """Advance the position by one playback step (timer callback)."""
        if not self.playing:
            return

        if self._fps > self._max_playback_fps:
            # Requested rate exceeds the timer cap: skip frames to keep up.
            self.position += int(round(self._fps / self._max_playback_fps))
        else:
            self.position += 1

    @property
    def size(self):
        # Number of positions along this dimension.
        return self._size

    @size.setter
    def size(self, size):
        # Changing the size resets position/playback and rescales the slider.
        self._size = size
        self.position = 0
        self.playing = False
        self.slider.setMinimum(0)
        self.slider.setMaximum(self.size-1)

    @property
    def fps(self):
        # Requested playback frame rate (frames per second).
        return self._fps

    @fps.setter
    def fps(self, fps):
        fps = float(fps)
        self._fps = fps
        # The timer never fires faster than _max_playback_fps; see play_tick.
        play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps
        self.playTimer.setInterval(int(round(1000.0 / play_fps)))
        self.fpsButton.setText('%d fps' % self.fps)

    @property
    def playable(self):
        # Whether this dimension can be played/scrubbed at all.
        return self._playable

    @playable.setter
    def playable(self, playable):
        self._playable = bool(playable)

    @property
    def playing(self):
        # Whether playback is currently running.
        return self._playing

    @playing.setter
    def playing(self, playing):
        # Starting/stopping playback starts/stops the timer.
        self._playing = bool(playing)
        if self._playing:
            self.playTimer.start()
        else:
            self.playTimer.stop()

    @property
    def position(self):
        # Current index along this dimension.
        return self._position

    def update_position_from_slider(self):
        # Slider callback: mirror the slider value into the position.
        position = self.slider.value()
        if position >= 0:
            self.position = position

    def update_position_from_btn(self):
        # Position-button callback: ask for an explicit position in a dialog.
        position, ok = QInputDialog.getInt(self, "'%s' position" % self.name, "New '%s' position (0-%d)" % (self.name, self.size-1), self.position, 0, self.size-1)
        if ok:
            self.position = position

    @position.setter
    def position(self, position):
        old_position = self.position

        # Wrap negative positions around from the end...
        while position < 0:
            position += self.size
        # ...and positions one cycle past the end back to the start.
        if position < self.size:
            self._position = position
        else:
            self._position = position - self.size

        self.slider.setValue(self.position)
        self.posButton.setText('%s=%d' % (self.name, self.position))

        # Notify listeners only on an actual change.
        if old_position != self.position:
            self.play_event.emit(self)

    def update_merge(self):
        # Merge-button callback.
        self.merge = self.mergeButton.isChecked()

    @property
    def merge(self):
        # Whether this dimension is merged (collapsed) in the displayed image.
        return self._merge

    @merge.setter
    def merge(self, merge):
        if not self.mergeable:
            merge = False
        if merge != self._merge:
            self._merge = bool(merge)
            self.mergeButton.setChecked(self._merge)
            self.play_event.emit(self)

    @property
    def mergeable(self):
        # Whether merging is allowed for this dimension.
        return self._mergeable

    @mergeable.setter
    def mergeable(self, mergeable):
        self._mergeable = bool(mergeable)
        if not mergeable:
            self.merge = False

    def __len__(self):
        return self.size

    def __str__(self):
        classname = self.__class__.__name__
        playing = "playing" if self.playing else "not playing"
        return "<%s %s of length %d (%s)>" % (classname, self.name, self.size, playing)

    def __repr__(self):
        return self.__str__()
| 27.099567
| 257
| 0.616613
|
import os
import numpy as np
from PyQt5 import uic
from PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal
from PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap
from PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog)
class Dimension(QWidget):
    """Qt widget controlling one dimension (axis) of an image stack.

    Wraps a slider, position/fps buttons, a play button and an optional
    merge checkbox (layout loaded from ``dimension.ui``). Emits
    ``play_event(self)`` whenever the displayed position or merge state
    changes, so the owning viewer can redraw.
    """

    # Class-level defaults for the backing fields of the properties below.
    _playing = False
    _size = 0
    _position = 0
    _mergeable = False
    _merge = False
    _playable = False
    _fps = 5.0
    # Display refresh is capped at this rate; faster fps values skip frames.
    _max_playback_fps = 5.0
    # Emitted with this widget whenever position/merge changes require a redraw.
    play_event = pyqtSignal(QWidget)

    def __init__(self, name, size=0):
        """Build the widget for dimension *name* with *size* positions.

        Loads the .ui file, wires up all signal/slot connections and
        starts hidden (shown later via ``enable``).
        """
        super(Dimension, self).__init__()
        self.name = name
        self._size = size
        # Load the widget layout from the .ui file next to this module.
        dirname = os.path.dirname(os.path.realpath(__file__))
        uic.loadUi(os.path.join(dirname, 'dimension.ui'), self)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.click_event)
        # Timer driving playback; interval is set via the fps property.
        self.playTimer = QTimer()
        self.playTimer.timeout.connect(self.play_tick)
        self.posButton.pressed.connect(self.update_position_from_btn)
        self.slider.setMaximum(self.size-1)
        self.slider.valueChanged.connect(self.update_position_from_slider)
        self.mergeButton.clicked.connect(self.update_merge)
        if not self.mergeable:
            self.mergeButton.hide()
        self._merge = self.mergeButton.isChecked()
        # Assign through the property so the timer interval is configured.
        self.fps = self._fps
        self.fpsButton.pressed.connect(self.fps_changed)
        self.hide()

    def merge_image_over_dimension(self, image):
        """Sum *image* over this dimension's axis when merging applies.

        The axis is located by matching this dimension's size against
        ``image.shape`` (first match wins — assumes sizes are unique;
        TODO confirm). The channel dimension ('c') is never summed here.
        Returns the (possibly reduced) array.
        """
        try:
            ix = image.shape.index(self._size)
        except ValueError:
            # This dimension is not present in the image; nothing to do.
            return image
        if self.name != 'c':
            image = np.sum(image, axis=ix)
        return image

    def enable(self):
        """Enable and show the widget (no-op unless the dimension is playable)."""
        if not self.playable:
            return
        self.setEnabled(True)
        self.playButton.setEnabled(True)
        self.posButton.setEnabled(True)
        self.slider.setEnabled(True)
        self.fpsButton.setEnabled(True)
        if self.mergeable:
            self.mergeButton.setEnabled(True)
            self.mergeButton.show()
        self.show()

    def disable(self):
        """Disable the widget and all of its child controls."""
        self.setEnabled(False)
        self.playButton.setEnabled(False)
        self.posButton.setEnabled(False)
        self.slider.setEnabled(False)
        self.fpsButton.setEnabled(False)
        self.mergeButton.setEnabled(False)

    def fps_changed(self):
        """Slot for the fps button: prompt for a new playback framerate."""
        fps, ok = QInputDialog.getDouble(self, "Playback framerate", "New playback framerate", self.fps)
        if ok:
            self.fps = fps

    def click_event(self):
        """Slot for the play button: toggle playback (if playable)."""
        if not self.playable:
            return
        if not self.playing:
            self.playing = True
        else:
            self.playing = False

    def play_tick(self):
        """Timer slot: advance the position by one playback step.

        When the requested fps exceeds the display cap, several positions
        are skipped per tick so the effective rate still matches fps.
        """
        if not self.playing:
            return
        if self._fps > self._max_playback_fps:
            self.position += int(round(self._fps / self._max_playback_fps))
        else:
            self.position += 1

    @property
    def size(self):
        """int: number of positions along this dimension."""
        return self._size

    @size.setter
    def size(self, size):
        """Resize the dimension; resets position, stops playback, rescales slider."""
        self._size = size
        self.position = 0
        self.playing = False
        self.slider.setMinimum(0)
        self.slider.setMaximum(self.size-1)

    @property
    def fps(self):
        """float: requested playback framerate."""
        return self._fps

    @fps.setter
    def fps(self, fps):
        """Set the framerate; timer interval is capped at _max_playback_fps."""
        fps = float(fps)
        self._fps = fps
        play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps
        self.playTimer.setInterval(int(round(1000.0 / play_fps)))
        self.fpsButton.setText('%d fps' % self.fps)

    @property
    def playable(self):
        """bool: whether this dimension can be played/enabled at all."""
        return self._playable

    @playable.setter
    def playable(self, playable):
        self._playable = bool(playable)

    @property
    def playing(self):
        """bool: whether playback is currently running."""
        return self._playing

    @playing.setter
    def playing(self, playing):
        """Start/stop the playback timer to match the new state."""
        self._playing = bool(playing)
        if self._playing:
            self.playTimer.start()
        else:
            self.playTimer.stop()

    @property
    def position(self):
        """int: current position along this dimension (0-based)."""
        return self._position

    def update_position_from_slider(self):
        """Slot for slider movement: adopt the slider's (non-negative) value."""
        position = self.slider.value()
        if position >= 0:
            self.position = position

    def update_position_from_btn(self):
        """Slot for the position button: prompt for a new position via a dialog."""
        position, ok = QInputDialog.getInt(self, "'%s' position" % self.name, "New '%s' position (0-%d)" % (self.name, self.size-1), self.position, 0, self.size-1)
        if ok:
            self.position = position

    @position.setter
    def position(self, position):
        """Set the position, wrapping around the ends; syncs UI, emits play_event."""
        old_position = self.position
        # Wrap negative positions around to the end.
        while position < 0:
            position += self.size
        if position < self.size:
            self._position = position
        else:
            # Past the end: fold back once (sufficient for small skips).
            self._position = position - self.size
        self.slider.setValue(self.position)
        self.posButton.setText('%s=%d' % (self.name, self.position))
        if old_position != self.position:
            self.play_event.emit(self)

    def update_merge(self):
        """Slot for the merge checkbox: mirror its state into ``merge``."""
        self.merge = self.mergeButton.isChecked()

    @property
    def merge(self):
        """bool: whether this dimension is merged (summed) for display."""
        return self._merge

    @merge.setter
    def merge(self, merge):
        """Set merge state (forced off when not mergeable); emits on change."""
        if not self.mergeable:
            merge = False
        if merge != self._merge:
            self._merge = bool(merge)
            self.mergeButton.setChecked(self._merge)
            self.play_event.emit(self)

    @property
    def mergeable(self):
        """bool: whether merging is supported for this dimension."""
        return self._mergeable

    @mergeable.setter
    def mergeable(self, mergeable):
        """Set mergeability; disabling it also clears any active merge."""
        self._mergeable = bool(mergeable)
        if not mergeable:
            self.merge = False

    def __len__(self):
        """Number of positions along this dimension."""
        return self.size

    def __str__(self):
        """Readable summary including name, size and playback state."""
        classname = self.__class__.__name__
        playing = "playing" if self.playing else "not playing"
        return "<%s %s of length %d (%s)>" % (classname, self.name, self.size, playing)

    def __repr__(self):
        return self.__str__()
| true
| true
|
f716f40ff8616b758469bb436739279671f60ee1
| 327
|
py
|
Python
|
deform_conv/utils.py
|
lone17/deform-conv
|
3502cedbeae61c961d7e988382c55b9d45fd1873
|
[
"MIT"
] | 221
|
2017-03-30T12:31:02.000Z
|
2022-03-24T08:39:26.000Z
|
deform_conv/utils.py
|
ml-lab/deform-conv
|
126ebcc283a4325c474332fa170f57d52a59e34d
|
[
"MIT"
] | 1
|
2019-03-09T11:01:39.000Z
|
2019-03-09T11:01:39.000Z
|
deform_conv/utils.py
|
ml-lab/deform-conv
|
126ebcc283a4325c474332fa170f57d52a59e34d
|
[
"MIT"
] | 78
|
2017-03-30T21:46:59.000Z
|
2022-03-19T19:52:19.000Z
|
from __future__ import absolute_import, division
from tensorflow.python import debug as tf_debug
import keras.backend as K
def keras_set_tf_debug():
    """Wrap the active Keras TensorFlow session in the tfdbg CLI debugger.

    Installs the stock ``has_inf_or_nan`` tensor filter and registers the
    wrapped session back with Keras, so subsequent runs drop into the
    local CLI debugger.
    """
    wrapped = tf_debug.LocalCLIDebugWrapperSession(K.get_session())
    wrapped.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    K.set_session(wrapped)
| 27.25
| 69
| 0.788991
|
from __future__ import absolute_import, division
from tensorflow.python import debug as tf_debug
import keras.backend as K
def keras_set_tf_debug():
    """Enable the tfdbg local CLI debugger on the current Keras session.

    The session is wrapped, the standard ``has_inf_or_nan`` filter is
    attached, and the wrapper is installed as the Keras session.
    """
    debug_sess = tf_debug.LocalCLIDebugWrapperSession(K.get_session())
    debug_sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    K.set_session(debug_sess)
| true
| true
|
f716f5a922081969766e4529bf89d92758cc2879
| 11,057
|
py
|
Python
|
runway/aws_sso_botocore/credentials.py
|
avosper-intellaegis/runway
|
757d4e7db269ec16479b044ac82a69f25fa2a450
|
[
"Apache-2.0"
] | null | null | null |
runway/aws_sso_botocore/credentials.py
|
avosper-intellaegis/runway
|
757d4e7db269ec16479b044ac82a69f25fa2a450
|
[
"Apache-2.0"
] | null | null | null |
runway/aws_sso_botocore/credentials.py
|
avosper-intellaegis/runway
|
757d4e7db269ec16479b044ac82a69f25fa2a450
|
[
"Apache-2.0"
] | null | null | null |
"""Botocore with support for AWS SSO credential assets."""
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import logging
import os
from hashlib import sha1
from botocore import UNSIGNED
from botocore.config import Config
from botocore.credentials import (
AssumeRoleProvider,
BotoProvider,
CachedCredentialFetcher,
CanonicalNameCredentialSourcer,
ContainerProvider,
CredentialProvider,
CredentialResolver,
DeferredRefreshableCredentials,
EnvProvider,
InstanceMetadataFetcher,
InstanceMetadataProvider,
JSONFileCache,
OriginalEC2Provider,
)
from botocore.credentials import (
ProfileProviderBuilder as BotocoreProfileProviderBuilder,
)
from botocore.credentials import _get_client_creator, _serialize_if_needed
from botocore.exceptions import InvalidConfigError
from dateutil.tz import tzutc
from .exceptions import UnauthorizedSSOTokenError
from .util import SSOTokenLoader
LOGGER = logging.getLogger(__name__)
def create_credential_resolver(session, cache=None, region_name=None):
    """Create a default credential resolver.

    This creates a pre-configured credential resolver that includes the
    default lookup chain for credentials (env vars, assume-role, profile
    providers including AWS SSO, legacy EC2/boto configs, container and
    instance metadata).

    Args:
        session: Botocore session used to read config variables and
            create service clients.
        cache: Optional dict-like credential cache; a fresh dict is used
            when omitted.
        region_name: Optional region for clients created by the
            assume-role / SSO providers.

    Returns:
        CredentialResolver wrapping the assembled provider chain.
    """
    profile_name = session.get_config_variable("profile") or "default"
    metadata_timeout = session.get_config_variable("metadata_service_timeout")
    num_attempts = session.get_config_variable("metadata_service_num_attempts")
    # A profile set as an instance variable (e.g. AWS_PROFILE / explicit
    # configuration) disables the plain env-var provider further below.
    disable_env_vars = session.instance_variables().get("profile") is not None
    if cache is None:
        cache = {}
    env_provider = EnvProvider()
    container_provider = ContainerProvider()
    instance_metadata_provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(
            timeout=metadata_timeout,
            num_attempts=num_attempts,
            user_agent=session.user_agent(),
        )
    )
    # This project's ProfileProviderBuilder adds the SSO provider.
    profile_provider_builder = ProfileProviderBuilder(
        session, cache=cache, region_name=region_name
    )
    assume_role_provider = AssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=_get_client_creator(session, region_name),
        cache=cache,
        profile_name=profile_name,
        credential_sourcer=CanonicalNameCredentialSourcer(
            [env_provider, container_provider, instance_metadata_provider]
        ),
        profile_provider_builder=profile_provider_builder,
    )
    pre_profile = [
        env_provider,
        assume_role_provider,
    ]
    profile_providers = profile_provider_builder.providers(
        profile_name=profile_name, disable_env_vars=disable_env_vars,
    )
    post_profile = [
        OriginalEC2Provider(),
        BotoProvider(),
        container_provider,
        instance_metadata_provider,
    ]
    providers = pre_profile + profile_providers + post_profile
    if disable_env_vars:
        # An explicitly provided profile will negate an EnvProvider.
        # We will defer to providers that understand the "profile"
        # concept to retrieve credentials.
        # The one edge case if is all three values are provided via
        # env vars:
        # export AWS_ACCESS_KEY_ID=foo
        # export AWS_SECRET_ACCESS_KEY=bar
        # export AWS_PROFILE=baz
        # Then, just like our client() calls, the explicit credentials
        # will take precedence.
        #
        # This precedence is enforced by leaving the EnvProvider in the chain.
        # This means that the only way a "profile" would win is if the
        # EnvProvider does not return credentials, which is what we want
        # in this scenario.
        providers.remove(env_provider)
        LOGGER.debug(
            "Skipping environment variable credential check"
            " because profile name was explicitly set."
        )
    return CredentialResolver(providers=providers)
class ProfileProviderBuilder(BotocoreProfileProviderBuilder):
    """Extends the botocore profile provider builder to support AWS SSO."""

    def __init__(self, session, cache=None, region_name=None, sso_token_cache=None):
        """Instantiate class.

        Args:
            session: Botocore session forwarded to the base builder.
            cache: Optional credential cache forwarded to the base builder.
            region_name: Optional region forwarded to the base builder.
            sso_token_cache: Optional cache for SSO access tokens; when
                None, SSOProvider falls back to its default on-disk cache.
        """
        super().__init__(session, cache, region_name)
        self._sso_token_cache = sso_token_cache

    def providers(self, profile_name, disable_env_vars=False):
        """Return list of providers, with SSO inserted after web identity."""
        return [
            self._create_web_identity_provider(profile_name, disable_env_vars,),
            self._create_sso_provider(profile_name),
            self._create_shared_credential_provider(profile_name),
            self._create_process_provider(profile_name),
            self._create_config_provider(profile_name),
        ]

    def _create_sso_provider(self, profile_name):
        """AWS SSO credential provider for *profile_name*."""
        return SSOProvider(
            load_config=lambda: self._session.full_config,
            client_creator=self._session.create_client,
            profile_name=profile_name,
            cache=self._cache,
            token_cache=self._sso_token_cache,
        )
class SSOCredentialFetcher(CachedCredentialFetcher):
    """AWS SSO credential fetcher.

    Exchanges a cached SSO access token for temporary role credentials
    via the SSO ``GetRoleCredentials`` API, with file-safe caching
    provided by the CachedCredentialFetcher base class.
    """

    def __init__(
        self,
        start_url,
        sso_region,
        role_name,
        account_id,
        client_creator,
        token_loader=None,
        cache=None,
        expiry_window_seconds=None,
    ):
        """Instantiate class.

        Args:
            start_url: SSO start URL identifying the SSO instance.
            sso_region: Region of the SSO service endpoint.
            role_name: Name of the role to fetch credentials for.
            account_id: AWS account containing the role.
            client_creator: Callable creating botocore clients.
            token_loader: Callable mapping start_url -> SSO access token.
            cache: Optional credential cache (base-class behavior).
            expiry_window_seconds: Optional refresh window (base class).
        """
        self._client_creator = client_creator
        self._sso_region = sso_region
        self._role_name = role_name
        self._account_id = account_id
        self._start_url = start_url
        self._token_loader = token_loader
        super().__init__(cache, expiry_window_seconds)

    def _create_cache_key(self):
        """Create a predictable cache key for the current configuration.

        The cache key is intended to be compatible with file names.
        """
        args = {
            "startUrl": self._start_url,
            "roleName": self._role_name,
            "accountId": self._account_id,
        }
        # NOTE: It would be good to hoist this cache key construction logic
        # into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that sub class don't
        # pass separators resulting in non-minified JSON. In the long term,
        # all fetchers should use the below caching scheme.
        args = json.dumps(args, sort_keys=True, separators=(",", ":"))
        argument_hash = sha1(args.encode("utf-8")).hexdigest()
        return self._make_file_safe(argument_hash)

    def _parse_timestamp(self, timestamp_ms):  # pylint: disable=no-self-use
        """Convert an epoch-milliseconds value to a serialized UTC timestamp."""
        # fromtimestamp expects seconds so: milliseconds / 1000 = seconds
        timestamp_seconds = timestamp_ms / 1000.0
        timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
        return _serialize_if_needed(timestamp)

    def _get_credentials(self):
        """Get credentials by calling SSO get role credentials.

        Returns a dict in the standard fetcher shape with ProviderType
        "sso". Raises UnauthorizedSSOTokenError when the cached SSO
        token is rejected.
        """
        # The SSO call is authenticated with the bearer token, not SigV4.
        config = Config(signature_version=UNSIGNED, region_name=self._sso_region,)
        client = self._client_creator("sso", config=config)
        kwargs = {
            "roleName": self._role_name,
            "accountId": self._account_id,
            "accessToken": self._token_loader(self._start_url),
        }
        try:
            response = client.get_role_credentials(**kwargs)
        except client.exceptions.UnauthorizedException as exc:
            raise UnauthorizedSSOTokenError() from exc
        credentials = response["roleCredentials"]
        credentials = {
            "ProviderType": "sso",
            "Credentials": {
                "AccessKeyId": credentials["accessKeyId"],
                "SecretAccessKey": credentials["secretAccessKey"],
                "SessionToken": credentials["sessionToken"],
                "Expiration": self._parse_timestamp(credentials["expiration"]),
            },
        }
        return credentials
class SSOProvider(CredentialProvider):
    """AWS SSO credential provider.

    Reads the four ``sso_*`` settings from the active profile and, when
    present, returns lazily-refreshed credentials backed by an
    SSOCredentialFetcher.
    """

    METHOD = "sso"
    # Default on-disk location of cached SSO access tokens.
    _SSO_TOKEN_CACHE_DIR = os.path.expanduser(os.path.join("~", ".aws", "sso", "cache"))
    # All four must be present in the profile for SSO to be usable.
    _SSO_CONFIG_VARS = [
        "sso_start_url",
        "sso_region",
        "sso_role_name",
        "sso_account_id",
    ]

    # pylint: disable=super-init-not-called
    def __init__(
        self, load_config, client_creator, profile_name, cache=None, token_cache=None
    ):
        """Instantiate class.

        Args:
            load_config: Callable returning the full parsed config.
            client_creator: Callable creating botocore clients.
            profile_name: Name of the profile whose sso_* settings to use.
            cache: Optional credential cache (dict-like).
            token_cache: Optional SSO token cache; defaults to a
                JSONFileCache over the standard ~/.aws/sso/cache dir.
        """
        if token_cache is None:
            token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
        self._token_cache = token_cache
        if cache is None:
            cache = {}
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name

    def _load_sso_config(self):
        """Load sso config from the profile.

        Returns None when the profile has no sso_* keys at all; raises
        InvalidConfigError when only some of the required keys are set.
        """
        loaded_config = self._load_config()
        profiles = loaded_config.get("profiles", {})
        profile_name = self._profile_name
        profile_config = profiles.get(self._profile_name, {})
        if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
            # Profile is not SSO-configured; let other providers try.
            return None
        config = {}
        missing_config_vars = []
        for config_var in self._SSO_CONFIG_VARS:
            if config_var in profile_config:
                config[config_var] = profile_config[config_var]
            else:
                missing_config_vars.append(config_var)
        if missing_config_vars:
            missing = ", ".join(missing_config_vars)
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" is configured to use SSO but is missing '
                    "required configuration: %s" % (profile_name, missing)
                )
            )
        return config

    def load(self):
        """Load AWS SSO credentials, or None when the profile is not SSO."""
        sso_config = self._load_sso_config()
        if not sso_config:
            return None
        sso_fetcher = SSOCredentialFetcher(
            sso_config["sso_start_url"],
            sso_config["sso_region"],
            sso_config["sso_role_name"],
            sso_config["sso_account_id"],
            self._client_creator,
            token_loader=SSOTokenLoader(cache=self._token_cache),
            cache=self.cache,
        )
        # Defer the actual SSO call until the credentials are first used.
        return DeferredRefreshableCredentials(
            method=self.METHOD, refresh_using=sso_fetcher.fetch_credentials,
        )
| 35.213376
| 88
| 0.665823
|
import datetime
import json
import logging
import os
from hashlib import sha1
from botocore import UNSIGNED
from botocore.config import Config
from botocore.credentials import (
AssumeRoleProvider,
BotoProvider,
CachedCredentialFetcher,
CanonicalNameCredentialSourcer,
ContainerProvider,
CredentialProvider,
CredentialResolver,
DeferredRefreshableCredentials,
EnvProvider,
InstanceMetadataFetcher,
InstanceMetadataProvider,
JSONFileCache,
OriginalEC2Provider,
)
from botocore.credentials import (
ProfileProviderBuilder as BotocoreProfileProviderBuilder,
)
from botocore.credentials import _get_client_creator, _serialize_if_needed
from botocore.exceptions import InvalidConfigError
from dateutil.tz import tzutc
from .exceptions import UnauthorizedSSOTokenError
from .util import SSOTokenLoader
LOGGER = logging.getLogger(__name__)
def create_credential_resolver(session, cache=None, region_name=None):
    """Create a pre-configured default credential resolver.

    Assembles the standard botocore lookup chain (env vars, assume-role,
    profile providers including AWS SSO, legacy EC2/boto configs,
    container and instance metadata) and returns it as a single
    CredentialResolver.

    Args:
        session: Botocore session used for config variables and clients.
        cache: Optional dict-like credential cache; a fresh dict is used
            when omitted.
        region_name: Optional region for assume-role / SSO clients.

    Returns:
        CredentialResolver wrapping the assembled provider chain.
    """
    profile_name = session.get_config_variable("profile") or "default"
    metadata_timeout = session.get_config_variable("metadata_service_timeout")
    num_attempts = session.get_config_variable("metadata_service_num_attempts")
    # An explicitly configured profile disables the env-var provider below.
    disable_env_vars = session.instance_variables().get("profile") is not None
    if cache is None:
        cache = {}
    env_provider = EnvProvider()
    container_provider = ContainerProvider()
    instance_metadata_provider = InstanceMetadataProvider(
        iam_role_fetcher=InstanceMetadataFetcher(
            timeout=metadata_timeout,
            num_attempts=num_attempts,
            user_agent=session.user_agent(),
        )
    )
    # This module's ProfileProviderBuilder adds the SSO provider.
    profile_provider_builder = ProfileProviderBuilder(
        session, cache=cache, region_name=region_name
    )
    assume_role_provider = AssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=_get_client_creator(session, region_name),
        cache=cache,
        profile_name=profile_name,
        credential_sourcer=CanonicalNameCredentialSourcer(
            [env_provider, container_provider, instance_metadata_provider]
        ),
        profile_provider_builder=profile_provider_builder,
    )
    pre_profile = [
        env_provider,
        assume_role_provider,
    ]
    profile_providers = profile_provider_builder.providers(
        profile_name=profile_name, disable_env_vars=disable_env_vars,
    )
    post_profile = [
        OriginalEC2Provider(),
        BotoProvider(),
        container_provider,
        instance_metadata_provider,
    ]
    providers = pre_profile + profile_providers + post_profile
    if disable_env_vars:
        # An explicit profile defers to profile-aware providers; removing
        # the EnvProvider here is what enforces that precedence.
        providers.remove(env_provider)
        LOGGER.debug(
            "Skipping environment variable credential check"
            " because profile name was explicitly set."
        )
    return CredentialResolver(providers=providers)
class ProfileProviderBuilder(BotocoreProfileProviderBuilder):
    """Profile provider builder extended with AWS SSO support."""

    def __init__(self, session, cache=None, region_name=None, sso_token_cache=None):
        """Store the optional SSO token cache and delegate the rest upward."""
        super().__init__(session, cache, region_name)
        self._sso_token_cache = sso_token_cache

    def providers(self, profile_name, disable_env_vars=False):
        """Return the provider list with SSO inserted after web identity."""
        return [
            self._create_web_identity_provider(profile_name, disable_env_vars,),
            self._create_sso_provider(profile_name),
            self._create_shared_credential_provider(profile_name),
            self._create_process_provider(profile_name),
            self._create_config_provider(profile_name),
        ]

    def _create_sso_provider(self, profile_name):
        """Build the AWS SSO credential provider for *profile_name*."""
        return SSOProvider(
            load_config=lambda: self._session.full_config,
            client_creator=self._session.create_client,
            profile_name=profile_name,
            cache=self._cache,
            token_cache=self._sso_token_cache,
        )
class SSOCredentialFetcher(CachedCredentialFetcher):
    """Fetch temporary role credentials via the SSO GetRoleCredentials API."""

    def __init__(
        self,
        start_url,
        sso_region,
        role_name,
        account_id,
        client_creator,
        token_loader=None,
        cache=None,
        expiry_window_seconds=None,
    ):
        """Store the SSO parameters and initialize the caching base class.

        Args:
            start_url: SSO start URL identifying the SSO instance.
            sso_region: Region of the SSO service endpoint.
            role_name: Role to fetch credentials for.
            account_id: AWS account containing the role.
            client_creator: Callable creating botocore clients.
            token_loader: Callable mapping start_url -> SSO access token.
            cache: Optional credential cache (base-class behavior).
            expiry_window_seconds: Optional refresh window (base class).
        """
        self._client_creator = client_creator
        self._sso_region = sso_region
        self._role_name = role_name
        self._account_id = account_id
        self._start_url = start_url
        self._token_loader = token_loader
        super().__init__(cache, expiry_window_seconds)

    def _create_cache_key(self):
        """Create a predictable, file-name-safe cache key for this config."""
        args = {
            "startUrl": self._start_url,
            "roleName": self._role_name,
            "accountId": self._account_id,
        }
        # NOTE: minified (separators=(",", ":")) sort-keyed JSON keeps the
        # key stable; other fetchers don't pass separators resulting in
        # non-minified JSON. In the long term, all fetchers should use the
        # below caching scheme.
        args = json.dumps(args, sort_keys=True, separators=(",", ":"))
        argument_hash = sha1(args.encode("utf-8")).hexdigest()
        return self._make_file_safe(argument_hash)

    def _parse_timestamp(self, timestamp_ms):  # pylint: disable=no-self-use
        """Convert an epoch-milliseconds value to a serialized UTC timestamp."""
        # fromtimestamp expects seconds so: milliseconds / 1000 = seconds
        timestamp_seconds = timestamp_ms / 1000.0
        timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
        return _serialize_if_needed(timestamp)

    def _get_credentials(self):
        """Call SSO GetRoleCredentials and reshape the response.

        Raises UnauthorizedSSOTokenError when the cached token is rejected.
        """
        # Bearer-token auth, not SigV4 — hence UNSIGNED.
        config = Config(signature_version=UNSIGNED, region_name=self._sso_region,)
        client = self._client_creator("sso", config=config)
        kwargs = {
            "roleName": self._role_name,
            "accountId": self._account_id,
            "accessToken": self._token_loader(self._start_url),
        }
        try:
            response = client.get_role_credentials(**kwargs)
        except client.exceptions.UnauthorizedException as exc:
            raise UnauthorizedSSOTokenError() from exc
        credentials = response["roleCredentials"]
        credentials = {
            "ProviderType": "sso",
            "Credentials": {
                "AccessKeyId": credentials["accessKeyId"],
                "SecretAccessKey": credentials["secretAccessKey"],
                "SessionToken": credentials["sessionToken"],
                "Expiration": self._parse_timestamp(credentials["expiration"]),
            },
        }
        return credentials
class SSOProvider(CredentialProvider):
    """Credential provider that resolves credentials via AWS SSO."""

    METHOD = "sso"
    # Default on-disk location of cached SSO access tokens.
    _SSO_TOKEN_CACHE_DIR = os.path.expanduser(os.path.join("~", ".aws", "sso", "cache"))
    # All four keys must be present in the profile for SSO to be usable.
    _SSO_CONFIG_VARS = [
        "sso_start_url",
        "sso_region",
        "sso_role_name",
        "sso_account_id",
    ]

    # pylint: disable=super-init-not-called
    def __init__(
        self, load_config, client_creator, profile_name, cache=None, token_cache=None
    ):
        """Store collaborators; default the token cache to ~/.aws/sso/cache."""
        if token_cache is None:
            token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
        self._token_cache = token_cache
        if cache is None:
            cache = {}
        self.cache = cache
        self._load_config = load_config
        self._client_creator = client_creator
        self._profile_name = profile_name

    def _load_sso_config(self):
        """Read the profile's sso_* settings.

        Returns None when no sso_* keys exist; raises InvalidConfigError
        when only some of the required keys are set.
        """
        loaded_config = self._load_config()
        profiles = loaded_config.get("profiles", {})
        profile_name = self._profile_name
        profile_config = profiles.get(self._profile_name, {})
        if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
            # Not an SSO profile; let other providers handle it.
            return None
        config = {}
        missing_config_vars = []
        for config_var in self._SSO_CONFIG_VARS:
            if config_var in profile_config:
                config[config_var] = profile_config[config_var]
            else:
                missing_config_vars.append(config_var)
        if missing_config_vars:
            missing = ", ".join(missing_config_vars)
            raise InvalidConfigError(
                error_msg=(
                    'The profile "%s" is configured to use SSO but is missing '
                    "required configuration: %s" % (profile_name, missing)
                )
            )
        return config

    def load(self):
        """Return deferred SSO credentials, or None for non-SSO profiles."""
        sso_config = self._load_sso_config()
        if not sso_config:
            return None
        sso_fetcher = SSOCredentialFetcher(
            sso_config["sso_start_url"],
            sso_config["sso_region"],
            sso_config["sso_role_name"],
            sso_config["sso_account_id"],
            self._client_creator,
            token_loader=SSOTokenLoader(cache=self._token_cache),
            cache=self.cache,
        )
        # Defer the actual SSO call until credentials are first needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD, refresh_using=sso_fetcher.fetch_credentials,
        )
| true
| true
|
f716f72e90955ad7a4275213b789b78de850af13
| 5,819
|
py
|
Python
|
storm_control/sc_hardware/hamamatsu/io_tests.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 47
|
2015-02-11T16:05:54.000Z
|
2022-03-26T14:13:12.000Z
|
storm_control/sc_hardware/hamamatsu/io_tests.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 110
|
2015-01-30T03:53:41.000Z
|
2021-11-03T15:58:44.000Z
|
storm_control/sc_hardware/hamamatsu/io_tests.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 61
|
2015-01-09T18:31:27.000Z
|
2021-12-21T13:07:51.000Z
|
#!/usr/bin/python
#
## @file
#
# For testing how to write 2048 x 2048 pixels at 100fps.
#
# Hazen 10/13
#
import ctypes
import ctypes.util
import numpy
import time
import hamamatsu_camera as hc
# NOTE: Python 2 script (print statements) benchmarking ways to stream
# 2048x2048 frames to disk at 100fps. Exactly one `if` arm is active
# (the `if 1:` block); the others are kept as alternative strategies.
print "camera 0 model:", hc.getModelInfo(0)
hcam = hc.HamamatsuCameraMR(0)
# Set camera parameters.
cam_offset = 100
cam_x = 2048
cam_y = 2048
hcam.setPropertyValue("defect_correct_mode", "OFF")
hcam.setPropertyValue("exposure_time", 0.01)
hcam.setPropertyValue("subarray_hsize", cam_x)
hcam.setPropertyValue("subarray_vsize", cam_y)
hcam.setPropertyValue("binning", "1x1")
hcam.setPropertyValue("readout_speed", 2)
# Test image streaming using numpy (single file via ndarray.tofile).
if 1:
    bin_fp = open("e:/zhuang/test.bin", "wb")
    hcam.startAcquisition()
    for i in range(1000):
        # Get frames.
        [frames, dims] = hcam.getFrames()
        # Save frames.
        for aframe in frames:
            np_data = aframe.getData()
            np_data.tofile(bin_fp)
        # Print backlog.
        print i, len(frames)
        # Abort if disk writing falls more than 20 frames behind.
        if (len(frames) > 20):
            exit()
    hcam.stopAcquisition()
    bin_fp.close()
# Test writing images as separate files w/ numpy.
if 0:
    cur_file = 0
    hcam.startAcquisition()
    for i in range(30):
        # Get frames.
        [frames, dims] = hcam.getFrames()
        # Save frames.
        for aframe in frames:
            np_data = aframe.getData()
            bin_fp = open("e:/zhuang/test" + str(cur_file) + ".bin", "wb")
            np_data.tofile(bin_fp)
            bin_fp.close()
            cur_file += 1
        # Print backlog.
        print i, len(frames)
    hcam.stopAcquisition()
# Test image streaming using numpy.memmap (pre-sized memory-mapped file).
if 0:
    fsize = 2048*2048
    max_frame = 1000
    mem_fp = numpy.memmap("e:/zhuang/test.bin", mode = "write", dtype = numpy.uint16, shape = fsize * max_frame)
    hcam.startAcquisition()
    cur_frame = 0
    for i in range(10):
        # Get frames.
        [frames, dims] = hcam.getFrames()
        # Save frames.
        for aframe in frames:
            if (cur_frame < max_frame):
                mem_fp[cur_frame*fsize:(cur_frame+1)*fsize] = aframe.getData()
                cur_frame += 1
        # Record backlog.
        print i, len(frames)
    print "Saved", cur_frame, "frames"
    hcam.stopAcquisition()
# Test image streaming using C / fwrite via ctypes (buffered C stdio).
if 0:
    c_lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    fopen = c_lib.fopen
    fopen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
    fopen.restype = ctypes.c_void_p
    fwrite = c_lib.fwrite
    fwrite.argtypes = [ctypes.c_void_p,
                       ctypes.c_int,
                       ctypes.c_int,
                       ctypes.c_void_p]
    fwrite.restype = ctypes.c_int
    fclose = c_lib.fclose
    fclose.argtypes = [ctypes.c_void_p]
    fclose.restype = ctypes.c_int
    bin_fp = fopen("e:/zhuang/test.bin", "wb")
    hcam.startAcquisition()
    for i in range(30):
        # Get frames.
        [frames, dims] = hcam.getFrames()
        # Save frames.
        for aframe in frames:
            np_data = aframe.getData()
            # fwrite(ptr, item_size=2 bytes (uint16), n_items, FILE*).
            fwrite(np_data.ctypes.data,
                   2,
                   np_data.size,
                   bin_fp)
        # Record backlog.
        print i, len(frames)
    hcam.stopAcquisition()
    fclose(bin_fp)
# Test image streaming using C / write via ctypes (unbuffered fd I/O;
# _open/_write/_close are the Windows CRT names — presumably Windows-only).
if 0:
    c_lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    _O_WRONLY = int("0x0001", 0)
    _O_CREAT = int("0x0200", 0)
    _S_IWRITE = int("0x0200", 0)
    fopen = c_lib._open
    fopen.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
    fopen.restype = ctypes.c_int
    write = c_lib._write
    write.argtypes = [ctypes.c_int,
                      ctypes.c_void_p,
                      ctypes.c_int]
    write.restype = ctypes.c_int
    fclose = c_lib._close
    fclose.argtypes = [ctypes.c_int]
    bin_fd = fopen("e:/zhuang/test.bin", _O_WRONLY + _O_CREAT, _S_IWRITE)
    hcam.startAcquisition()
    for i in range(4):
        # Get frames.
        [frames, dims] = hcam.getFrames()
        # Save frames.
        for aframe in frames:
            np_data = aframe.getData()
            # write(fd, ptr, n_bytes); size*2 because pixels are uint16.
            print write(bin_fd,
                        np_data.ctypes.data,
                        np_data.size*2)
        # Record backlog.
        print i, len(frames)
    hcam.stopAcquisition()
    fclose(bin_fd)
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 28.247573
| 113
| 0.598556
|
import ctypes
import ctypes.util
import numpy
import time
import hamamatsu_camera as hc
# NOTE: Python 2 script (print statements) benchmarking ways to stream
# 2048x2048 uint16 frames to disk. Only the `if 1:` strategy runs; the
# `if 0:` blocks are alternative approaches kept for comparison.
print "camera 0 model:", hc.getModelInfo(0)
hcam = hc.HamamatsuCameraMR(0)
# Camera configuration: full-frame, 10 ms exposure, no binning.
cam_offset = 100
cam_x = 2048
cam_y = 2048
hcam.setPropertyValue("defect_correct_mode", "OFF")
hcam.setPropertyValue("exposure_time", 0.01)
hcam.setPropertyValue("subarray_hsize", cam_x)
hcam.setPropertyValue("subarray_vsize", cam_y)
hcam.setPropertyValue("binning", "1x1")
hcam.setPropertyValue("readout_speed", 2)
# Strategy 1 (active): stream all frames into one file via ndarray.tofile.
if 1:
    bin_fp = open("e:/zhuang/test.bin", "wb")
    hcam.startAcquisition()
    for i in range(1000):
        # Fetch whatever frames have accumulated since the last call.
        [frames, dims] = hcam.getFrames()
        for aframe in frames:
            np_data = aframe.getData()
            np_data.tofile(bin_fp)
        # Backlog: frames fetched per iteration; abort if we fall behind.
        print i, len(frames)
        if (len(frames) > 20):
            exit()
    hcam.stopAcquisition()
    bin_fp.close()
# Strategy 2: one file per frame.
if 0:
    cur_file = 0
    hcam.startAcquisition()
    for i in range(30):
        [frames, dims] = hcam.getFrames()
        for aframe in frames:
            np_data = aframe.getData()
            bin_fp = open("e:/zhuang/test" + str(cur_file) + ".bin", "wb")
            np_data.tofile(bin_fp)
            bin_fp.close()
            cur_file += 1
        # Backlog per iteration.
        print i, len(frames)
    hcam.stopAcquisition()
# Strategy 3: write into a pre-sized numpy.memmap.
if 0:
    fsize = 2048*2048
    max_frame = 1000
    mem_fp = numpy.memmap("e:/zhuang/test.bin", mode = "write", dtype = numpy.uint16, shape = fsize * max_frame)
    hcam.startAcquisition()
    cur_frame = 0
    for i in range(10):
        [frames, dims] = hcam.getFrames()
        for aframe in frames:
            if (cur_frame < max_frame):
                mem_fp[cur_frame*fsize:(cur_frame+1)*fsize] = aframe.getData()
                cur_frame += 1
        # Backlog per iteration.
        print i, len(frames)
    print "Saved", cur_frame, "frames"
    hcam.stopAcquisition()
# Strategy 4: buffered C stdio fwrite via ctypes.
if 0:
    c_lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    fopen = c_lib.fopen
    fopen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
    fopen.restype = ctypes.c_void_p
    fwrite = c_lib.fwrite
    fwrite.argtypes = [ctypes.c_void_p,
                       ctypes.c_int,
                       ctypes.c_int,
                       ctypes.c_void_p]
    fwrite.restype = ctypes.c_int
    fclose = c_lib.fclose
    fclose.argtypes = [ctypes.c_void_p]
    fclose.restype = ctypes.c_int
    bin_fp = fopen("e:/zhuang/test.bin", "wb")
    hcam.startAcquisition()
    for i in range(30):
        [frames, dims] = hcam.getFrames()
        for aframe in frames:
            np_data = aframe.getData()
            # fwrite(ptr, item_size=2 bytes (uint16), n_items, FILE*).
            fwrite(np_data.ctypes.data,
                   2,
                   np_data.size,
                   bin_fp)
        # Backlog per iteration.
        print i, len(frames)
    hcam.stopAcquisition()
    fclose(bin_fp)
# Strategy 5: unbuffered fd write via ctypes (_open/_write/_close are the
# Windows CRT entry points — presumably Windows-only; TODO confirm).
if 0:
    c_lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    _O_WRONLY = int("0x0001", 0)
    _O_CREAT = int("0x0200", 0)
    _S_IWRITE = int("0x0200", 0)
    fopen = c_lib._open
    fopen.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
    fopen.restype = ctypes.c_int
    write = c_lib._write
    write.argtypes = [ctypes.c_int,
                      ctypes.c_void_p,
                      ctypes.c_int]
    write.restype = ctypes.c_int
    fclose = c_lib._close
    fclose.argtypes = [ctypes.c_int]
    bin_fd = fopen("e:/zhuang/test.bin", _O_WRONLY + _O_CREAT, _S_IWRITE)
    hcam.startAcquisition()
    for i in range(4):
        [frames, dims] = hcam.getFrames()
        for aframe in frames:
            np_data = aframe.getData()
            # write(fd, ptr, n_bytes); size*2 because pixels are uint16.
            print write(bin_fd,
                        np_data.ctypes.data,
                        np_data.size*2)
        # Backlog per iteration.
        print i, len(frames)
    hcam.stopAcquisition()
    fclose(bin_fd)
| false
| true
|
f716f787333a832b615b0a6c98f4f3cf3223b40f
| 148
|
py
|
Python
|
_config.py
|
atourkow/at-InAppScoring
|
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
|
[
"Apache-2.0"
] | null | null | null |
_config.py
|
atourkow/at-InAppScoring
|
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
|
[
"Apache-2.0"
] | null | null | null |
_config.py
|
atourkow/at-InAppScoring
|
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
|
[
"Apache-2.0"
] | null | null | null |
class Config(object):
    """Default connection settings for the scoring application."""

    def __init__(self):
        """Populate defaults: a local server list and the app keyspace."""
        self.servers = ["127.0.0.1"]
        self.keyspace = 'at_inappscoring'
| 18.5
| 41
| 0.513514
|
class Config(object):
    """Holds the application's default connection configuration."""

    def __init__(self):
        """Set up the default server list and keyspace name."""
        default_servers = ["127.0.0.1"]
        self.servers = default_servers
        self.keyspace = 'at_inappscoring'
| true
| true
|
f716f8c015aff41ef3b8ac361688f47dbb0587ed
| 860
|
py
|
Python
|
app/models.py
|
imireallan/Bucketlist
|
2dc496cf866d6b21594f9bd7efd12af43ee77cba
|
[
"MIT"
] | null | null | null |
app/models.py
|
imireallan/Bucketlist
|
2dc496cf866d6b21594f9bd7efd12af43ee77cba
|
[
"MIT"
] | null | null | null |
app/models.py
|
imireallan/Bucketlist
|
2dc496cf866d6b21594f9bd7efd12af43ee77cba
|
[
"MIT"
] | null | null | null |
# app/models.py
from app import db
class Bucketlist(db.Model):
    """This class represents the bucketlist table."""

    __tablename__ = 'bucketlists'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the bucketlist.
    name = db.Column(db.String(255))
    # Creation timestamp, set by the database on insert.
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    # Last-modified timestamp, refreshed by the database on every update.
    date_modified = db.Column(
        db.DateTime, default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp())

    def __init__(self, name):
        """initialize with name."""
        self.name = name

    def save(self):
        """Persist this instance (insert or update) and commit."""
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def get_all():
        """Return all bucketlist rows."""
        return Bucketlist.query.all()

    def delete(self):
        """Delete this instance from the database and commit."""
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        """Readable representation used in logs and the shell."""
        return "<Bucketlist: {}>".format(self.name)
| 25.294118
| 78
| 0.636047
|
from app import db
class Bucketlist(db.Model):
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
| true
| true
|
f716fa1463f1193d4ff8966502c6b97695669997
| 5,056
|
py
|
Python
|
tf2_ndg_benckmarks/metrics/embedding.py
|
katsugeneration/tf2-ndg-benchmarks
|
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
|
[
"MIT"
] | 1
|
2020-11-17T07:03:47.000Z
|
2020-11-17T07:03:47.000Z
|
tf2_ndg_benckmarks/metrics/embedding.py
|
katsugeneration/tf2-ndg-benchmarks
|
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
|
[
"MIT"
] | null | null | null |
tf2_ndg_benckmarks/metrics/embedding.py
|
katsugeneration/tf2-ndg-benchmarks
|
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
|
[
"MIT"
] | null | null | null |
"""
Copyright:
Copyright 2019 by Katsuya SHIMABUKURO.
License:
MIT, see LICENSE for details.
"""
import pathlib
import gzip
import requests
import tqdm
import numpy as np
from gensim.models import KeyedVectors
FILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM'
SOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}'
SOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}'
class EmbeddingBase(object):
"""Embedding based score calculator base."""
def __init__(
self,
emb_path: str = '/tmp/vector.bin'):
"""Embedding class initialization.
Args:
emb_path (str): Embedding binary file path. When emb_path is not found, start to download from internet.
"""
self.emb_path = emb_path
_emb_path = pathlib.Path(self.emb_path)
if _emb_path.exists():
self._load()
return
_emb_gz_path = pathlib.Path(self.emb_path + '.gz')
# Downloas Google pre-trained vector bin from Google Drive
# Get confirmation code
res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID}))
cookies = res.cookies
res.close()
code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))]
# Download file.
res = requests.get(
SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}),
cookies=cookies,
stream=True)
pbar = tqdm.tqdm(unit="B", unit_scale=True, desc='Download Google news corpus pre-trained vectors.')
chunck_size = 1024
with _emb_gz_path.open('wb') as w:
for chunck in res.iter_content(chunck_size):
w.write(chunck)
pbar.update(len(chunck))
pbar.close()
res.close()
# Decompress gzip file.
with _emb_gz_path.open('rb') as f:
with _emb_path.open('wb') as w:
w.write(gzip.decompress(f.read()))
self._load()
def _load(self):
"""Load word2vec model."""
self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True)
assert 'dog' in self.model
def _get_vectors_from_sentene(self, sentence):
"""Return contains word vector list."""
return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model]
def _calc_cosine_sim(self, vectors1, vectors2):
"""Calculate cosine similarity."""
vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True)
vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True)
return np.dot(vectors1, vectors2.T)
class Average(EmbeddingBase):
"""Embedding based average score calculator."""
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
"""Embedding Average metrics.
Args:
reference (str): reference sentence.
hypothesis: (str): hypothesis sentence.
Return:
float: Embedding Average score
"""
emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0)
emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0)
return self._calc_cosine_sim(emb_ref, emb_hyp)
class VectorExtrema(EmbeddingBase):
"""Embedding based vector extrema score calculator."""
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
"""Embedding Vector Extrema metrics.
Args:
reference (str): reference sentence.
hypothesis: (str): hypothesis sentence.
Return:
float: Embedding Vector Extrema score
"""
def extema(vectors):
vec_max = np.max(vectors, axis=0)
vec_min = np.min(vectors, axis=0)
return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min))
extema_ref = extema(self._get_vectors_from_sentene(reference))
extema_hyp = extema(self._get_vectors_from_sentene(hypothesis))
return self._calc_cosine_sim(extema_ref, extema_hyp)
class GreedyMatching(EmbeddingBase):
"""Embedding based greedy matching score calculator."""
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
"""Embedding greedy matching metrics.
Args:
reference (str): reference sentence.
hypothesis: (str): hypothesis sentence.
Return:
float: Embedding Greedy Matching score
"""
embs_ref = np.array(self._get_vectors_from_sentene(reference))
embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis))
cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp) # len(embs_ref) x len(embs_hyp) matrix
greedy_ref = np.max(cs_matrix, axis=0).mean()
greedy_hyp = np.max(cs_matrix, axis=1).mean()
return (greedy_ref + greedy_hyp) / 2.0
| 31.798742
| 116
| 0.617089
|
import pathlib
import gzip
import requests
import tqdm
import numpy as np
from gensim.models import KeyedVectors
FILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM'
SOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}'
SOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}'
class EmbeddingBase(object):
def __init__(
self,
emb_path: str = '/tmp/vector.bin'):
self.emb_path = emb_path
_emb_path = pathlib.Path(self.emb_path)
if _emb_path.exists():
self._load()
return
_emb_gz_path = pathlib.Path(self.emb_path + '.gz')
res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID}))
cookies = res.cookies
res.close()
code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))]
res = requests.get(
SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}),
cookies=cookies,
stream=True)
pbar = tqdm.tqdm(unit="B", unit_scale=True, desc='Download Google news corpus pre-trained vectors.')
chunck_size = 1024
with _emb_gz_path.open('wb') as w:
for chunck in res.iter_content(chunck_size):
w.write(chunck)
pbar.update(len(chunck))
pbar.close()
res.close()
with _emb_gz_path.open('rb') as f:
with _emb_path.open('wb') as w:
w.write(gzip.decompress(f.read()))
self._load()
def _load(self):
self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True)
assert 'dog' in self.model
def _get_vectors_from_sentene(self, sentence):
return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model]
def _calc_cosine_sim(self, vectors1, vectors2):
vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True)
vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True)
return np.dot(vectors1, vectors2.T)
class Average(EmbeddingBase):
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0)
emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0)
return self._calc_cosine_sim(emb_ref, emb_hyp)
class VectorExtrema(EmbeddingBase):
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
def extema(vectors):
vec_max = np.max(vectors, axis=0)
vec_min = np.min(vectors, axis=0)
return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min))
extema_ref = extema(self._get_vectors_from_sentene(reference))
extema_hyp = extema(self._get_vectors_from_sentene(hypothesis))
return self._calc_cosine_sim(extema_ref, extema_hyp)
class GreedyMatching(EmbeddingBase):
def sentence_score(
self,
reference: str,
hypothesis: str) -> float:
embs_ref = np.array(self._get_vectors_from_sentene(reference))
embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis))
cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp)
greedy_ref = np.max(cs_matrix, axis=0).mean()
greedy_hyp = np.max(cs_matrix, axis=1).mean()
return (greedy_ref + greedy_hyp) / 2.0
| true
| true
|
f716fbfc8df971e7d631e0b0b90237bc351ca7f6
| 11,145
|
py
|
Python
|
dolo/algos/simulations.py
|
EconForge/dolo
|
9bb75b8f6ea87578393fe748003092ffb745e8d6
|
[
"BSD-2-Clause"
] | 50
|
2015-03-16T01:07:00.000Z
|
2020-02-07T22:18:43.000Z
|
dolo/algos/simulations.py
|
EconForge/dolo
|
9bb75b8f6ea87578393fe748003092ffb745e8d6
|
[
"BSD-2-Clause"
] | 130
|
2015-01-01T19:33:21.000Z
|
2020-04-27T15:57:22.000Z
|
dolo/algos/simulations.py
|
EconForge/dolo
|
9bb75b8f6ea87578393fe748003092ffb745e8d6
|
[
"BSD-2-Clause"
] | 56
|
2015-10-14T12:27:26.000Z
|
2020-04-21T14:56:02.000Z
|
import numpy
import pandas
import xarray as xr
import numpy as np
from dolo.compiler.model import Model
from dolo.numeric.optimize.ncpsolve import ncpsolve
from dolo.numeric.optimize.newton import newton as newton_solver
from dolo.numeric.optimize.newton import SerialDifferentiableFunction
## TODO: extend for mc process
def response(model, dr, varname, T=40, impulse: float = None):
i_exo = model.symbols["exogenous"].index(varname)
if impulse is None:
try:
impulse = numpy.sqrt(
model.exogenous.Σ[i_exo, i_exo]
) # works only for IID/AR1
except:
impulse = numpy.sqrt(model.exogenous.σ) # works only for IID/AR1
e1 = numpy.zeros(len(model.symbols["exogenous"]))
e1[i_exo] = impulse
exogenous = model.exogenous
print(exogenous)
print(T, e1)
m_simul = model.exogenous.response(T - 1, e1) # this is an xarray T x V
m_simul = m_simul.expand_dims("N")
m_simul = m_simul.transpose("T", "N", "V").data
sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)
irf = sim.sel(N=0)
return irf
def find_index(sim, values):
sh = sim.shape
N = sh[0]
T = sh[1]
indices = np.zeros((N, T), dtype=int)
for n in range(N):
for t in range(T):
v = sim[n, t, :]
ind = np.where((values == v[None, :]).all(axis=1))[0][0]
indices[n, t] = ind
return indices
from dolo.numeric.grids import CartesianGrid, UnstructuredGrid
from dolo.algos.results import AlgoResult
from dolo.numeric.decision_rule import DecisionRule
def simulate(
model: Model,
dr: DecisionRule,
*,
process=None,
N=1,
T=40,
s0=None,
i0=None,
m0=None,
driving_process=None,
seed=42,
stochastic=True,
):
"""Simulate a model using the specified decision rule.
Parameters
----------
model: Model
dr: decision rule
process:
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables.
"""
if isinstance(dr, AlgoResult):
dr = dr.dr
calib = model.calibration
parms = numpy.array(calib["parameters"])
if s0 is None:
s0 = calib["states"]
n_x = len(model.symbols["controls"])
n_s = len(model.symbols["states"])
s_simul = numpy.zeros((T, N, n_s))
x_simul = numpy.zeros((T, N, n_x))
s_simul[0, :, :] = s0[None, :]
# are we simulating a markov chain or a continuous process ?
if driving_process is not None:
if len(driving_process.shape) == 3:
m_simul = driving_process
sim_type = "continuous"
if m0 is None:
m0 = model.calibration["exogenous"]
x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :]
elif len(driving_process.shape) == 2:
i_simul = driving_process
nodes = dr.exo_grid.nodes
m_simul = nodes[i_simul]
# inds = i_simul.ravel()
# m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) )
sim_type = "discrete"
x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :]
else:
raise Exception("Incorrect specification of driving values.")
m0 = m_simul[0, :, :]
else:
from dolo.numeric.processes import DiscreteProcess
if process is None:
if hasattr(dr, "dprocess") and hasattr(dr.dprocess, "simulate"):
process = dr.dprocess
else:
process = model.exogenous
# detect type of simulation
if not isinstance(process, DiscreteProcess):
sim_type = "continuous"
else:
sim_type = "discrete"
if sim_type == "discrete":
if i0 is None:
i0 = 0
dp = process
m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
i_simul = find_index(m_simul, dp.values)
m0 = dp.node(i0)
x0 = dr.eval_is(i0, s0[None, :])[0, :]
else:
m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
if isinstance(m_simul, xr.DataArray):
m_simul = m_simul.data
sim_type = "continuous"
if m0 is None:
m0 = model.calibration["exogenous"]
x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :]
x_simul[0, :, :] = x0[None, :]
f = model.functions["arbitrage"]
g = model.functions["transition"]
numpy.random.seed(seed)
mp = m0
for i in range(T):
m = m_simul[i, :, :]
s = s_simul[i, :, :]
if sim_type == "discrete":
i_m = i_simul[i, :]
xx = [
dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0])
]
x = np.row_stack(xx)
else:
x = dr.eval_ms(m, s)
x_simul[i, :, :] = x
ss = g(mp, s, x, m, parms)
if i < T - 1:
s_simul[i + 1, :, :] = ss
mp = m
if "auxiliary" not in model.functions: # TODO: find a better test than this
l = [s_simul, x_simul]
varnames = model.symbols["states"] + model.symbols["controls"]
else:
aux = model.functions["auxiliary"]
a_simul = aux(
m_simul.reshape((N * T, -1)),
s_simul.reshape((N * T, -1)),
x_simul.reshape((N * T, -1)),
parms,
)
a_simul = a_simul.reshape(T, N, -1)
l = [m_simul, s_simul, x_simul, a_simul]
varnames = (
model.symbols["exogenous"]
+ model.symbols["states"]
+ model.symbols["controls"]
+ model.symbols["auxiliaries"]
)
simul = numpy.concatenate(l, axis=2)
if sim_type == "discrete":
varnames = ["_i_m"] + varnames
simul = np.concatenate([i_simul[:, :, None], simul], axis=2)
data = xr.DataArray(
simul,
dims=["T", "N", "V"],
coords={"T": range(T), "N": range(N), "V": varnames},
)
return data
def tabulate(
model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs
):
import numpy
if isinstance(dr, AlgoResult):
dr = dr.dr
states_names = model.symbols["states"]
controls_names = model.symbols["controls"]
index = states_names.index(str(state))
if bounds is None:
try:
endo_grid = dr.endo_grid
bounds = [endo_grid.min[index], endo_grid.max[index]]
except:
domain = model.domain
bounds = [domain.min[index], domain.max[index]]
if bounds is None:
raise Exception("No bounds provided for simulation or by model.")
values = numpy.linspace(bounds[0], bounds[1], n_steps)
if s0 is None:
s0 = model.calibration["states"]
svec = numpy.row_stack([s0] * n_steps)
svec[:, index] = values
try:
dp = dr.dprocess
except:
dp = model.exogenous.discretize()
if (i0 is None) and (m0 is None):
from dolo.numeric.grids import UnstructuredGrid
if isinstance(dp.grid, UnstructuredGrid):
n_ms = dp.n_nodes
[q, r] = divmod(n_ms, 2)
i0 = q - 1 + r
else:
m0 = model.calibration["exogenous"]
if i0 is not None:
m = dp.node(i0)
xvec = dr.eval_is(i0, svec)
elif m0 is not None:
m = m0
xvec = dr.eval_ms(m0, svec)
mm = numpy.row_stack([m] * n_steps)
l = [mm, svec, xvec]
series = (
model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"]
)
if "auxiliary" in model.functions:
p = model.calibration["parameters"]
pp = numpy.row_stack([p] * n_steps)
avec = model.functions["auxiliary"](mm, svec, xvec, pp)
l.append(avec)
series.extend(model.symbols["auxiliaries"])
import pandas
tb = numpy.concatenate(l, axis=1)
df = pandas.DataFrame(tb, columns=series)
return df
def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]):
import numpy
import xarray as xr
if isinstance(dr, AlgoResult):
dr = dr.dr
if s0 is None:
s0 = model.calibration["states"]
if states is None:
states = model.symbols["states"]
assert len(states) == 2
domain = model.get_domain()
lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)]
i_x = model.symbols["states"].index(states[0])
i_y = model.symbols["states"].index(states[1])
vals = []
vstates = []
s = s0.copy()
for xx in lps[0]:
vv = []
s[i_x] = xx
for yy in lps[1]:
s[i_y] = yy
x = dr.eval_is(i0, s)
vv.append(numpy.concatenate([s, x]))
vals.append(vv)
vv = numpy.array(vals)
controls = model.symbols["states"] + model.symbols["controls"]
# tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V'])
tab = xr.DataArray(
vv,
dims=[states[0], states[1], "V"],
coords={states[0]: lps[0], states[1]: lps[1], "V": controls},
)
return tab
def plot3d(tab, varname):
X = numpy.array(tab[tab.dims[0]])
Y = numpy.array(tab[tab.dims[1]])
Z = numpy.array(tab.loc[:, :, varname])
data = [go.Surface(x=X, y=Y, z=Z)]
layout = go.Layout(
title="Equity",
autosize=False,
width=500,
height=500,
# xaxis=go.XAxis(title=tab.dims[0]),
# yaxis={'title':tab.dims[1]},
# zaxis={'title':varname},
xaxis=dict(
title="x Axis",
nticks=7,
titlefont=dict(family="Courier New, monospace", size=18, color="#7f7f7f"),
),
margin=dict(l=65, r=50, b=65, t=90),
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename="graph_" + varname)
def plot_decision_rule(plot_controls=None, **kwargs):
if isinstance(dr, AlgoResult):
dr = dr.dr
df = tabulate(dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None)
from matplotlib import pyplot
if isinstance(plot_controls, str):
cn = plot_controls
pyplot.plot(values, df[cn], **kwargs)
else:
for cn in plot_controls:
pyplot.plot(values, df[cn], label=cn, **kwargs)
pyplot.legend()
pyplot.xlabel("state = {} | mstate = {}".format(state, i0))
| 28.286802
| 126
| 0.557649
|
import numpy
import pandas
import xarray as xr
import numpy as np
from dolo.compiler.model import Model
from dolo.numeric.optimize.ncpsolve import ncpsolve
from dolo.numeric.optimize.newton import newton as newton_solver
from dolo.numeric.optimize.newton import SerialDifferentiableFunction
rname, T=40, impulse: float = None):
i_exo = model.symbols["exogenous"].index(varname)
if impulse is None:
try:
impulse = numpy.sqrt(
model.exogenous.Σ[i_exo, i_exo]
)
except:
impulse = numpy.sqrt(model.exogenous.σ)
e1 = numpy.zeros(len(model.symbols["exogenous"]))
e1[i_exo] = impulse
exogenous = model.exogenous
print(exogenous)
print(T, e1)
m_simul = model.exogenous.response(T - 1, e1)
m_simul = m_simul.expand_dims("N")
m_simul = m_simul.transpose("T", "N", "V").data
sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)
irf = sim.sel(N=0)
return irf
def find_index(sim, values):
sh = sim.shape
N = sh[0]
T = sh[1]
indices = np.zeros((N, T), dtype=int)
for n in range(N):
for t in range(T):
v = sim[n, t, :]
ind = np.where((values == v[None, :]).all(axis=1))[0][0]
indices[n, t] = ind
return indices
from dolo.numeric.grids import CartesianGrid, UnstructuredGrid
from dolo.algos.results import AlgoResult
from dolo.numeric.decision_rule import DecisionRule
def simulate(
model: Model,
dr: DecisionRule,
*,
process=None,
N=1,
T=40,
s0=None,
i0=None,
m0=None,
driving_process=None,
seed=42,
stochastic=True,
):
if isinstance(dr, AlgoResult):
dr = dr.dr
calib = model.calibration
parms = numpy.array(calib["parameters"])
if s0 is None:
s0 = calib["states"]
n_x = len(model.symbols["controls"])
n_s = len(model.symbols["states"])
s_simul = numpy.zeros((T, N, n_s))
x_simul = numpy.zeros((T, N, n_x))
s_simul[0, :, :] = s0[None, :]
if driving_process is not None:
if len(driving_process.shape) == 3:
m_simul = driving_process
sim_type = "continuous"
if m0 is None:
m0 = model.calibration["exogenous"]
x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :]
elif len(driving_process.shape) == 2:
i_simul = driving_process
nodes = dr.exo_grid.nodes
m_simul = nodes[i_simul]
sim_type = "discrete"
x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :]
else:
raise Exception("Incorrect specification of driving values.")
m0 = m_simul[0, :, :]
else:
from dolo.numeric.processes import DiscreteProcess
if process is None:
if hasattr(dr, "dprocess") and hasattr(dr.dprocess, "simulate"):
process = dr.dprocess
else:
process = model.exogenous
if not isinstance(process, DiscreteProcess):
sim_type = "continuous"
else:
sim_type = "discrete"
if sim_type == "discrete":
if i0 is None:
i0 = 0
dp = process
m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
i_simul = find_index(m_simul, dp.values)
m0 = dp.node(i0)
x0 = dr.eval_is(i0, s0[None, :])[0, :]
else:
m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
if isinstance(m_simul, xr.DataArray):
m_simul = m_simul.data
sim_type = "continuous"
if m0 is None:
m0 = model.calibration["exogenous"]
x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :]
x_simul[0, :, :] = x0[None, :]
f = model.functions["arbitrage"]
g = model.functions["transition"]
numpy.random.seed(seed)
mp = m0
for i in range(T):
m = m_simul[i, :, :]
s = s_simul[i, :, :]
if sim_type == "discrete":
i_m = i_simul[i, :]
xx = [
dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0])
]
x = np.row_stack(xx)
else:
x = dr.eval_ms(m, s)
x_simul[i, :, :] = x
ss = g(mp, s, x, m, parms)
if i < T - 1:
s_simul[i + 1, :, :] = ss
mp = m
if "auxiliary" not in model.functions:
l = [s_simul, x_simul]
varnames = model.symbols["states"] + model.symbols["controls"]
else:
aux = model.functions["auxiliary"]
a_simul = aux(
m_simul.reshape((N * T, -1)),
s_simul.reshape((N * T, -1)),
x_simul.reshape((N * T, -1)),
parms,
)
a_simul = a_simul.reshape(T, N, -1)
l = [m_simul, s_simul, x_simul, a_simul]
varnames = (
model.symbols["exogenous"]
+ model.symbols["states"]
+ model.symbols["controls"]
+ model.symbols["auxiliaries"]
)
simul = numpy.concatenate(l, axis=2)
if sim_type == "discrete":
varnames = ["_i_m"] + varnames
simul = np.concatenate([i_simul[:, :, None], simul], axis=2)
data = xr.DataArray(
simul,
dims=["T", "N", "V"],
coords={"T": range(T), "N": range(N), "V": varnames},
)
return data
def tabulate(
model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs
):
import numpy
if isinstance(dr, AlgoResult):
dr = dr.dr
states_names = model.symbols["states"]
controls_names = model.symbols["controls"]
index = states_names.index(str(state))
if bounds is None:
try:
endo_grid = dr.endo_grid
bounds = [endo_grid.min[index], endo_grid.max[index]]
except:
domain = model.domain
bounds = [domain.min[index], domain.max[index]]
if bounds is None:
raise Exception("No bounds provided for simulation or by model.")
values = numpy.linspace(bounds[0], bounds[1], n_steps)
if s0 is None:
s0 = model.calibration["states"]
svec = numpy.row_stack([s0] * n_steps)
svec[:, index] = values
try:
dp = dr.dprocess
except:
dp = model.exogenous.discretize()
if (i0 is None) and (m0 is None):
from dolo.numeric.grids import UnstructuredGrid
if isinstance(dp.grid, UnstructuredGrid):
n_ms = dp.n_nodes
[q, r] = divmod(n_ms, 2)
i0 = q - 1 + r
else:
m0 = model.calibration["exogenous"]
if i0 is not None:
m = dp.node(i0)
xvec = dr.eval_is(i0, svec)
elif m0 is not None:
m = m0
xvec = dr.eval_ms(m0, svec)
mm = numpy.row_stack([m] * n_steps)
l = [mm, svec, xvec]
series = (
model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"]
)
if "auxiliary" in model.functions:
p = model.calibration["parameters"]
pp = numpy.row_stack([p] * n_steps)
avec = model.functions["auxiliary"](mm, svec, xvec, pp)
l.append(avec)
series.extend(model.symbols["auxiliaries"])
import pandas
tb = numpy.concatenate(l, axis=1)
df = pandas.DataFrame(tb, columns=series)
return df
def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]):
import numpy
import xarray as xr
if isinstance(dr, AlgoResult):
dr = dr.dr
if s0 is None:
s0 = model.calibration["states"]
if states is None:
states = model.symbols["states"]
assert len(states) == 2
domain = model.get_domain()
lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)]
i_x = model.symbols["states"].index(states[0])
i_y = model.symbols["states"].index(states[1])
vals = []
vstates = []
s = s0.copy()
for xx in lps[0]:
vv = []
s[i_x] = xx
for yy in lps[1]:
s[i_y] = yy
x = dr.eval_is(i0, s)
vv.append(numpy.concatenate([s, x]))
vals.append(vv)
vv = numpy.array(vals)
controls = model.symbols["states"] + model.symbols["controls"]
tab = xr.DataArray(
vv,
dims=[states[0], states[1], "V"],
coords={states[0]: lps[0], states[1]: lps[1], "V": controls},
)
return tab
def plot3d(tab, varname):
X = numpy.array(tab[tab.dims[0]])
Y = numpy.array(tab[tab.dims[1]])
Z = numpy.array(tab.loc[:, :, varname])
data = [go.Surface(x=X, y=Y, z=Z)]
layout = go.Layout(
title="Equity",
autosize=False,
width=500,
height=500,
xaxis=dict(
title="x Axis",
nticks=7,
titlefont=dict(family="Courier New, monospace", size=18, color="#7f7f7f"),
),
margin=dict(l=65, r=50, b=65, t=90),
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename="graph_" + varname)
def plot_decision_rule(plot_controls=None, **kwargs):
if isinstance(dr, AlgoResult):
dr = dr.dr
df = tabulate(dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None)
from matplotlib import pyplot
if isinstance(plot_controls, str):
cn = plot_controls
pyplot.plot(values, df[cn], **kwargs)
else:
for cn in plot_controls:
pyplot.plot(values, df[cn], label=cn, **kwargs)
pyplot.legend()
pyplot.xlabel("state = {} | mstate = {}".format(state, i0))
| true
| true
|
f716fcb9c6975762b9257b5426a806187ac75ac5
| 6,270
|
py
|
Python
|
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
|
vishvananda/dagster
|
f6aa44714246bc770fe05a9c986fe8b7d848956b
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
|
vishvananda/dagster
|
f6aa44714246bc770fe05a9c986fe8b7d848956b
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
|
vishvananda/dagster
|
f6aa44714246bc770fe05a9c986fe8b7d848956b
|
[
"Apache-2.0"
] | null | null | null |
# pylint doesn't understand the way that pytest constructs fixture dependnecies
# pylint: disable=redefined-outer-name
import datetime
import os
import shutil
import subprocess
import sys
import tempfile
import uuid
import airflow.plugins_manager
import docker
import pytest
from dagster import check
from dagster.core.execution import create_execution_plan
from dagster.utils import load_yaml_from_path, mkdir_p, script_relative_path
from dagster_airflow import scaffold_airflow_dag
from .test_project.dagster_airflow_demo import define_demo_execution_pipeline
from .utils import reload_module
IMAGE = 'dagster-airflow-demo'
# py2 compat
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@pytest.fixture(scope='module')
def airflow_home():
airflow_home_dir = os.getenv('AIRFLOW_HOME')
assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?'
airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir))
return airflow_home_dir
@pytest.fixture(scope='module')
def temp_dir():
dir_path = os.path.join('/tmp', str(uuid.uuid4()))
mkdir_p(dir_path)
yield dir_path
shutil.rmtree(dir_path)
@pytest.fixture(scope='module')
def docker_client():
try:
client = docker.from_env()
client.info()
except docker.errors.APIError:
check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url('')))
return client
@pytest.fixture(scope='module')
def docker_image(docker_client):
try:
docker_client.images.get(IMAGE)
except docker.errors.ImageNotFound:
check.failed(
'Couldn\'t find docker image {image} required for test: please run the script at '
'{script_path}'.format(
image=IMAGE, script_path=script_relative_path('test_project/build.sh')
)
)
return IMAGE
@pytest.fixture(scope='module')
def dags_path(airflow_home):
path = os.path.join(airflow_home, 'dags', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
path = os.path.join(airflow_home, 'plugins', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def host_tmp_dir():
mkdir_p('/tmp/results')
return '/tmp/results'
@pytest.fixture(scope='module')
def airflow_test(docker_image, dags_path, plugins_path, host_tmp_dir):
assert docker_image
plugin_definition_filename = 'dagster_plugin.py'
plugin_path = os.path.abspath(os.path.join(plugins_path, plugin_definition_filename))
temporary_plugin_path = None
try:
if os.path.exists(plugin_path):
temporary_plugin_file = tempfile.NamedTemporaryFile(delete=False)
temporary_plugin_file.close()
temporary_plugin_path = temporary_plugin_file.name
shutil.copyfile(plugin_path, temporary_plugin_path)
shutil.copyfile(
script_relative_path(os.path.join('..', 'dagster_airflow', plugin_definition_filename)),
plugin_path,
)
mkdir_p(os.path.abspath(dags_path))
sys.path.append(os.path.abspath(dags_path))
created_init_py = False
init_py_path = os.path.join(os.path.abspath(dags_path), '__init__.py')
if not os.path.exists(init_py_path):
with open(init_py_path, 'a'):
pass
created_init_py = True
subprocess.check_output(['airflow', 'initdb'])
# necromancy; follows airflow.operators.__init__
reload_module(airflow.plugins_manager)
for operators_module in airflow.plugins_manager.operators_modules:
sys.modules[operators_module.__name__] = operators_module
globals()[operators_module._name] = operators_module
# Test that we can now actually import the DagsterOperator
from airflow.operators.dagster_plugin import DagsterOperator
del DagsterOperator
yield (docker_image, dags_path, host_tmp_dir)
finally:
if os.path.exists(plugin_path):
os.remove(plugin_path)
if temporary_plugin_path is not None:
shutil.copyfile(temporary_plugin_path, plugin_path)
os.remove(temporary_plugin_path)
if created_init_py:
os.remove(init_py_path)
sys.path = sys.path[:-1]
@pytest.fixture(scope='module')
def scaffold_dag(airflow_test):
docker_image, dags_path, _ = airflow_test
pipeline = define_demo_execution_pipeline()
env_config = load_yaml_from_path(script_relative_path('test_project/env.yml'))
tempdir = tempfile.gettempdir()
static_path, editable_path = scaffold_airflow_dag(
pipeline=pipeline,
env_config=env_config,
image=docker_image,
output_path=tempdir,
dag_kwargs={'default_args': {'start_date': datetime.datetime(1900, 1, 1)}},
)
# Ensure that the scaffolded files parse correctly
subprocess.check_output(['python', editable_path])
shutil.copyfile(
static_path, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)))
)
shutil.copyfile(
editable_path, os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)))
)
os.remove(static_path)
os.remove(editable_path)
execution_date = datetime.datetime.utcnow().strftime('%Y-%m-%d')
pipeline_name = pipeline.name
execution_plan = create_execution_plan(pipeline, env_config)
yield (
pipeline_name,
execution_plan,
execution_date,
os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))),
os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))),
)
os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))))
os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))))
try:
os.remove(
os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)[:-3] + '.pyc'))
)
os.remove(
os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)[:-3] + '.pyc'))
)
except (FileNotFoundError, OSError):
pass
| 29.299065
| 100
| 0.693461
|
# pylint: disable=redefined-outer-name
import datetime
import os
import shutil
import subprocess
import sys
import tempfile
import uuid
import airflow.plugins_manager
import docker
import pytest
from dagster import check
from dagster.core.execution import create_execution_plan
from dagster.utils import load_yaml_from_path, mkdir_p, script_relative_path
from dagster_airflow import scaffold_airflow_dag
from .test_project.dagster_airflow_demo import define_demo_execution_pipeline
from .utils import reload_module
IMAGE = 'dagster-airflow-demo'
# py2 compat
try:
    FileNotFoundError  # exists on py3; raises NameError on py2
except NameError:
    # On py2 fall back to IOError, which covers missing-file errors there.
    FileNotFoundError = IOError
@pytest.fixture(scope='module')
def airflow_home():
    '''Return the absolute, user-expanded AIRFLOW_HOME directory.

    Fails the test module immediately if the environment variable is unset,
    since that indicates airflow is not installed/configured.
    '''
    home = os.getenv('AIRFLOW_HOME')
    assert home, 'No AIRFLOW_HOME set -- is airflow installed?'
    return os.path.abspath(os.path.expanduser(home))
@pytest.fixture(scope='module')
def temp_dir():
    '''Yield a fresh, uniquely named directory under /tmp; remove it on teardown.'''
    scratch = os.path.join('/tmp', str(uuid.uuid4()))
    mkdir_p(scratch)
    yield scratch
    shutil.rmtree(scratch)
@pytest.fixture(scope='module')
def docker_client():
    '''Return a docker client connected to the local daemon.

    Fails the test module with a helpful message when the daemon is not
    reachable.
    '''
    # Bind `client` before the try block: if docker.from_env() itself raises
    # APIError, the original code hit a NameError building the failure message.
    client = None
    try:
        client = docker.from_env()
        client.info()
    except docker.errors.APIError:
        url = client._url('') if client is not None else '<unknown docker host>'
        check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=url))
    return client
@pytest.fixture(scope='module')
def docker_image(docker_client):
    '''Return the demo image name, failing if the image is not present locally.'''
    try:
        docker_client.images.get(IMAGE)
    except docker.errors.ImageNotFound:
        build_script = script_relative_path('test_project/build.sh')
        check.failed(
            'Couldn\'t find docker image {image} required for test: please run the script at '
            '{script_path}'.format(image=IMAGE, script_path=build_script)
        )
    return IMAGE
@pytest.fixture(scope='module')
def dags_path(airflow_home):
    '''Ensure the airflow dags folder exists; return it with a trailing separator.'''
    dags_dir = os.path.join(airflow_home, 'dags', '')
    mkdir_p(os.path.abspath(dags_dir))
    return dags_dir
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
    '''Ensure the airflow plugins folder exists; return it with a trailing separator.'''
    plugins_dir = os.path.join(airflow_home, 'plugins', '')
    mkdir_p(os.path.abspath(plugins_dir))
    return plugins_dir
@pytest.fixture(scope='module')
def host_tmp_dir():
    '''Create and return /tmp/results on the host.

    NOTE(review): presumably this directory is mounted into the docker
    container to collect results -- confirm against the container setup.
    '''
    results_dir = '/tmp/results'
    mkdir_p(results_dir)
    return results_dir
@pytest.fixture(scope='module')
def airflow_test(docker_image, dags_path, plugins_path, host_tmp_dir):
    '''Install the dagster airflow plugin into AIRFLOW_HOME and yield test paths.

    Backs up any pre-existing plugin file, initializes the airflow db, reloads
    the plugin machinery so DagsterOperator is importable, then yields
    ``(docker_image, dags_path, host_tmp_dir)``. Teardown restores everything.
    '''
    assert docker_image
    plugin_definition_filename = 'dagster_plugin.py'
    plugin_path = os.path.abspath(os.path.join(plugins_path, plugin_definition_filename))
    temporary_plugin_path = None
    # Initialize all cleanup flags BEFORE the try block so the finally clause
    # can always reference them -- the original raised NameError on
    # `created_init_py` if an early statement (e.g. copyfile) failed.
    created_init_py = False
    appended_sys_path = False
    init_py_path = None
    try:
        # Back up a pre-existing plugin file so it can be restored on teardown.
        if os.path.exists(plugin_path):
            temporary_plugin_file = tempfile.NamedTemporaryFile(delete=False)
            temporary_plugin_file.close()
            temporary_plugin_path = temporary_plugin_file.name
            shutil.copyfile(plugin_path, temporary_plugin_path)
        shutil.copyfile(
            script_relative_path(os.path.join('..', 'dagster_airflow', plugin_definition_filename)),
            plugin_path,
        )
        mkdir_p(os.path.abspath(dags_path))
        sys.path.append(os.path.abspath(dags_path))
        appended_sys_path = True
        init_py_path = os.path.join(os.path.abspath(dags_path), '__init__.py')
        if not os.path.exists(init_py_path):
            with open(init_py_path, 'a'):
                pass
            created_init_py = True
        subprocess.check_output(['airflow', 'initdb'])
        # necromancy; follows airflow.operators.__init__
        reload_module(airflow.plugins_manager)
        for operators_module in airflow.plugins_manager.operators_modules:
            sys.modules[operators_module.__name__] = operators_module
            globals()[operators_module._name] = operators_module
        # Test that we can now actually import the DagsterOperator
        from airflow.operators.dagster_plugin import DagsterOperator

        del DagsterOperator
        yield (docker_image, dags_path, host_tmp_dir)
    finally:
        if os.path.exists(plugin_path):
            os.remove(plugin_path)
        if temporary_plugin_path is not None:
            # Restore the backed-up plugin file.
            shutil.copyfile(temporary_plugin_path, plugin_path)
            os.remove(temporary_plugin_path)
        if created_init_py:
            os.remove(init_py_path)
        # Only pop sys.path if we actually appended to it -- the original
        # unconditionally dropped the last entry, which could remove an
        # unrelated path if setup failed before the append.
        if appended_sys_path:
            sys.path.pop()
@pytest.fixture(scope='module')
def scaffold_dag(airflow_test):
    '''Scaffold the demo pipeline into the airflow dags folder.

    Yields ``(pipeline_name, execution_plan, execution_date, static_path,
    editable_path)`` where the two paths point at the installed dag files.
    Teardown removes the installed files and any compiled bytecode.
    '''
    docker_image, dags_path, _ = airflow_test
    pipeline = define_demo_execution_pipeline()
    env_config = load_yaml_from_path(script_relative_path('test_project/env.yml'))
    tempdir = tempfile.gettempdir()
    static_path, editable_path = scaffold_airflow_dag(
        pipeline=pipeline,
        env_config=env_config,
        image=docker_image,
        output_path=tempdir,
        dag_kwargs={'default_args': {'start_date': datetime.datetime(1900, 1, 1)}},
    )
    # Ensure that the scaffolded files parse correctly
    subprocess.check_output(['python', editable_path])
    # Final install locations inside the dags folder -- computed once instead
    # of repeating the abspath/join/basename expression at every use site.
    installed_static_path = os.path.abspath(
        os.path.join(dags_path, os.path.basename(static_path))
    )
    installed_editable_path = os.path.abspath(
        os.path.join(dags_path, os.path.basename(editable_path))
    )
    shutil.copyfile(static_path, installed_static_path)
    shutil.copyfile(editable_path, installed_editable_path)
    os.remove(static_path)
    os.remove(editable_path)
    execution_date = datetime.datetime.utcnow().strftime('%Y-%m-%d')
    execution_plan = create_execution_plan(pipeline, env_config)
    yield (
        pipeline.name,
        execution_plan,
        execution_date,
        installed_static_path,
        installed_editable_path,
    )
    # Teardown: remove the installed dag files and any compiled bytecode.
    os.remove(installed_static_path)
    os.remove(installed_editable_path)
    try:
        os.remove(installed_static_path[:-3] + '.pyc')
        os.remove(installed_editable_path[:-3] + '.pyc')
    except (FileNotFoundError, OSError):
        pass
| true
| true
|
f716fd7bd67ce55c0a030c87f0370590bbe380cb
| 223
|
py
|
Python
|
tablas_multiplicar.py
|
robertogonzalezp/X-Serv-Python-Multiplica
|
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
|
[
"Apache-2.0"
] | null | null | null |
tablas_multiplicar.py
|
robertogonzalezp/X-Serv-Python-Multiplica
|
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
|
[
"Apache-2.0"
] | null | null | null |
tablas_multiplicar.py
|
robertogonzalezp/X-Serv-Python-Multiplica
|
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
|
[
"Apache-2.0"
] | null | null | null |
# Print the multiplication tables for 1 through 10.
for base in range(1, 11):
    print('\n', "Tabla del " + str(base))
    print("--------------")
    for factor in range(1, 11):
        print(str(base) + " por " + str(factor) + " es " + str(base * factor))
| 31.857143
| 86
| 0.520179
|
for numero1 in range(1, 11):
print('\n', "Tabla del " + str(numero1))
print("--------------")
for numero2 in range(1, 11):
print(str(numero1) + " por " + str(numero2) + " es " + str(numero1 * numero2))
| true
| true
|
f716fda140000ae4d4422e93bc1747b5025d820d
| 933
|
py
|
Python
|
aioredis_opentracing/instrument.py
|
Creativelair/AIORedis-Opentracing
|
eebf81785052faddaec8c00da74c862bbeeafdf9
|
[
"BSD-3-Clause"
] | null | null | null |
aioredis_opentracing/instrument.py
|
Creativelair/AIORedis-Opentracing
|
eebf81785052faddaec8c00da74c862bbeeafdf9
|
[
"BSD-3-Clause"
] | 1
|
2020-10-09T20:23:40.000Z
|
2020-10-09T20:23:40.000Z
|
aioredis_opentracing/instrument.py
|
Creativelair/AIORedis-Opentracing
|
eebf81785052faddaec8c00da74c862bbeeafdf9
|
[
"BSD-3-Clause"
] | null | null | null |
import opentracing
from signalfx_tracing import utils
from wrapt import wrap_function_wrapper
from aioredis_opentracing import tracing
# Module-level configuration. A host application may assign `config.tracer`
# before calling instrument() to override the global opentracing tracer.
config = utils.Config(
    tracer=None,
)
def instrument(tracer=None):
    """Patch aioredis so each newly constructed Redis client is traced.

    Tracer resolution order: the explicit ``tracer`` argument, then
    ``config.tracer``, then the global ``opentracing.tracer``. Idempotent:
    a second call returns immediately once aioredis is marked instrumented.
    """
    aioredis = utils.get_module('aioredis')
    if utils.is_instrumented(aioredis):
        return
    tracing.init_tracing(tracer=tracer or config.tracer or opentracing.tracer)

    def traced_client(__init__, client, args, kwargs):
        # wrapt calls this with (wrapped, instance, args, kwargs): run the
        # real __init__ first, then attach tracing to the constructed client.
        __init__(*args, **kwargs)
        tracing.trace_client(client)

    wrap_function_wrapper('aioredis', 'Redis.__init__', traced_client)
    utils.mark_instrumented(aioredis)
def uninstrument():
    """Stop wrapping new Redis clients; already-traced clients keep tracing."""
    aioredis = utils.get_module('aioredis')
    if utils.is_instrumented(aioredis):
        from aioredis import Redis
        utils.revert_wrapper(Redis, '__init__')
        utils.mark_uninstrumented(aioredis)
| 25.916667
| 78
| 0.738478
|
import opentracing
from signalfx_tracing import utils
from wrapt import wrap_function_wrapper
from aioredis_opentracing import tracing
config = utils.Config(
tracer=None,
)
def instrument(tracer=None):
aioredis = utils.get_module('aioredis')
if utils.is_instrumented(aioredis):
return
tracing.init_tracing(tracer=tracer or config.tracer or opentracing.tracer)
def traced_client(__init__, client, args, kwargs):
__init__(*args, **kwargs)
tracing.trace_client(client)
wrap_function_wrapper('aioredis', 'Redis.__init__', traced_client)
utils.mark_instrumented(aioredis)
def uninstrument():
aioredis = utils.get_module('aioredis')
if not utils.is_instrumented(aioredis):
return
from aioredis import Redis
utils.revert_wrapper(Redis, '__init__')
utils.mark_uninstrumented(aioredis)
| true
| true
|
f716fdec7574c7c1759ea315c98291d9a23d5771
| 2,751
|
py
|
Python
|
tests/di/core/test_instance.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | 8
|
2021-02-05T16:17:31.000Z
|
2022-03-03T00:01:33.000Z
|
tests/di/core/test_instance.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | null | null | null |
tests/di/core/test_instance.py
|
dlski/python-di
|
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
|
[
"MIT"
] | null | null | null |
import pytest
from di.core.compose import ApplicationComposer, ComposedApplication
from di.core.element import Element
from di.core.injection import InjectionSolver
from di.core.instance import (
ApplicationInstanceElementNotFound,
RecursiveApplicationInstance,
RecursiveApplicationInstanceBuilder,
RecursiveProvideContext,
)
from di.core.module import ModuleElementConsistencyCheck, ModuleImportSolver
from tests.di.core.conftest import AppGenerator
@pytest.fixture
def composer():
    '''Build an ApplicationComposer with default solvers and consistency check.'''
    injection_solver = InjectionSolver()
    import_solver = ModuleImportSolver()
    consistency_check = ModuleElementConsistencyCheck()
    return ApplicationComposer(injection_solver, import_solver, consistency_check)
@pytest.fixture
def composed(
    app_generator: AppGenerator, composer: ApplicationComposer
) -> ComposedApplication:
    # Compose the generator's known-valid application once per test function.
    return composer.compose(app_generator.valid_app)
def _build_instance(composed: ComposedApplication) -> RecursiveApplicationInstance:
    # Helper: build a concrete instance graph from a composed application.
    return RecursiveApplicationInstanceBuilder(composed).build()
def test_instance(composed: ComposedApplication, app_generator: AppGenerator):
    """Resolved element values must share the same dependency object instances."""
    gen = app_generator
    instance = _build_instance(composed)
    a1e, a2e = gen.a_elements
    (b1e,) = gen.b_elements
    c1e, c2e = gen.c_elements
    # Values are requested from dependents toward dependencies; NOTE(review):
    # the order may deliberately exercise on-demand resolution -- confirm.
    c2 = instance.value_of(c2e)
    c1 = instance.value_of(c1e)
    b1 = instance.value_of(b1e)
    a2 = instance.value_of(a2e)
    a1 = instance.value_of(a1e)
    # Each dependent must hold the identical (not merely equal) dependency.
    assert c2.c1 is c1
    assert c1.a2 is a2
    assert c1.b1 is b1
    assert a2.a1 is a1
def test_instance_not_found(composed: ComposedApplication, app_generator: AppGenerator):
    """Requesting an element outside the composed application must raise."""
    instance = _build_instance(composed)
    missing_element = app_generator.b_cd_elements[-1]
    with pytest.raises(ApplicationInstanceElementNotFound):
        instance.value_of(missing_element)
class _RecursiveProvideContext(RecursiveProvideContext):
    # Class-level (shared across instances) mutable state: records the order
    # of top-level provide() calls for later comparison in the test.
    _call_stack = []
    primary_sequence = []

    def provide(self, element: Element):
        # Only record elements provided at the top of the recursion; nested
        # provides triggered while satisfying dependencies are not primary.
        if not self._call_stack:
            self.primary_sequence.append(element)
        self._call_stack.append(self)
        try:
            super().provide(element)
        finally:
            # Always unwind the stack, even when the inner provide raises.
            self._call_stack.pop()
class _RecursiveApplicationInstanceBuilder(RecursiveApplicationInstanceBuilder):
    # Builder variant that injects the recording provide-context above.
    def _provide_context(self):
        return _RecursiveProvideContext(self.app)
def test_instance_bootstrap(app_generator: AppGenerator, composer: ApplicationComposer):
    """Bootstrap elements must be provided in the composed step order."""
    composed = composer.compose(app_generator.bootstrap_app)
    _RecursiveApplicationInstanceBuilder(composed).build()
    # Flatten module steps -> steps -> elements into one expected sequence.
    expected_sequence = [
        element
        for module_step in composed.bootstrap_steps
        for step in module_step.steps
        for element in step
    ]
    assert expected_sequence == _RecursiveProvideContext.primary_sequence
| 30.230769
| 88
| 0.751
|
import pytest
from di.core.compose import ApplicationComposer, ComposedApplication
from di.core.element import Element
from di.core.injection import InjectionSolver
from di.core.instance import (
ApplicationInstanceElementNotFound,
RecursiveApplicationInstance,
RecursiveApplicationInstanceBuilder,
RecursiveProvideContext,
)
from di.core.module import ModuleElementConsistencyCheck, ModuleImportSolver
from tests.di.core.conftest import AppGenerator
@pytest.fixture
def composer():
return ApplicationComposer(
InjectionSolver(),
ModuleImportSolver(),
ModuleElementConsistencyCheck(),
)
@pytest.fixture
def composed(
app_generator: AppGenerator, composer: ApplicationComposer
) -> ComposedApplication:
return composer.compose(app_generator.valid_app)
def _build_instance(composed: ComposedApplication) -> RecursiveApplicationInstance:
return RecursiveApplicationInstanceBuilder(composed).build()
def test_instance(composed: ComposedApplication, app_generator: AppGenerator):
gen = app_generator
instance = _build_instance(composed)
a1e, a2e = gen.a_elements
(b1e,) = gen.b_elements
c1e, c2e = gen.c_elements
c2 = instance.value_of(c2e)
c1 = instance.value_of(c1e)
b1 = instance.value_of(b1e)
a2 = instance.value_of(a2e)
a1 = instance.value_of(a1e)
assert c2.c1 is c1
assert c1.a2 is a2
assert c1.b1 is b1
assert a2.a1 is a1
def test_instance_not_found(composed: ComposedApplication, app_generator: AppGenerator):
gen = app_generator
instance = _build_instance(composed)
bce = gen.b_cd_elements[-1]
with pytest.raises(ApplicationInstanceElementNotFound):
instance.value_of(bce)
class _RecursiveProvideContext(RecursiveProvideContext):
_call_stack = []
primary_sequence = []
def provide(self, element: Element):
if not self._call_stack:
self.primary_sequence.append(element)
self._call_stack.append(self)
try:
super().provide(element)
finally:
self._call_stack.pop()
class _RecursiveApplicationInstanceBuilder(RecursiveApplicationInstanceBuilder):
def _provide_context(self):
return _RecursiveProvideContext(self.app)
def test_instance_bootstrap(app_generator: AppGenerator, composer: ApplicationComposer):
composed = composer.compose(app_generator.bootstrap_app)
_RecursiveApplicationInstanceBuilder(composed).build()
expected_sequence = []
for module_step in composed.bootstrap_steps:
for step in module_step.steps:
expected_sequence.extend(step)
produced_sequence = _RecursiveProvideContext.primary_sequence
assert expected_sequence == produced_sequence
| true
| true
|
f716fe49497e0fa092b298e9bf377c75b13a12bf
| 1,247
|
py
|
Python
|
servers/python/coweb/auth/public.py
|
opencoweb/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 83
|
2015-01-05T19:02:57.000Z
|
2021-11-19T02:48:09.000Z
|
servers/python/coweb/auth/public.py
|
xuelingxiao/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 3
|
2015-12-16T13:49:33.000Z
|
2019-06-17T13:38:50.000Z
|
servers/python/coweb/auth/public.py
|
xuelingxiao/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 14
|
2015-04-29T22:36:53.000Z
|
2021-11-18T03:24:29.000Z
|
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
from .base import AuthBase
class PublicAuth(AuthBase):
    '''Public (no-login) authentication that mints throwaway usernames.'''

    cookieName = 'coweb.auth.public.username'
    _userId = 0

    def requires_login(self):
        '''No login required; usernames are generated automatically.'''
        return False

    def requires_cookies(self):
        '''Relies on tornado's secure-cookie support.'''
        return True

    def get_current_user(self, handler):
        '''Return the username from the secure cookie, minting one if absent.'''
        username = handler.get_secure_cookie(self.cookieName)
        if username:
            return username
        # Mint a sequential userXXX name. The counter restarts with the
        # server, so names may repeat across restarts -- acceptable for this
        # dummy public auth.
        username = 'user%03d' % self._userId
        self._userId += 1
        handler.set_secure_cookie(self.cookieName, username, expires_days=1)
        return username

    def clear_credentials(self, handler):
        '''Drop the authentication cookie.'''
        handler.clear_cookie(self.cookieName)
| 34.638889
| 80
| 0.642342
|
from .base import AuthBase
class PublicAuth(AuthBase):
cookieName = 'coweb.auth.public.username'
_userId = 0
def requires_login(self):
return False
def requires_cookies(self):
return True
def get_current_user(self, handler):
username = handler.get_secure_cookie(self.cookieName)
if not username:
username = 'user%03d' % self._userId
# public auth
self._userId += 1
handler.set_secure_cookie(self.cookieName, username, expires_days=1)
return username
def clear_credentials(self, handler):
handler.clear_cookie(self.cookieName)
| true
| true
|
f716fedc6fe525ef992c71afd6110e49481a4bad
| 566
|
py
|
Python
|
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
from ctypes import *
class PyObject(Structure):
    # ctypes view of the start of a CPython object: a refcount followed by a
    # pointer-sized type slot, letting the code below rewrite ob_type in place.
    _fields_ = [('ob_refcnt', c_size_t),
                ('ob_type', py_object)]
class PseudoTypeType(object):
    # Stand-in "type" whose __repr__ lookup fails on purpose -- evidently a
    # fixture for exercising a debugger against unrepr-able pseudo types.
    def __getattribute__(self, name):
        if name == '__repr__':
            raise Exception()
        elif name == '__name__':
            return 'PseudoType'
        # NOTE(review): every other attribute implicitly resolves to None;
        # presumably intentional for the test -- confirm.
class PseudoType(object):
    '''Class whose instances always repr as the fixed string 'pseudo'.'''

    def __repr__(self):
        return 'pseudo'
# Rewrite PseudoType's ob_type slot in memory so its type becomes
# PseudoTypeType. NOTE(review): `obj` is created before the swap --
# presumably so an instance predates the type change; confirm intent.
PseudoType_ptr = cast(id(PseudoType), POINTER(PyObject))
obj = PseudoType()
PseudoType_ptr.contents.ob_type = py_object(PseudoTypeType)
print()
| 25.727273
| 60
| 0.625442
|
from ctypes import *
class PyObject(Structure):
_fields_ = [('ob_refcnt', c_size_t),
('ob_type', py_object)]
class PseudoTypeType(object):
def __getattribute__(self, name):
if name == '__repr__':
raise Exception()
elif name == '__name__':
return 'PseudoType'
class PseudoType(object):
def __repr__(self):
return 'pseudo'
PseudoType_ptr = cast(id(PseudoType), POINTER(PyObject))
obj = PseudoType()
PseudoType_ptr.contents.ob_type = py_object(PseudoTypeType)
print()
| true
| true
|
f716ff229a4800b23522336aaaedc9e85c29ccb7
| 1,115
|
py
|
Python
|
userbot/modules/__init__.py
|
caerus19/Userator
|
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
|
[
"MIT"
] | null | null | null |
userbot/modules/__init__.py
|
caerus19/Userator
|
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
|
[
"MIT"
] | null | null | null |
userbot/modules/__init__.py
|
caerus19/Userator
|
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
|
[
"MIT"
] | 1
|
2021-11-12T18:00:49.000Z
|
2021-11-12T18:00:49.000Z
|
# U S Σ R Δ T O R / Ümüd
""" U S Σ R Δ T O R """
from userbot import LOGS
from telethon.tl.types import DocumentAttributeFilename
def __list_all_modules():
    '''Return the bare module names of every .py file beside this package's __init__.'''
    from os.path import basename, dirname, isfile
    import glob

    module_files = glob.glob(dirname(__file__) + "/*.py")
    return [
        basename(path)[:-3]
        for path in module_files
        if isfile(path) and path.endswith(".py") and not path.endswith("__init__.py")
    ]
# Discover sibling modules in deterministic (sorted) order and export them.
# The log message is Azerbaijani for "modules to be loaded".
ALL_MODULES = sorted(__list_all_modules())
LOGS.info("Yüklənəcək modullar: %s", str(ALL_MODULES))
__all__ = ALL_MODULES + ["ALL_MODULES"]
async def MEDIACHECK(reply):
    """Validate that `reply` carries usable image/video media.

    Returns ``(media, kind)`` where ``kind`` is ``"img"`` or ``"vid"``, or
    ``False`` when the reply has no media, is an animated sticker, or is
    audio/voice.
    """
    media_type = "img"  # renamed from `type`, which shadowed the builtin
    if not (reply and reply.media):
        return False
    if reply.photo:
        data = reply.photo
    elif reply.document:
        # Reject animated stickers outright.
        if DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in reply.media.document.attributes:
            return False
        if reply.gif or reply.video:
            media_type = "vid"
        if reply.audio or reply.voice:
            return False
        data = reply.media.document
    else:
        return False
    # `data is None` was redundant with falsiness; a single truthiness check
    # preserves the original behavior.
    if not data:
        return False
    return (data, media_type)
| 24.777778
| 101
| 0.675336
|
from userbot import LOGS
from telethon.tl.types import DocumentAttributeFilename
def __list_all_modules():
from os.path import dirname, basename, isfile
import glob
mod_paths = glob.glob(dirname(__file__) + "/*.py")
all_modules = [
basename(f)[:-3] for f in mod_paths
if isfile(f) and f.endswith(".py") and not f.endswith("__init__.py")
]
return all_modules
ALL_MODULES = sorted(__list_all_modules())
LOGS.info("Yüklənəcək modullar: %s", str(ALL_MODULES))
__all__ = ALL_MODULES + ["ALL_MODULES"]
async def MEDIACHECK(reply):
type = "img"
if reply and reply.media:
if reply.photo:
data = reply.photo
elif reply.document:
if DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in reply.media.document.attributes:
return False
if reply.gif or reply.video:
type = "vid"
if reply.audio or reply.voice:
return False
data = reply.media.document
else:
return False
else:
return False
if not data or data is None:
return False
else:
return (data, type)
| true
| true
|
f716ff4ad1f5c83780f6a28d11c0f4b0f33e91fd
| 513
|
py
|
Python
|
happyml/graphs/viz.py
|
guiferviz/happyml-py
|
4252d0cff27461e38da404553772dafbc74f3eaa
|
[
"BSD-Source-Code"
] | 1
|
2016-08-15T13:27:48.000Z
|
2016-08-15T13:27:48.000Z
|
happyml/graphs/viz.py
|
guiferviz/happyml-py
|
4252d0cff27461e38da404553772dafbc74f3eaa
|
[
"BSD-Source-Code"
] | null | null | null |
happyml/graphs/viz.py
|
guiferviz/happyml-py
|
4252d0cff27461e38da404553772dafbc74f3eaa
|
[
"BSD-Source-Code"
] | null | null | null |
# FIXME: not required dependency.
from graphviz import Digraph
def graph2dot(x, **kwargs):
    '''Render the computation graph reaching `x` as a left-to-right Digraph.

    Input nodes are drawn green, parameter nodes gold; extra kwargs are
    forwarded to the graphviz Digraph constructor.
    '''
    dot = Digraph(body=["rankdir=LR;"], **kwargs)
    for node in x.get_computation_path():
        node_id = str(node.id)
        if node.is_input:
            dot.node(node_id, node.name, color="green")
        elif node.is_parameter:
            dot.node(node_id, node.name, color="gold")
        else:
            dot.node(node_id, node.name)
        for upstream in node.inputs:
            dot.edge(str(upstream.id), node_id)
    return dot
| 22.304348
| 54
| 0.549708
|
from graphviz import Digraph
def graph2dot(x, **kwargs):
dot = Digraph(body=["rankdir=LR;"], **kwargs)
path = x.get_computation_path()
for i in path:
if i.is_input:
dot.node(str(i.id), i.name, color="green")
elif i.is_parameter:
dot.node(str(i.id), i.name, color="gold")
else:
dot.node(str(i.id), i.name)
for ii in i.inputs:
dot.edge(str(ii.id), str(i.id))
return dot
| true
| true
|
f716ff4c138f17dceedf81771978e6db1119f04a
| 1,758
|
py
|
Python
|
setup.py
|
hoogerheide/reductus
|
fcc78c06900cff89faceadb7b4eed7b87914c0af
|
[
"Unlicense"
] | 1
|
2021-06-11T19:24:49.000Z
|
2021-06-11T19:24:49.000Z
|
setup.py
|
hoogerheide/reductus
|
fcc78c06900cff89faceadb7b4eed7b87914c0af
|
[
"Unlicense"
] | null | null | null |
setup.py
|
hoogerheide/reductus
|
fcc78c06900cff89faceadb7b4eed7b87914c0af
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""Build/install script for reductus; also supports ``setup.py test``."""
import sys
import os

from setuptools import setup, find_packages

# Default to `install` when invoked with no arguments.
if len(sys.argv) == 1:
    sys.argv.append('install')

# `setup.py test` delegates straight to pytest with any remaining args.
if sys.argv[1] == 'test':
    from subprocess import call
    sys.exit(call([sys.executable, '-m', 'pytest'] + sys.argv[2:]))

# Create the resource file dataflow/git_revision
if os.system('"{sys.executable}" dataflow/rev.py'.format(sys=sys)) != 0:
    print("setup.py failed to build dataflow/git_revision", file=sys.stderr)
    sys.exit(1)

packages = find_packages(exclude=['reflbin'])

dist = setup(
    name='reductus',
    version='0.1b2',
    author='Paul Kienzle',
    author_email='paul.kienzle@nist.gov',
    url='http://github.com/reductus/reductus',
    description='Data reduction for neutron scattering',
    long_description=open('README.rst').read(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: Public Domain',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Physics',
    ],
    zip_safe=False,
    packages=packages,
    include_package_data=True,
    entry_points = {
        'console_scripts': ['reductus=web_gui.run:main'],
    },
    install_requires=[
        'scipy', 'numpy', 'h5py', 'uncertainties', 'docutils',
        'wheel', 'pytz', 'msgpack-python', 'flask',
    ],
    extras_require={
        'masked_curve_fit': ['numdifftools'],
    },
    tests_require=['pytest'],
)
| 29.79661
| 76
| 0.631968
|
import sys
import os
from setuptools import setup, find_packages
if len(sys.argv) == 1:
sys.argv.append('install')
if sys.argv[1] == 'test':
from subprocess import call
sys.exit(call([sys.executable, '-m', 'pytest'] + sys.argv[2:]))
if os.system('"{sys.executable}" dataflow/rev.py'.format(sys=sys)) != 0:
print("setup.py failed to build dataflow/git_revision", file=sys.stderr)
sys.exit(1)
packages = find_packages(exclude=['reflbin'])
dist = setup(
name='reductus',
version='0.1b2',
author='Paul Kienzle',
author_email='paul.kienzle@nist.gov',
url='http://github.com/reductus/reductus',
description='Data reduction for neutron scattering',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: Public Domain',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
],
zip_safe=False,
packages=packages,
include_package_data=True,
entry_points = {
'console_scripts': ['reductus=web_gui.run:main'],
},
install_requires=[
'scipy', 'numpy', 'h5py', 'uncertainties', 'docutils',
'wheel', 'pytz', 'msgpack-python', 'flask',
],
extras_require={
'masked_curve_fit': ['numdifftools'],
},
tests_require=['pytest'],
)
| true
| true
|
f716ff87b1f5b661462123694cbcb3fe6c7ec595
| 1,524
|
py
|
Python
|
Lesson09/x.py
|
PacktPublishing/Python-Fundamentals
|
f24569826b1b7f97e3d54630a34ae61110ca12da
|
[
"MIT"
] | 1
|
2021-04-23T14:01:56.000Z
|
2021-04-23T14:01:56.000Z
|
Lesson09/x.py
|
PacktPublishing/Python-Fundamentals
|
f24569826b1b7f97e3d54630a34ae61110ca12da
|
[
"MIT"
] | null | null | null |
Lesson09/x.py
|
PacktPublishing/Python-Fundamentals
|
f24569826b1b7f97e3d54630a34ae61110ca12da
|
[
"MIT"
] | 4
|
2021-06-29T05:57:44.000Z
|
2021-09-02T10:14:55.000Z
|
def remove_punctuation(st, case='l'):
    """Take in a string and return a list of words with no punctuation.

    case='l' returns every word; case='u' returns only title-cased words.
    Any other case value returns None (preserved from the original contract).
    """
    punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}“”‘’~'
    # BUG FIX: the original split the string into words BEFORE stripping
    # punctuation, so the punctuation removal had no effect on the result.
    # Strip first, then split.
    for c in punctuation:
        st = st.replace(c, '')
    all_words = st.split()
    if case == 'u':
        return [word for word in all_words if word.istitle()]
    if case == 'l':
        return all_words
def frequency_dictionary(words):
    """Return a dict mapping each word to its number of occurrences.

    Uses a single pass with dict.get, replacing the original's accidental
    O(n^2) `words.count(word)` call per word.
    """
    freq_dict = {}
    for word in words:
        freq_dict[word] = freq_dict.get(word, 0) + 1
    return freq_dict
def strip_common_words(words):
    """Return the words not listed in the 1000words.txt common-word file.

    BUG FIX: the original tested `word not in common_words` against the raw
    file TEXT, which is substring matching -- e.g. "a" matched almost any
    file. Split the file into a set of words for exact membership tests.
    """
    with open('1000words.txt', 'r') as common_file:
        common_words = set(common_file.read().split())
    return [word for word in words if word not in common_words]
def print_ranked_dictionary(dictionary, min_count=20):
    '''Print words whose count exceeds min_count, most frequent first.'''
    ranked_words = sorted(dictionary, key=dictionary.get, reverse=True)
    for word in ranked_words:
        count = dictionary[word]
        if count > min_count:
            print(word, ' repeats ', count, ' times')
def main():
    # Load the full text, tokenize it, and print the word-frequency ranking.
    with open('HarryPotter.txt', 'r') as text:
        text = text.read()
    word_list = remove_punctuation(text)
    dictionary = frequency_dictionary(word_list)
    print_ranked_dictionary(dictionary)

main()
| 28.222222
| 76
| 0.602362
|
def remove_punctuation(st, case='l'):
punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}“”‘’~'
all_words = st.split()
cap_words = []
for c in punctuation:
st = st.replace(c, '')
if case == 'u':
for word in all_words:
if word.istitle():
cap_words.append(word)
return cap_words
if case == 'l':
return all_words
def frequency_dictionary(words):
freq_dict = {}
for word in words:
freq_dict[word] = words.count(word)
return freq_dict
def strip_common_words(words):
unique_words = []
with open('1000words.txt', 'r') as common_words:
common_words = common_words.read()
for word in words:
if word not in common_words:
unique_words.append(word)
return unique_words
def print_ranked_dictionary(dictionary, min_count=20):
rankedList = sorted(dictionary, key=dictionary.get, reverse=True)
# ranking the dictionary (print out ranked words)
for i in range(0, len(rankedList)):
key = rankedList[i]
value = dictionary[key]
if value > min_count:
print(key, ' repeats ', value, ' times')
def main():
with open('HarryPotter.txt', 'r') as text:
text = text.read()
word_list = remove_punctuation(text)
#print(word_list)
dictionary = frequency_dictionary(word_list)
print_ranked_dictionary(dictionary)
main()
| true
| true
|
f716fffcedc3cbaba6d963cd4a7e2061ef83cc34
| 4,109
|
py
|
Python
|
mdeepctr/models/xdeepfm.py
|
TS-SE-GROUP/icme2019
|
7eefdb7de6a7ff3bec1721fafb822d80d80dbba3
|
[
"MIT"
] | 78
|
2019-02-21T12:44:11.000Z
|
2022-03-30T11:42:33.000Z
|
mdeepctr/models/xdeepfm.py
|
rightnowandholdon/icme2019
|
fe9b31db7bf19b08d5e5d41a259f0a297eb21766
|
[
"MIT"
] | 6
|
2019-04-11T13:14:46.000Z
|
2021-05-19T14:36:07.000Z
|
mdeepctr/models/xdeepfm.py
|
rightnowandholdon/icme2019
|
fe9b31db7bf19b08d5e5d41a259f0a297eb21766
|
[
"MIT"
] | 22
|
2019-02-21T02:51:54.000Z
|
2021-12-10T02:04:28.000Z
|
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf)
"""
import tensorflow as tf
from ..input_embedding import preprocess_input_embedding
from ..layers.core import PredictionLayer, MLP
from ..layers.interaction import CIN
from ..utils import check_feature_config_dict
from ..layers.utils import concat_fun
def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):
    """Instantiates the xDeepFM architecture.

    :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param embedding_size: positive integer,sparse feature embedding_size
    :param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
    :param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network
    :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit
    :param cin_activation: activation function used on feature maps
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vector
    :param l2_reg_deep: L2 regularizer strength applied to deep net
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param keep_prob: float in (0,1]. keep_prob used in deep net
    :param activation: Activation function to use in deep net
    :param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :param use_bn: bool. Whether use BatchNormalization before activation or not.in deep net
    :param output_dim: positive integer, number of model outputs; one deep tower plus prediction layer is built per output
    :return: A Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)
    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
                                                                          l2_reg_embedding, l2_reg_linear, init_std,
                                                                          seed, True)
    fm_input = concat_fun(deep_emb_list, axis=1)
    # The CIN branch (and its logit) only exists when layer sizes are given.
    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation,
                       cin_split_half, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out)
    deep_input = tf.keras.layers.Flatten()(fm_input)
    output=[]
    # Build one deep tower + combined logit per requested output.
    for _ in range(output_dim):
        deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
                       use_bn, seed)(deep_input)
        deep_logit = tf.keras.layers.Dense(
            1, use_bias=False, activation=None)(deep_out)
        if len(hidden_size) == 0 and len(cin_layer_size) == 0:  # only linear
            final_logit = linear_logit
        elif len(hidden_size) == 0 and len(cin_layer_size) > 0:  # linear + CIN
            final_logit = tf.keras.layers.add([linear_logit, exFM_logit])
        elif len(hidden_size) > 0 and len(cin_layer_size) == 0:  # linear + Deep
            final_logit = tf.keras.layers.add([linear_logit, deep_logit])
        elif len(hidden_size) > 0 and len(cin_layer_size) > 0:  # linear + CIN + Deep
            final_logit = tf.keras.layers.add(
                [linear_logit, deep_logit, exFM_logit])
        else:
            raise NotImplementedError
        output.append(PredictionLayer(final_activation)(final_logit))
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
| 54.065789
| 325
| 0.690436
|
import tensorflow as tf
from ..input_embedding import preprocess_input_embedding
from ..layers.core import PredictionLayer, MLP
from ..layers.interaction import CIN
from ..utils import check_feature_config_dict
from ..layers.utils import concat_fun
def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):
    """Build the xDeepFM architecture (linear + CIN + deep MLP) as a Keras model.

    :param feature_dim_dict: dict describing the input features
        (validated by ``check_feature_config_dict``).
    :param embedding_size: positive int, sparse feature embedding size.
    :param hidden_size: layer sizes of the deep MLP part; empty disables it.
    :param cin_layer_size: layer sizes of the CIN part; empty disables it.
    :param cin_split_half: bool, CIN half-split option (see ``CIN`` layer).
    :param cin_activation: activation used inside the CIN layer.
    :param l2_reg_linear: L2 strength applied to the linear part.
    :param l2_reg_embedding: L2 strength applied to embedding vectors.
    :param l2_reg_deep: L2 strength applied to the deep MLP.
    :param init_std: float, stddev for embedding initialization.
    :param seed: integer, random seed.
    :param keep_prob: float in (0, 1], keep probability of the deep MLP dropout.
    :param activation: activation function of the deep MLP.
    :param final_activation: output activation, usually ``'sigmoid'`` or ``'linear'``.
    :param use_bn: bool, whether to apply BatchNormalization in the deep MLP.
    :param output_dim: number of output heads; one prediction head per dim.
    :return: a Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)

    # Shared input pipeline: per-field embeddings plus the linear (order-1) logit.
    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear,
        init_std, seed, True)

    # (batch, field_num, embedding_size) tensor consumed by both CIN and deep parts.
    field_embeddings = concat_fun(deep_emb_list, axis=1)

    use_cin = len(cin_layer_size) > 0
    use_deep = len(hidden_size) > 0

    if use_cin:
        cin_out = CIN(cin_layer_size, cin_activation,
                      cin_split_half, seed)(field_embeddings)
        exFM_logit = tf.keras.layers.Dense(1, activation=None,)(cin_out)

    flat_embeddings = tf.keras.layers.Flatten()(field_embeddings)

    outputs = []
    for _ in range(output_dim):
        # Each output head gets its own MLP tower over the shared embeddings.
        dnn_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
                      use_bn, seed)(flat_embeddings)
        dnn_logit = tf.keras.layers.Dense(
            1, use_bias=False, activation=None)(dnn_out)
        if not use_deep and not use_cin:      # only linear
            head_logit = linear_logit
        elif not use_deep and use_cin:        # linear + CIN
            head_logit = tf.keras.layers.add([linear_logit, exFM_logit])
        elif use_deep and not use_cin:        # linear + Deep
            head_logit = tf.keras.layers.add([linear_logit, dnn_logit])
        elif use_deep and use_cin:            # linear + CIN + Deep
            head_logit = tf.keras.layers.add(
                [linear_logit, dnn_logit, exFM_logit])
        else:
            raise NotImplementedError
        outputs.append(PredictionLayer(final_activation)(head_logit))

    return tf.keras.models.Model(inputs=inputs_list, outputs=outputs)
| true
| true
|
f717001c298682adebd3ce0fdb017a670744af55
| 4,004
|
py
|
Python
|
ansible/deploy.py
|
CloudReactor/aws-ecs-cloudreactor-deployer
|
0abf182780dae41e646eef7ef23c029a9cbbba43
|
[
"BSD-2-Clause"
] | null | null | null |
ansible/deploy.py
|
CloudReactor/aws-ecs-cloudreactor-deployer
|
0abf182780dae41e646eef7ef23c029a9cbbba43
|
[
"BSD-2-Clause"
] | null | null | null |
ansible/deploy.py
|
CloudReactor/aws-ecs-cloudreactor-deployer
|
0abf182780dae41e646eef7ef23c029a9cbbba43
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/local/bin/python
import argparse
import logging
import os
import shlex
import subprocess
import sys
_DEFAULT_LOG_LEVEL = 'INFO'

if __name__ == '__main__':
    # CLI entry point: collects deployment parameters from args/env and
    # delegates the actual work to ansible-playbook as a subprocess.
    arg_parser = argparse.ArgumentParser(prog='deploy', allow_abbrev=False,
            description="""
Deploys a project to AWS ECS and CloudReactor using Ansible.
""")

    # Environment variables provide defaults so CI systems can omit arguments.
    env_deployment = os.environ.get('DEPLOYMENT_ENVIRONMENT')
    env_tasks = os.environ.get('TASK_NAMES')
    extra_options = os.environ.get('EXTRA_ANSIBLE_OPTIONS')
    fallback_ansible_args = shlex.split(extra_options) if extra_options else []

    # 'deployment' becomes optional only when an env default exists;
    # nargs=None means "exactly one positional argument required".
    arg_parser.add_argument('deployment',
            nargs=('?' if env_deployment else None), default=env_deployment,
            help='Name of deployment environment (i.e. staging, production)')
    arg_parser.add_argument('tasks', nargs='?', default=env_tasks or 'ALL',
            help='Comma-separated list of Tasks to deploy, or "ALL".')
    arg_parser.add_argument('-l', '--log-level',
            choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
            default=os.environ.get(
                'CLOUDREACTOR_DEPLOYER_LOG_LEVEL', _DEFAULT_LOG_LEVEL),
            help=f"Log level. Defaults to {_DEFAULT_LOG_LEVEL}.")
    arg_parser.add_argument('--ansible-args',
            nargs=argparse.REMAINDER, default=fallback_ansible_args,
            help='Additional options passed to ansible-playbook')

    args = arg_parser.parse_args()

    log_level = args.log_level.upper()
    numeric_log_level = getattr(logging, log_level, None)
    if not isinstance(numeric_log_level, int):
        logging.warning(f"Invalid log level: {log_level}, defaulting to {_DEFAULT_LOG_LEVEL}")
        numeric_log_level = getattr(logging, _DEFAULT_LOG_LEVEL, None)

    logging.basicConfig(level=numeric_log_level,
            format="CloudReactor Deployer: %(asctime)s %(levelname)s: %(message)s")

    deployment = args.deployment
    tasks_str = args.tasks

    logging.debug(f"Log level = {log_level}")
    logging.info(f"Deployment environment = {deployment}")
    logging.info(f"Tasks to deploy = {tasks_str}")
    logging.info(f"Ansible args = {args.ansible_args}")

    child_env = os.environ.copy()
    # Scripts invoked by ansible-playbook (such as password files) need
    # these values available in their environment.
    child_env['DEPLOYMENT_ENVIRONMENT'] = deployment
    child_env['TASK_NAMES'] = tasks_str
    # Suppress warnings about merged configuration hashes in YAML.
    child_env['ANSIBLE_DUPLICATE_YAML_DICT_KEY'] = 'ignore'

    # Under GitHub Actions, mirror the workspace into WORK_DIR unless
    # the caller already pinned one explicitly.
    work_dir = os.environ.get('GITHUB_WORKSPACE')
    if work_dir and not os.environ.get('WORK_DIR'):
        logging.debug(f"Found GitHub workspace dir = {work_dir}")
        child_env['WORK_DIR'] = work_dir

    resolved_work_dir = work_dir or os.environ.get('WORK_DIR') or '/work'

    relative_docker_context_dir = os.environ.get('RELATIVE_DOCKER_CONTEXT_DIR')
    if relative_docker_context_dir:
        child_env['CONTAINER_DOCKER_CONTEXT_DIR'] = resolved_work_dir + '/' \
                + relative_docker_context_dir

    relative_dockerfile_path = os.environ.get('RELATIVE_DOCKERFILE_PATH')
    if relative_dockerfile_path:
        child_env['DOCKERFILE_PATH'] = resolved_work_dir + '/' \
                + relative_dockerfile_path

    command_line = ['ansible-playbook', '--extra-vars']
    # TODO: sanitize — deployment/tasks_str are interpolated into the
    # --extra-vars string unescaped.
    extra_vars = f'env="{deployment}" task_names="{tasks_str}"'
    command_line.append(extra_vars)
    command_line += args.ansible_args

    # When a vault password is supplied via the environment, route it
    # through the helper script instead of passing it on the command line.
    if os.environ.get('ANSIBLE_VAULT_PASSWORD'):
        command_line.extend(['--vault-password-file',
                             '/work/vault_pass_from_env.sh'])

    command_line.append('/work/deploy.yml')

    logging.debug(f"Ansible command line = {command_line}")

    try:
        subprocess.run(command_line, env=child_env, check=True)
    except subprocess.CalledProcessError as cpe:
        # Propagate ansible-playbook's exit status to our caller.
        sys.exit(cpe.returncode)
| 34.817391
| 94
| 0.693556
|
import argparse
import logging
import os
import shlex
import subprocess
import sys
_DEFAULT_LOG_LEVEL = 'INFO'
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='deploy', allow_abbrev=False,
description="""
Deploys a project to AWS ECS and CloudReactor using Ansible.
""")
deployment = os.environ.get('DEPLOYMENT_ENVIRONMENT')
tasks_str = os.environ.get('TASK_NAMES')
ansible_args_str = os.environ.get('EXTRA_ANSIBLE_OPTIONS')
default_ansible_args = []
if ansible_args_str:
default_ansible_args = shlex.split(ansible_args_str)
parser.add_argument('deployment',
nargs=('?' if deployment else None), default=deployment,
help='Name of deployment environment (i.e. staging, production)')
parser.add_argument('tasks', nargs='?', default=tasks_str or 'ALL',
help='Comma-separated list of Tasks to deploy, or "ALL".')
parser.add_argument('-l', '--log-level',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default=os.environ.get(
'CLOUDREACTOR_DEPLOYER_LOG_LEVEL', _DEFAULT_LOG_LEVEL),
help=f"Log level. Defaults to {_DEFAULT_LOG_LEVEL}.")
parser.add_argument('--ansible-args',
nargs=argparse.REMAINDER, default=default_ansible_args,
help='Additional options passed to ansible-playbook')
args = parser.parse_args()
log_level = args.log_level.upper()
numeric_log_level = getattr(logging, log_level, None)
if not isinstance(numeric_log_level, int):
logging.warning(f"Invalid log level: {log_level}, defaulting to {_DEFAULT_LOG_LEVEL}")
numeric_log_level = getattr(logging, _DEFAULT_LOG_LEVEL, None)
logging.basicConfig(level=numeric_log_level,
format="CloudReactor Deployer: %(asctime)s %(levelname)s: %(message)s")
deployment = args.deployment
tasks_str = args.tasks
logging.debug(f"Log level = {log_level}")
logging.info(f"Deployment environment = {deployment}")
logging.info(f"Tasks to deploy = {tasks_str}")
logging.info(f"Ansible args = {args.ansible_args}")
process_env = os.environ.copy()
process_env['DEPLOYMENT_ENVIRONMENT'] = deployment
process_env['TASK_NAMES'] = tasks_str
process_env['ANSIBLE_DUPLICATE_YAML_DICT_KEY'] = 'ignore'
work_dir = os.environ.get('GITHUB_WORKSPACE')
if work_dir and not os.environ.get('WORK_DIR'):
logging.debug(f"Found GitHub workspace dir = {work_dir}")
process_env['WORK_DIR'] = work_dir
resolved_work_dir = work_dir or os.environ.get('WORK_DIR') or '/work'
relative_docker_context_dir = os.environ.get('RELATIVE_DOCKER_CONTEXT_DIR')
if relative_docker_context_dir:
process_env['CONTAINER_DOCKER_CONTEXT_DIR'] = resolved_work_dir + '/' \
+ relative_docker_context_dir
relative_dockerfile_path = os.environ.get('RELATIVE_DOCKERFILE_PATH')
if relative_dockerfile_path:
process_env['DOCKERFILE_PATH'] = resolved_work_dir + '/' \
+ relative_dockerfile_path
command_line = ['ansible-playbook', '--extra-vars']
# TODO: sanitize
extra_vars = f'env="{deployment}" task_names="{tasks_str}"'
command_line.append(extra_vars)
command_line += args.ansible_args
ansible_vault_password = os.environ.get('ANSIBLE_VAULT_PASSWORD')
if ansible_vault_password:
command_line.append('--vault-password-file')
command_line.append('/work/vault_pass_from_env.sh')
command_line.append('/work/deploy.yml')
logging.debug(f"Ansible command line = {command_line}")
try:
subprocess.run(command_line, env=process_env,
check=True)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.returncode)
| true
| true
|
f71700a02e9af92b95c627de9e9deee219c9f35c
| 2,411
|
py
|
Python
|
Chapter04/flask_test.py
|
cloudnatif/Cloud-Natif
|
13b318981d946f26b37db6073252dcb31d0760b1
|
[
"MIT"
] | 94
|
2017-07-28T07:19:42.000Z
|
2022-01-25T21:09:06.000Z
|
Chapter04/flask_test.py
|
cloudnatif/Cloud-Natif
|
13b318981d946f26b37db6073252dcb31d0760b1
|
[
"MIT"
] | null | null | null |
Chapter04/flask_test.py
|
cloudnatif/Cloud-Natif
|
13b318981d946f26b37db6073252dcb31d0760b1
|
[
"MIT"
] | 78
|
2017-03-15T17:27:55.000Z
|
2022-01-25T21:09:30.000Z
|
# Test cases for restful API
from app import app
import unittest
class FlaskappTests(unittest.TestCase):
    """HTTP status-code tests for the Flask REST API endpoints.

    Each test issues one request through the Werkzeug test client and
    asserts only on the response status code.
    """

    def setUp(self):
        # Create a test client bound to the Flask app.
        self.app = app.test_client()
        # Propagate application exceptions to the test client so failures
        # surface as tracebacks instead of generic 500 responses.
        self.app.testing = True

    def test_users_status_code(self):
        """GET /api/v1/users responds 200."""
        result = self.app.get('/api/v1/users')
        print(result)
        self.assertEqual(result.status_code, 200)

    def test_tweets_status_code(self):
        """GET /api/v2/tweets responds 200."""
        result = self.app.get('/api/v2/tweets')
        self.assertEqual(result.status_code, 200)

    def test_addusers_status_code(self):
        """POST /api/v1/users with a full user payload responds 201 (created)."""
        result = self.app.post('/api/v1/users', data='{ "username":"Ovestint", "email": "ronaldrvera@jourrapide.com", "password": "juzahpei6e", "name":"Ronald R. Vera"}', content_type='application/json')
        print(result)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual everywhere.
        self.assertEqual(result.status_code, 201)

    def test_updusers_status_code(self):
        """PUT /api/v1/users/5 updating user fields responds 200."""
        result = self.app.put('/api/v1/users/5', data='{"username":"Tagning", "email": "leolaLguertin@teleworm.us"}', content_type='application/json')
        self.assertEqual(result.status_code, 200)

    def test_addtweets_status_code(self):
        """POST /api/v2/tweets with a tweet payload responds 201 (created)."""
        result = self.app.post('/api/v2/tweets', data='{"username":"Tagning", "body": "It Works!#Awesome"}', content_type='application/json')
        self.assertEqual(result.status_code, 201)

    def test_delusers_status_code(self):
        """DELETE /api/v1/users for an existing username responds 200."""
        result = self.app.delete('/api/v1/users', data='{"username":"Ovestint"}', content_type='application/json')
        print(result)
        self.assertEqual(result.status_code, 200)
| 43.053571
| 203
| 0.662796
|
from app import app
import unittest
class FlaskappTests(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def test_users_status_code(self):
result = self.app.get('/api/v1/users')
print (result)
self.assertEqual(result.status_code, 200)
def test_tweets_status_code(self):
result = self.app.get('/api/v2/tweets')
self.assertEqual(result.status_code, 200)
def test_addusers_status_code(self):
result = self.app.post('/api/v1/users', data='{ "username":"Ovestint", "email": "ronaldrvera@jourrapide.com", "password": "juzahpei6e", "name":"Ronald R. Vera"}', content_type='application/json')
print (result)
self.assertEquals(result.status_code, 201)
def test_updusers_status_code(self):
result = self.app.put('/api/v1/users/5', data='{"username":"Tagning", "email": "leolaLguertin@teleworm.us"}', content_type='application/json')
self.assertEquals(result.status_code, 200)
def test_addtweets_status_code(self):
result = self.app.post('/api/v2/tweets', data='{"username":"Tagning", "body": "It Works!#Awesome"}', content_type='application/json')
self.assertEqual(result.status_code, 201)
def test_delusers_status_code(self):
result = self.app.delete('/api/v1/users', data='{"username":"Ovestint"}', content_type='application/json')
print (result)
self.assertEquals(result.status_code, 200)
| true
| true
|
f71700e3179565aa0840be71453d64b75aaf1335
| 17,142
|
py
|
Python
|
eventlet/queue.py
|
ljz888666555/eventlet
|
203e629212be5cf6e53d577734421d494b255754
|
[
"MIT"
] | 1
|
2016-09-20T21:00:28.000Z
|
2016-09-20T21:00:28.000Z
|
eventlet/queue.py
|
ljz888666555/eventlet
|
203e629212be5cf6e53d577734421d494b255754
|
[
"MIT"
] | null | null | null |
eventlet/queue.py
|
ljz888666555/eventlet
|
203e629212be5cf6e53d577734421d494b255754
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com
# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Synchronized queues.
The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
queues that work across greenlets, with the API similar to the classes found in
the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
modules.
A major difference is that queues in this module operate as channels when
initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty`
and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
a call to :meth:`Queue.get` retrieves the item.
An interesting difference, made possible because of greenthreads, is
that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
used as indicators of whether the subsequent :meth:`Queue.get`
or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting`
and :meth:`Queue.putting` report on the number of greenthreads blocking
in :meth:`put <Queue.put>` or :meth:`get <Queue.get>` respectively.
"""
from __future__ import print_function
import sys
import heapq
import collections
import traceback
from eventlet.event import Event
from eventlet.greenthread import getcurrent
from eventlet.hubs import get_hub
from eventlet.support import six
from eventlet.timeout import Timeout
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty']
_NONE = object()
Full = six.moves.queue.Full
Empty = six.moves.queue.Empty
class Waiter(object):
    """A low level synchronization class.
    Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe:

    * switching will occur only if the waiting greenlet is executing :meth:`wait`
      method currently. Otherwise, :meth:`switch` and :meth:`throw` are no-ops.
    * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`

    The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
    The :meth:`wait` method must be called from a greenlet other than :class:`Hub`.
    """
    # `greenlet` is the greenlet currently blocked in wait(), or None when
    # nobody is waiting; __slots__ keeps per-instance memory minimal.
    __slots__ = ['greenlet']

    def __init__(self):
        self.greenlet = None

    def __repr__(self):
        if self.waiting:
            waiting = ' waiting'
        else:
            waiting = ''
        return '<%s at %s%s greenlet=%r>' % (type(self).__name__, hex(id(self)), waiting, self.greenlet)

    def __str__(self):
        """
        >>> print(Waiter())
        <Waiter greenlet=None>
        """
        if self.waiting:
            waiting = ' waiting'
        else:
            waiting = ''
        return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet)

    def __nonzero__(self):
        # Truthy iff some greenlet is currently blocked in wait().
        return self.greenlet is not None

    __bool__ = __nonzero__  # Python 3 spelling of __nonzero__

    @property
    def waiting(self):
        # True while a greenlet is inside wait() on this Waiter.
        return self.greenlet is not None

    def switch(self, value=None):
        """Wake up the greenlet that is calling wait() currently (if there is one).
        Can only be called from Hub's greenlet.
        """
        assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
        if self.greenlet is not None:
            try:
                self.greenlet.switch(value)
            except:
                # Never let a waiter's error escape into the hub's main loop.
                traceback.print_exc()

    def throw(self, *throw_args):
        """Make greenlet calling wait() wake up (if there is a wait()).
        Can only be called from Hub's greenlet.
        """
        assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
        if self.greenlet is not None:
            try:
                self.greenlet.throw(*throw_args)
            except:
                # Same rationale as switch(): isolate hub from waiter errors.
                traceback.print_exc()

    # XXX should be renamed to get() ? and the whole class is called Receiver?
    def wait(self):
        """Wait until switch() or throw() is called.
        """
        assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
        self.greenlet = getcurrent()
        try:
            # Yield to the hub; resumes when switch()/throw() fires.
            return get_hub().switch()
        finally:
            # Always clear, so the Waiter can be reused afterwards.
            self.greenlet = None
class LightQueue(object):
    """
    This is a variant of Queue that behaves mostly like the standard
    :class:`Queue`. It differs by not supporting the
    :meth:`task_done <Queue.task_done>` or :meth:`join <Queue.join>` methods,
    and is a little faster for not having that overhead.
    """

    def __init__(self, maxsize=None):
        if maxsize is None or maxsize < 0:  # None is not comparable in 3.x
            self.maxsize = None
        else:
            self.maxsize = maxsize
        # Waiters blocked in get() / put(), respectively.
        self.getters = set()
        self.putters = set()
        # Handle of the pending hub callback that runs _unlock(), if any.
        self._event_unlock = None
        self._init(maxsize)

    # QQQ make maxsize into a property with setter that schedules unlock if necessary

    def _init(self, maxsize):
        # Storage hook overridden by subclasses (heap, stack, ...).
        self.queue = collections.deque()

    def _get(self):
        return self.queue.popleft()

    def _put(self, item):
        self.queue.append(item)

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        # Summarize internal state for __repr__/__str__.
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, 'queue', None):
            result += ' queue=%r' % self.queue
        if self.getters:
            result += ' getters[%s]' % len(self.getters)
        if self.putters:
            result += ' putters[%s]' % len(self.putters)
        if self._event_unlock is not None:
            result += ' unlocking'
        return result

    def qsize(self):
        """Return the size of the queue."""
        return len(self.queue)

    def resize(self, size):
        """Resizes the queue's maximum size.

        If the size is increased, and there are putters waiting, they may be woken up."""
        if self.maxsize is not None and (size is None or size > self.maxsize):  # None is not comparable in 3.x
            # Maybe wake some stuff up
            self._schedule_unlock()
        self.maxsize = size

    def putting(self):
        """Returns the number of greenthreads that are blocked waiting to put
        items into the queue."""
        return len(self.putters)

    def getting(self):
        """Returns the number of greenthreads that are blocked waiting on an
        empty queue."""
        return len(self.getters)

    def empty(self):
        """Return ``True`` if the queue is empty, ``False`` otherwise."""
        return not self.qsize()

    def full(self):
        """Return ``True`` if the queue is full, ``False`` otherwise.

        ``Queue(None)`` is never full.
        """
        return self.maxsize is not None and self.qsize() >= self.maxsize  # None is not comparable in 3.x

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional arg *block* is true and *timeout* is ``None`` (the default),
        block if necessary until a free slot is available. If *timeout* is
        a positive number, it blocks at most *timeout* seconds and raises
        the :class:`Full` exception if no free slot was available within that time.
        Otherwise (*block* is false), put an item on the queue if a free slot
        is immediately available, else raise the :class:`Full` exception (*timeout*
        is ignored in that case).
        """
        if self.maxsize is None or self.qsize() < self.maxsize:
            # there's a free slot, put an item right away
            self._put(item)
            if self.getters:
                self._schedule_unlock()
        elif not block and get_hub().greenlet is getcurrent():
            # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
            # find a getter and deliver an item to it
            while self.getters:
                getter = self.getters.pop()
                if getter:
                    self._put(item)
                    item = self._get()
                    getter.switch(item)
                    return
            raise Full
        elif block:
            # Park on an ItemWaiter; _unlock() may deliver the item directly
            # to a getter (and reset waiter.item to _NONE) before we resume.
            waiter = ItemWaiter(item)
            self.putters.add(waiter)
            timeout = Timeout(timeout, Full)
            try:
                if self.getters:
                    self._schedule_unlock()
                result = waiter.wait()
                assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
                if waiter.item is not _NONE:
                    # _unlock() woke us without consuming the item: store it now.
                    self._put(item)
            finally:
                timeout.cancel()
                self.putters.discard(waiter)
        else:
            raise Full

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the :class:`Full` exception.
        """
        self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args *block* is true and *timeout* is ``None`` (the default),
        block if necessary until an item is available. If *timeout* is a positive number,
        it blocks at most *timeout* seconds and raises the :class:`Empty` exception
        if no item was available within that time. Otherwise (*block* is false), return
        an item if one is immediately available, else raise the :class:`Empty` exception
        (*timeout* is ignored in that case).
        """
        if self.qsize():
            if self.putters:
                self._schedule_unlock()
            return self._get()
        elif not block and get_hub().greenlet is getcurrent():
            # special case to make get_nowait() runnable in the mainloop greenlet
            # there are no items in the queue; try to fix the situation by unlocking putters
            while self.putters:
                putter = self.putters.pop()
                if putter:
                    putter.switch(putter)
                    if self.qsize():
                        return self._get()
            raise Empty
        elif block:
            waiter = Waiter()
            timeout = Timeout(timeout, Empty)
            try:
                self.getters.add(waiter)
                if self.putters:
                    self._schedule_unlock()
                # _unlock() will switch() the item directly into this wait().
                return waiter.wait()
            finally:
                self.getters.discard(waiter)
                timeout.cancel()
        else:
            raise Empty

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the :class:`Empty` exception.
        """
        return self.get(False)

    def _unlock(self):
        # Hub callback: pair up waiting getters/putters until no progress
        # can be made. Runs in the hub greenlet.
        try:
            while True:
                if self.qsize() and self.getters:
                    # Items available: hand one to a waiting getter.
                    getter = self.getters.pop()
                    if getter:
                        try:
                            item = self._get()
                        except:
                            getter.throw(*sys.exc_info())
                        else:
                            getter.switch(item)
                elif self.putters and self.getters:
                    # Channel-style handoff: move the putter's item straight
                    # through the queue to a getter, then wake both.
                    putter = self.putters.pop()
                    if putter:
                        getter = self.getters.pop()
                        if getter:
                            item = putter.item
                            putter.item = _NONE  # this makes greenlet calling put() not to call _put() again
                            self._put(item)
                            item = self._get()
                            getter.switch(item)
                            putter.switch(putter)
                        else:
                            self.putters.add(putter)
                elif self.putters and (self.getters or self.maxsize is None or self.qsize() < self.maxsize):
                    # Capacity freed up: let a blocked putter proceed.
                    putter = self.putters.pop()
                    putter.switch(putter)
                else:
                    break
        finally:
            self._event_unlock = None  # QQQ maybe it's possible to obtain this info from libevent?
            # i.e. whether this event is pending _OR_ currently executing
            # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a change to execute
            # to avoid this, schedule unlock with timer(0, ...) once in a while

    def _schedule_unlock(self):
        # Coalesce: only one _unlock callback may be pending at a time.
        if self._event_unlock is None:
            self._event_unlock = get_hub().schedule_call_global(0, self._unlock)
class ItemWaiter(Waiter):
    """A :class:`Waiter` that also carries the item being put().

    ``LightQueue._unlock`` reads :attr:`item` and resets it to ``_NONE``
    once the item has been handed over, so the resumed put() knows not to
    enqueue it a second time.
    """
    __slots__ = ['item']

    def __init__(self, item):
        super(ItemWaiter, self).__init__()
        self.item = item
class Queue(LightQueue):
    '''Create a queue object with a given maximum size.

    If *maxsize* is less than zero or ``None``, the queue size is infinite.

    ``Queue(0)`` is a channel: its :meth:`put` method always blocks until the
    item is delivered. (This is unlike the standard :class:`Queue`, where 0
    means infinite size.) In all other respects this class resembles the
    standard library :class:`Queue`, including the
    :meth:`task_done`/:meth:`join` protocol.
    '''

    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        # Count of items put() that have not yet been task_done()'d.
        self.unfinished_tasks = 0
        # Fired when unfinished_tasks reaches zero; join() waits on it.
        self._cond = Event()

    def _format(self):
        summary = LightQueue._format(self)
        if not self.unfinished_tasks:
            return summary
        return summary + ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        # Shared by subclasses' _put overrides: bump the task count and
        # re-arm the join() condition if it had already fired.
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.

        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call
        to :meth:`task_done` tells the queue that processing on the task is
        complete. If a :meth:`join` is currently blocking, it will resume once a
        :meth:`task_done` call has been received for every item that had been
        :meth:`put <Queue.put>` into the queue.

        Raises a :exc:`ValueError` if called more times than there were items
        placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if not self.unfinished_tasks:
            self._cond.send(None)

    def join(self):
        '''Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up on every :meth:`put <Queue.put>`
        and down on every :meth:`task_done`; :meth:`join` unblocks when it
        drops to zero.
        '''
        if self.unfinished_tasks:
            self._cond.wait()
class PriorityQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves entries in priority order
    (lowest first). Entries are typically ``(priority_number, data)`` tuples.
    '''

    def _init(self, maxsize):
        # Backing store is a heap-ordered list.
        self.queue = []

    def _put(self, item, heappush=heapq.heappush):
        # *heappush* is bound at definition time as a lookup micro-optimization.
        heappush(self.queue, item)
        self._put_bookkeeping()

    def _get(self, heappop=heapq.heappop):
        # Pops the smallest entry from the heap.
        return heappop(self.queue)
class LifoQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves the most recently added
    entry first (stack order).'''

    def _init(self, maxsize):
        # A plain list used as a stack.
        self.queue = []

    def _put(self, item):
        self.queue.append(item)
        self._put_bookkeeping()

    def _get(self):
        # pop() without an index removes from the end: LIFO.
        return self.queue.pop()
| 37.184382
| 117
| 0.605064
|
from __future__ import print_function
import sys
import heapq
import collections
import traceback
from eventlet.event import Event
from eventlet.greenthread import getcurrent
from eventlet.hubs import get_hub
from eventlet.support import six
from eventlet.timeout import Timeout
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty']
_NONE = object()
Full = six.moves.queue.Full
Empty = six.moves.queue.Empty
class Waiter(object):
__slots__ = ['greenlet']
def __init__(self):
self.greenlet = None
def __repr__(self):
if self.waiting:
waiting = ' waiting'
else:
waiting = ''
return '<%s at %s%s greenlet=%r>' % (type(self).__name__, hex(id(self)), waiting, self.greenlet)
def __str__(self):
if self.waiting:
waiting = ' waiting'
else:
waiting = ''
return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet)
def __nonzero__(self):
return self.greenlet is not None
__bool__ = __nonzero__
@property
def waiting(self):
return self.greenlet is not None
def switch(self, value=None):
assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
if self.greenlet is not None:
try:
self.greenlet.switch(value)
except:
traceback.print_exc()
def throw(self, *throw_args):
assert getcurrent() is get_hub().greenlet, "Can only use Waiter.switch method from the mainloop"
if self.greenlet is not None:
try:
self.greenlet.throw(*throw_args)
except:
traceback.print_exc()
def wait(self):
assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
self.greenlet = getcurrent()
try:
return get_hub().switch()
finally:
self.greenlet = None
class LightQueue(object):
def __init__(self, maxsize=None):
if maxsize is None or maxsize < 0:
self.maxsize = None
else:
self.maxsize = maxsize
self.getters = set()
self.putters = set()
self._event_unlock = None
self._init(maxsize)
def _init(self, maxsize):
self.queue = collections.deque()
def _get(self):
return self.queue.popleft()
def _put(self, item):
self.queue.append(item)
def __repr__(self):
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._format())
def _format(self):
result = 'maxsize=%r' % (self.maxsize, )
if getattr(self, 'queue', None):
result += ' queue=%r' % self.queue
if self.getters:
result += ' getters[%s]' % len(self.getters)
if self.putters:
result += ' putters[%s]' % len(self.putters)
if self._event_unlock is not None:
result += ' unlocking'
return result
def qsize(self):
return len(self.queue)
def resize(self, size):
if self.maxsize is not None and (size is None or size > self.maxsize):
self._schedule_unlock()
self.maxsize = size
def putting(self):
return len(self.putters)
def getting(self):
return len(self.getters)
def empty(self):
return not self.qsize()
def full(self):
return self.maxsize is not None and self.qsize() >= self.maxsize
def put(self, item, block=True, timeout=None):
if self.maxsize is None or self.qsize() < self.maxsize:
self._put(item)
if self.getters:
self._schedule_unlock()
elif not block and get_hub().greenlet is getcurrent():
# we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
while self.getters:
getter = self.getters.pop()
if getter:
self._put(item)
item = self._get()
getter.switch(item)
return
raise Full
elif block:
waiter = ItemWaiter(item)
self.putters.add(waiter)
timeout = Timeout(timeout, Full)
try:
if self.getters:
self._schedule_unlock()
result = waiter.wait()
assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
if waiter.item is not _NONE:
self._put(item)
finally:
timeout.cancel()
self.putters.discard(waiter)
else:
raise Full
def put_nowait(self, item):
self.put(item, False)
def get(self, block=True, timeout=None):
if self.qsize():
if self.putters:
self._schedule_unlock()
return self._get()
elif not block and get_hub().greenlet is getcurrent():
while self.putters:
putter = self.putters.pop()
if putter:
putter.switch(putter)
if self.qsize():
return self._get()
raise Empty
elif block:
waiter = Waiter()
timeout = Timeout(timeout, Empty)
try:
self.getters.add(waiter)
if self.putters:
self._schedule_unlock()
return waiter.wait()
finally:
self.getters.discard(waiter)
timeout.cancel()
else:
raise Empty
def get_nowait(self):
return self.get(False)
def _unlock(self):
try:
while True:
if self.qsize() and self.getters:
getter = self.getters.pop()
if getter:
try:
item = self._get()
except:
getter.throw(*sys.exc_info())
else:
getter.switch(item)
elif self.putters and self.getters:
putter = self.putters.pop()
if putter:
getter = self.getters.pop()
if getter:
item = putter.item
putter.item = _NONE
self._put(item)
item = self._get()
getter.switch(item)
putter.switch(putter)
else:
self.putters.add(putter)
elif self.putters and (self.getters or self.maxsize is None or self.qsize() < self.maxsize):
putter = self.putters.pop()
putter.switch(putter)
else:
break
finally:
self._event_unlock = None
    # _event_unlock is non-None iff an _unlock callback is pending _OR_
    # currently executing in the hub.
    # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a chance to execute
    # to avoid this, schedule unlock with timer(0, ...) once in a while
    def _schedule_unlock(self):
        # Arm at most one pending _unlock() callback in the hub.
        if self._event_unlock is None:
            self._event_unlock = get_hub().schedule_call_global(0, self._unlock)
class ItemWaiter(Waiter):
    """Waiter that carries the item a blocked put() wants to deliver.

    _unlock() resets ``item`` to _NONE once the item has been moved into
    the queue, letting put() detect that delivery already happened.
    """
    __slots__ = ['item']

    def __init__(self, item):
        Waiter.__init__(self)
        self.item = item
class Queue(LightQueue):
    """LightQueue plus task accounting: task_done() / join(), like the
    standard library Queue."""

    def __init__(self, maxsize=None):
        LightQueue.__init__(self, maxsize)
        # Count of items put() that have not yet been acknowledged via
        # task_done(); join() blocks while this is non-zero.
        self.unfinished_tasks = 0
        self._cond = Event()

    def _format(self):
        base = LightQueue._format(self)
        if not self.unfinished_tasks:
            return base
        return base + ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)

    def _put(self, item):
        LightQueue._put(self, item)
        self._put_bookkeeping()

    def _put_bookkeeping(self):
        # Every stored item is one more unfinished task; re-arm the
        # "all done" event if it had already fired.
        self.unfinished_tasks += 1
        if self._cond.ready():
            self._cond.reset()

    def task_done(self):
        """Acknowledge that one previously fetched item is fully processed."""
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if not self.unfinished_tasks:
            # Last outstanding task: wake everyone blocked in join().
            self._cond.send(None)

    def join(self):
        """Block until every item ever put() has been task_done()'d."""
        if self.unfinished_tasks > 0:
            self._cond.wait()
class PriorityQueue(Queue):
    """Queue variant that retrieves entries in heap (lowest-first) order."""
    def _init(self, maxsize):
        # Backing store: a plain list managed as a binary heap by heapq.
        self.queue = []
    def _put(self, item, heappush=heapq.heappush):
        # heappush bound as a default argument: micro-optimization turning a
        # global lookup into a fast local one; callers pass only *item*.
        heappush(self.queue, item)
        self._put_bookkeeping()
    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)
class LifoQueue(Queue):
    """Queue variant that retrieves the most recently added entry first."""
    def _init(self, maxsize):
        # Backing store: a plain list used as a stack.
        self.queue = []
    def _put(self, item):
        self.queue.append(item)
        self._put_bookkeeping()
    def _get(self):
        # pop() from the end -> LIFO order.
        return self.queue.pop()
| true
| true
|
f7170216f50bfa3fdd6fc09085ea89978ae0086b
| 756
|
py
|
Python
|
googlelyrics/__main__.py
|
lastsecondsave/googlelyrics
|
b022953b82d84fee4caa1e7ea1edbc0374b89a22
|
[
"MIT"
] | null | null | null |
googlelyrics/__main__.py
|
lastsecondsave/googlelyrics
|
b022953b82d84fee4caa1e7ea1edbc0374b89a22
|
[
"MIT"
] | null | null | null |
googlelyrics/__main__.py
|
lastsecondsave/googlelyrics
|
b022953b82d84fee4caa1e7ea1edbc0374b89a22
|
[
"MIT"
] | null | null | null |
import sys
from argparse import ArgumentParser
from . import search
def parse_args():
    """Parse the googlelyrics command line.

    Returns a namespace with ``no_header`` (bool) and ``query``
    (list of one or more words).
    """
    cli = ArgumentParser(prog="googlelyrics")
    cli.add_argument("--no-header", action="store_true",
                     help="don't print the info header")
    cli.add_argument("query", type=str, nargs="+", help="search query")
    return cli.parse_args()
# pylint: disable=broad-except
def main():
    """CLI entry point: search for lyrics and print them.

    Exits with an error message when the search fails or finds nothing.
    """
    args = parse_args()
    try:
        lyrics = search(" ".join(args.query))
    except Exception as exc:
        # BUG FIX: was "except BaseException", which also swallowed
        # KeyboardInterrupt/SystemExit and turned Ctrl-C into an exit
        # message.  Exception still covers all search failures.
        sys.exit(exc)
    if not lyrics:
        sys.exit("No lyrics found")
    if not args.no_header:
        print(f"{lyrics.artist} - {lyrics.title}\n")
    print("\n".join(lyrics.lines))


if __name__ == "__main__":
    main()
| 19.894737
| 78
| 0.633598
|
import sys
from argparse import ArgumentParser
from . import search
def parse_args():
parser = ArgumentParser(prog="googlelyrics")
parser.add_argument(
"--no-header", action="store_true", help="don't print the info header"
)
parser.add_argument("query", type=str, nargs="+", help="search query")
return parser.parse_args()
# pylint: disable=broad-except
def main():
args = parse_args()
try:
lyrics = search(" ".join(args.query))
except BaseException:
sys.exit(sys.exc_info()[1])
if not lyrics:
sys.exit("No lyrics found")
if not args.no_header:
print(f"{lyrics.artist} - {lyrics.title}\n")
print("\n".join(lyrics.lines))
if __name__ == "__main__":
main()
| true
| true
|
f717023372944f7baec384fa86924fa77eb99257
| 894
|
py
|
Python
|
alembic/versions/d6fcd3132857_episode_source.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 5
|
2021-07-01T16:31:29.000Z
|
2022-01-29T14:32:13.000Z
|
alembic/versions/d6fcd3132857_episode_source.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 45
|
2020-10-25T19:41:26.000Z
|
2022-03-25T06:31:58.000Z
|
alembic/versions/d6fcd3132857_episode_source.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 1
|
2022-01-27T11:30:07.000Z
|
2022-01-27T11:30:07.000Z
|
"""episode_source
Revision ID: d6fcd3132857
Revises: bca0b2a3b5f4
Create Date: 2021-10-17 18:51:29.927189
"""
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
# revision identifiers, used by Alembic.
revision = "d6fcd3132857"
down_revision = "bca0b2a3b5f4"
branch_labels = None
depends_on = None
episodes = table("podcast_episodes", column("source_type", String))
def upgrade():
    """Add nullable VARCHAR(16) column podcast_episodes.source_type and
    backfill every existing row with "YOUTUBE"."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("podcast_episodes", sa.Column("source_type", sa.VARCHAR(16), nullable=True))
    op.execute(episodes.update().values({"source_type": "YOUTUBE"}))
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the podcast_episodes.source_type column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("podcast_episodes", "source_type")
    # ### end Alembic commands ###
| 26.294118
| 94
| 0.719239
|
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
revision = "d6fcd3132857"
down_revision = "bca0b2a3b5f4"
branch_labels = None
depends_on = None
episodes = table("podcast_episodes", column("source_type", String))
def upgrade():
| true
| true
|
f7170340f1b333ec00377c1b81345ad679ae62c9
| 9,484
|
py
|
Python
|
homeassistant/components/lcn/__init__.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-01-21T18:09:09.000Z
|
2022-01-17T08:06:03.000Z
|
homeassistant/components/lcn/__init__.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
homeassistant/components/lcn/__init__.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 6
|
2020-04-10T06:21:11.000Z
|
2021-07-01T08:53:38.000Z
|
"""Support for LCN devices."""
import logging
import pypck
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COVERS,
CONF_HOST,
CONF_LIGHTS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.entity import Entity
from .const import (
BINSENSOR_PORTS,
CONF_CLIMATES,
CONF_CONNECTIONS,
CONF_DIM_MODE,
CONF_DIMMABLE,
CONF_LOCKABLE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_MOTOR,
CONF_OUTPUT,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_REVERSE_TIME,
CONF_SCENE,
CONF_SCENES,
CONF_SETPOINT,
CONF_SK_NUM_TRIES,
CONF_SOURCE,
CONF_TRANSITION,
DATA_LCN,
DIM_MODES,
DOMAIN,
KEYS,
LED_PORTS,
LOGICOP_PORTS,
MOTOR_PORTS,
MOTOR_REVERSE_TIME,
OUTPUT_PORTS,
RELAY_PORTS,
S0_INPUTS,
SETPOINTS,
THRESHOLDS,
VAR_UNITS,
VARIABLES,
)
from .helpers import has_unique_connection_names, is_address
from .services import (
DynText,
Led,
LockKeys,
LockRegulator,
OutputAbs,
OutputRel,
OutputToggle,
Pck,
Relays,
SendKeys,
VarAbs,
VarRel,
VarReset,
)
_LOGGER = logging.getLogger(__name__)
BINARY_SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper, vol.In(SETPOINTS + KEYS + BINSENSOR_PORTS)
),
}
)
# Validation schema for one LCN climate entity.
CLIMATES_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_ADDRESS): is_address,
        vol.Required(CONF_SOURCE): vol.All(vol.Upper, vol.In(VARIABLES)),
        vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS)),
        vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_LOCKABLE, default=False): vol.Coerce(bool),
        # BUG FIX: vol.In takes a single container (its second positional
        # argument is the error message).  Passing the two temperature units
        # as separate arguments made TEMP_FAHRENHEIT the error message and
        # never a valid choice; wrap both units in a list instead.
        vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): vol.In(
            [TEMP_CELSIUS, TEMP_FAHRENHEIT]
        ),
    }
)
COVERS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_MOTOR): vol.All(vol.Upper, vol.In(MOTOR_PORTS)),
vol.Optional(CONF_REVERSE_TIME): vol.All(vol.Upper, vol.In(MOTOR_REVERSE_TIME)),
}
)
LIGHTS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
vol.Optional(CONF_DIMMABLE, default=False): vol.Coerce(bool),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0), lambda value: value * 1000
),
}
)
SCENES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_REGISTER): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Required(CONF_SCENE): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Optional(CONF_OUTPUTS): vol.All(
cv.ensure_list, [vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS))]
),
vol.Optional(CONF_TRANSITION, default=None): vol.Any(
vol.All(
vol.Coerce(int),
vol.Range(min=0.0, max=486.0),
lambda value: value * 1000,
),
None,
),
}
)
SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper,
vol.In(
VARIABLES
+ SETPOINTS
+ THRESHOLDS
+ S0_INPUTS
+ LED_PORTS
+ LOGICOP_PORTS
),
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
SWITCHES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
}
)
CONNECTION_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SK_NUM_TRIES, default=0): cv.positive_int,
vol.Optional(CONF_DIM_MODE, default="steps50"): vol.All(
vol.Upper, vol.In(DIM_MODES)
),
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONNECTIONS): vol.All(
cv.ensure_list, has_unique_connection_names, [CONNECTION_SCHEMA]
),
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSORS_SCHEMA]
),
vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATES_SCHEMA]),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
vol.Optional(CONF_LIGHTS): vol.All(cv.ensure_list, [LIGHTS_SCHEMA]),
vol.Optional(CONF_SCENES): vol.All(cv.ensure_list, [SCENES_SCHEMA]),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSORS_SCHEMA]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCHES_SCHEMA]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the LCN component.

    Opens one PCHK connection per configured entry and stores them in
    hass.data[DATA_LCN], forwards the per-platform config sections to the
    platform setups, and registers all LCN service calls.  Returns False
    (aborting setup) as soon as any PCHK connection times out.
    """
    hass.data[DATA_LCN] = {}
    conf_connections = config[DOMAIN][CONF_CONNECTIONS]
    connections = []
    for conf_connection in conf_connections:
        connection_name = conf_connection.get(CONF_NAME)
        settings = {
            "SK_NUM_TRIES": conf_connection[CONF_SK_NUM_TRIES],
            "DIM_MODE": pypck.lcn_defs.OutputPortDimMode[
                conf_connection[CONF_DIM_MODE]
            ],
        }
        connection = pypck.connection.PchkConnectionManager(
            hass.loop,
            conf_connection[CONF_HOST],
            conf_connection[CONF_PORT],
            conf_connection[CONF_USERNAME],
            conf_connection[CONF_PASSWORD],
            settings=settings,
            connection_id=connection_name,
        )
        try:
            # establish connection to PCHK server (15 s timeout)
            await hass.async_create_task(connection.async_connect(timeout=15))
            connections.append(connection)
            _LOGGER.info('LCN connected to "%s"', connection_name)
        except TimeoutError:
            # One failed connection aborts the whole component setup.
            _LOGGER.error('Connection to PCHK server "%s" failed.', connection_name)
            return False
    hass.data[DATA_LCN][CONF_CONNECTIONS] = connections
    # load platforms for every config section that is present
    for component, conf_key in (
        ("binary_sensor", CONF_BINARY_SENSORS),
        ("climate", CONF_CLIMATES),
        ("cover", CONF_COVERS),
        ("light", CONF_LIGHTS),
        ("scene", CONF_SCENES),
        ("sensor", CONF_SENSORS),
        ("switch", CONF_SWITCHES),
    ):
        if conf_key in config[DOMAIN]:
            hass.async_create_task(
                async_load_platform(
                    hass, component, DOMAIN, config[DOMAIN][conf_key], config
                )
            )
    # register service calls (service classes come from .services)
    for service_name, service in (
        ("output_abs", OutputAbs),
        ("output_rel", OutputRel),
        ("output_toggle", OutputToggle),
        ("relays", Relays),
        ("var_abs", VarAbs),
        ("var_reset", VarReset),
        ("var_rel", VarRel),
        ("lock_regulator", LockRegulator),
        ("led", Led),
        ("send_keys", SendKeys),
        ("lock_keys", LockKeys),
        ("dyn_text", DynText),
        ("pck", Pck),
    ):
        hass.services.async_register(
            DOMAIN, service_name, service(hass), service.schema
        )
    return True
class LcnDevice(Entity):
    """Parent class for all devices associated with the LCN component."""

    def __init__(self, config, address_connection):
        """Initialize the LCN device.

        config: validated platform config for this entity (must contain
        CONF_NAME).  address_connection: pypck address object used to talk
        to the LCN module -- presumably a pypck AddressConnection; confirm
        against the platform code.
        """
        self.config = config
        self.address_connection = address_connection
        self._name = config[CONF_NAME]

    @property
    def should_poll(self):
        """Lcn device entity pushes its state to HA (no polling)."""
        return False

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass.

        Registers input_received() as callback for inputs coming from the
        module, so state updates are pushed rather than polled.
        """
        self.address_connection.register_for_inputs(self.input_received)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    def input_received(self, input_obj):
        """Set state/value when LCN input object (command) is received.

        Must be overridden by every concrete platform entity.
        """
        raise NotImplementedError("Pure virtual function.")
| 29.453416
| 88
| 0.614614
|
import logging
import pypck
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COVERS,
CONF_HOST,
CONF_LIGHTS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.entity import Entity
from .const import (
BINSENSOR_PORTS,
CONF_CLIMATES,
CONF_CONNECTIONS,
CONF_DIM_MODE,
CONF_DIMMABLE,
CONF_LOCKABLE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_MOTOR,
CONF_OUTPUT,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_REVERSE_TIME,
CONF_SCENE,
CONF_SCENES,
CONF_SETPOINT,
CONF_SK_NUM_TRIES,
CONF_SOURCE,
CONF_TRANSITION,
DATA_LCN,
DIM_MODES,
DOMAIN,
KEYS,
LED_PORTS,
LOGICOP_PORTS,
MOTOR_PORTS,
MOTOR_REVERSE_TIME,
OUTPUT_PORTS,
RELAY_PORTS,
S0_INPUTS,
SETPOINTS,
THRESHOLDS,
VAR_UNITS,
VARIABLES,
)
from .helpers import has_unique_connection_names, is_address
from .services import (
DynText,
Led,
LockKeys,
LockRegulator,
OutputAbs,
OutputRel,
OutputToggle,
Pck,
Relays,
SendKeys,
VarAbs,
VarRel,
VarReset,
)
_LOGGER = logging.getLogger(__name__)
BINARY_SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper, vol.In(SETPOINTS + KEYS + BINSENSOR_PORTS)
),
}
)
CLIMATES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(vol.Upper, vol.In(VARIABLES)),
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS)),
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_LOCKABLE, default=False): vol.Coerce(bool),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): vol.In(
TEMP_CELSIUS, TEMP_FAHRENHEIT
),
}
)
COVERS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_MOTOR): vol.All(vol.Upper, vol.In(MOTOR_PORTS)),
vol.Optional(CONF_REVERSE_TIME): vol.All(vol.Upper, vol.In(MOTOR_REVERSE_TIME)),
}
)
LIGHTS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
vol.Optional(CONF_DIMMABLE, default=False): vol.Coerce(bool),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0), lambda value: value * 1000
),
}
)
SCENES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_REGISTER): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Required(CONF_SCENE): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Optional(CONF_OUTPUTS): vol.All(
cv.ensure_list, [vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS))]
),
vol.Optional(CONF_TRANSITION, default=None): vol.Any(
vol.All(
vol.Coerce(int),
vol.Range(min=0.0, max=486.0),
lambda value: value * 1000,
),
None,
),
}
)
SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper,
vol.In(
VARIABLES
+ SETPOINTS
+ THRESHOLDS
+ S0_INPUTS
+ LED_PORTS
+ LOGICOP_PORTS
),
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
SWITCHES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
}
)
CONNECTION_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SK_NUM_TRIES, default=0): cv.positive_int,
vol.Optional(CONF_DIM_MODE, default="steps50"): vol.All(
vol.Upper, vol.In(DIM_MODES)
),
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONNECTIONS): vol.All(
cv.ensure_list, has_unique_connection_names, [CONNECTION_SCHEMA]
),
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSORS_SCHEMA]
),
vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATES_SCHEMA]),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
vol.Optional(CONF_LIGHTS): vol.All(cv.ensure_list, [LIGHTS_SCHEMA]),
vol.Optional(CONF_SCENES): vol.All(cv.ensure_list, [SCENES_SCHEMA]),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSORS_SCHEMA]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCHES_SCHEMA]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
hass.data[DATA_LCN] = {}
conf_connections = config[DOMAIN][CONF_CONNECTIONS]
connections = []
for conf_connection in conf_connections:
connection_name = conf_connection.get(CONF_NAME)
settings = {
"SK_NUM_TRIES": conf_connection[CONF_SK_NUM_TRIES],
"DIM_MODE": pypck.lcn_defs.OutputPortDimMode[
conf_connection[CONF_DIM_MODE]
],
}
connection = pypck.connection.PchkConnectionManager(
hass.loop,
conf_connection[CONF_HOST],
conf_connection[CONF_PORT],
conf_connection[CONF_USERNAME],
conf_connection[CONF_PASSWORD],
settings=settings,
connection_id=connection_name,
)
try:
await hass.async_create_task(connection.async_connect(timeout=15))
connections.append(connection)
_LOGGER.info('LCN connected to "%s"', connection_name)
except TimeoutError:
_LOGGER.error('Connection to PCHK server "%s" failed.', connection_name)
return False
hass.data[DATA_LCN][CONF_CONNECTIONS] = connections
for component, conf_key in (
("binary_sensor", CONF_BINARY_SENSORS),
("climate", CONF_CLIMATES),
("cover", CONF_COVERS),
("light", CONF_LIGHTS),
("scene", CONF_SCENES),
("sensor", CONF_SENSORS),
("switch", CONF_SWITCHES),
):
if conf_key in config[DOMAIN]:
hass.async_create_task(
async_load_platform(
hass, component, DOMAIN, config[DOMAIN][conf_key], config
)
)
for service_name, service in (
("output_abs", OutputAbs),
("output_rel", OutputRel),
("output_toggle", OutputToggle),
("relays", Relays),
("var_abs", VarAbs),
("var_reset", VarReset),
("var_rel", VarRel),
("lock_regulator", LockRegulator),
("led", Led),
("send_keys", SendKeys),
("lock_keys", LockKeys),
("dyn_text", DynText),
("pck", Pck),
):
hass.services.async_register(
DOMAIN, service_name, service(hass), service.schema
)
return True
class LcnDevice(Entity):
def __init__(self, config, address_connection):
self.config = config
self.address_connection = address_connection
self._name = config[CONF_NAME]
@property
def should_poll(self):
return False
async def async_added_to_hass(self):
self.address_connection.register_for_inputs(self.input_received)
@property
def name(self):
return self._name
def input_received(self, input_obj):
raise NotImplementedError("Pure virtual function.")
| true
| true
|
f717047c1e24f5939fecd31454d00b32da770cfe
| 815
|
py
|
Python
|
profiles_project/urls.py
|
Andrei-Ionescu-001/profiles-rest-api1
|
fc5ceb57e3ef0dd00ab9c73b1ab65e5cee8f6c43
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
Andrei-Ionescu-001/profiles-rest-api1
|
fc5ceb57e3ef0dd00ab9c73b1ab65e5cee8f6c43
|
[
"MIT"
] | null | null | null |
profiles_project/urls.py
|
Andrei-Ionescu-001/profiles-rest-api1
|
fc5ceb57e3ef0dd00ab9c73b1ab65e5cee8f6c43
|
[
"MIT"
] | null | null | null |
"""profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # REST API endpoints, delegated to the profiles_api app.
    path("api/", include('profiles_api.urls')),
]
| 35.434783
| 77
| 0.707975
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path("api/", include('profiles_api.urls')),
]
| true
| true
|
f7170601a4453e0e9695176e02bd85d687d2032e
| 408
|
py
|
Python
|
PYTHON/Desafios Python - Mundo 1/027.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
PYTHON/Desafios Python - Mundo 1/027.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
PYTHON/Desafios Python - Mundo 1/027.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
# Challenge 027: read a person's full name and print the first and the last
# name separately.
# e.g. "Ana Maria de Souza" -> first name ANA, last name SOUZA
nome = input('Digite seu nome Completo: ').strip().upper()  # input() already returns str
n = nome.split()
print('Seu nome completo é: {}'.format(nome))
print('Seu primeiro nome é: {}'.format(n[0]))
# Negative indexing is the idiomatic way to get the last element.
print('Seu último nome é: {}'.format(n[-1]))
| 31.384615
| 122
| 0.686275
|
nome = str(input('Digite seu nome Completo: ')).strip().upper()
n = nome.split()
print('Seu nome completo é: {}'.format(nome))
print('Seu primeiro nome é: {}'.format(n[0]))
print('Seu último nome é: {}'.format(n[len(n)-1]))
| true
| true
|
f717065229b5aa3e8bbbf8152fb8d179944a277e
| 395
|
py
|
Python
|
apps/article/migrations/0002_auto_20190227_1556.py
|
sakisrealop/website
|
43a12492a90f1f03ec69a60b74f73a5fa7f4af75
|
[
"MIT"
] | 312
|
2019-02-22T02:12:39.000Z
|
2022-03-28T03:51:44.000Z
|
apps/article/migrations/0002_auto_20190227_1556.py
|
Atheny/website
|
28cf598488465695e4d1e2691e8d162b9c253e27
|
[
"MIT"
] | 21
|
2019-04-01T04:38:50.000Z
|
2022-03-11T23:43:55.000Z
|
apps/article/migrations/0002_auto_20190227_1556.py
|
Atheny/website
|
28cf598488465695e4d1e2691e8d162b9c253e27
|
[
"MIT"
] | 97
|
2019-01-29T02:23:09.000Z
|
2022-02-26T17:25:41.000Z
|
# Generated by Django 2.0.8 on 2019-02-27 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter category_article.add_time to auto-populate on row creation."""

    dependencies = [
        ('article', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category_article',
            name='add_time',
            # auto_now_add: the timestamp is set once, when the row is created.
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| 20.789474
| 58
| 0.607595
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='category_article',
name='add_time',
field=models.DateTimeField(auto_now_add=True),
),
]
| true
| true
|
f7170654671a68ef6a00e9dc095703a97a4c6389
| 13,779
|
py
|
Python
|
run_alicevision.py
|
sinithwar/Ruby_Run_Alice_Vision
|
5ad3e6b02b1bca962cdcdbda1a6d7f1b31474667
|
[
"CC0-1.0"
] | null | null | null |
run_alicevision.py
|
sinithwar/Ruby_Run_Alice_Vision
|
5ad3e6b02b1bca962cdcdbda1a6d7f1b31474667
|
[
"CC0-1.0"
] | null | null | null |
run_alicevision.py
|
sinithwar/Ruby_Run_Alice_Vision
|
5ad3e6b02b1bca962cdcdbda1a6d7f1b31474667
|
[
"CC0-1.0"
] | null | null | null |
import sys, os
import shutil
def SilentMkdir(theDir):
    """Create directory *theDir*, silently ignoring failures.

    Best-effort helper for the Run_* pipeline steps (the directory usually
    already exists on re-runs).  Always returns 0.
    """
    try:
        os.mkdir(theDir)
    except OSError:
        # Narrowed from a bare "except:", which also hid
        # KeyboardInterrupt/SystemExit.  OSError covers the intended
        # best-effort cases: already exists, missing parent, permissions.
        pass
    return 0
def Run_00_CameraInit(baseDir,binDir,srcImageDir):
    """Step 00: run aliceVision_cameraInit over *srcImageDir*.

    Writes cameraInit.sfm into <baseDir>/00_CameraInit/.  The full command
    is printed, then executed via os.system.  Always returns 0.
    """
    SilentMkdir(baseDir + "/00_CameraInit")
    dstDir = baseDir + "/00_CameraInit/"
    cmdLine = (
        binDir + "\\aliceVision_cameraInit.exe"
        + " --defaultFieldOfView 45.0 --verboseLevel info --sensorDatabase \"\" --allowSingleView 1"
        + " --imageFolder \"" + srcImageDir + "\""
        + " --output \"" + dstDir + "cameraInit.sfm\""
    )
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_01_FeatureExtraction(baseDir,binDir, numImages):
    """Step 01: run aliceVision_featureExtraction (SIFT, CPU-forced).

    Processes views 0..numImages-1 of the step-00 cameraInit.sfm and writes
    feature files into <baseDir>/01_FeatureExtraction/.  The command is
    printed and executed via os.system; always returns 0.
    """
    SilentMkdir(baseDir + "/01_FeatureExtraction")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    binName = binDir + "\\aliceVision_featureExtraction.exe"
    dstDir = baseDir + "/01_FeatureExtraction/"
    cmdLine = binName
    cmdLine = cmdLine + " --describerTypes sift --forceCpuExtraction True --verboseLevel info --describerPreset normal"
    # One single range covering all images (no batching at this step).
    cmdLine = cmdLine + " --rangeStart 0 --rangeSize " + str(numImages)
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --output \"" + dstDir + "\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_02_ImageMatching(baseDir,binDir):
    """Step 02: run aliceVision_imageMatching.

    Reads the step-00 cameraInit.sfm and the step-01 features and writes
    imageMatches.txt into <baseDir>/02_ImageMatching/.  The command is
    printed and executed via os.system; always returns 0.
    """
    SilentMkdir(baseDir + "/02_ImageMatching")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    srcFeatures = baseDir + "/01_FeatureExtraction/"
    dstMatches = baseDir + "/02_ImageMatching/imageMatches.txt"
    binName = binDir + "\\aliceVision_imageMatching.exe"
    cmdLine = binName
    # BUG FIX: the original wrote --tree "" / --weights "" with bare quotes
    # inside a double-quoted literal; Python parses that as adjacent-string
    # concatenation and silently drops the empty arguments.  Escape the
    # quotes (as Run_00 does for --sensorDatabase) so the executable really
    # receives empty values for --tree and --weights.
    cmdLine = cmdLine + " --minNbImages 200 --tree \"\" --maxDescriptors 500 --verboseLevel info --weights \"\" --nbMatches 50"
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --featuresFolder \"" + srcFeatures + "\""
    cmdLine = cmdLine + " --output \"" + dstMatches + "\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_03_FeatureMatching(baseDir,binDir):
    """Step 03: run aliceVision_featureMatching.

    Consumes the step-00 .sfm, step-01 features and the step-02 pair list,
    and writes matches into <baseDir>/03_FeatureMatching/.  The command is
    printed and executed via os.system; always returns 0.
    """
    SilentMkdir(baseDir + "/03_FeatureMatching")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    srcFeatures = baseDir + "/01_FeatureExtraction/"
    srcImageMatches = baseDir + "/02_ImageMatching/imageMatches.txt"
    dstMatches = baseDir + "/03_FeatureMatching"
    binName = binDir + "\\aliceVision_featureMatching.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --verboseLevel info --describerTypes sift --maxMatches 0 --exportDebugFiles False --savePutativeMatches False --guidedMatching False"
    cmdLine = cmdLine + " --geometricEstimator acransac --geometricFilterType fundamental_matrix --maxIteration 2048 --distanceRatio 0.8"
    cmdLine = cmdLine + " --photometricMatchingMethod ANN_L2"
    cmdLine = cmdLine + " --imagePairsList \"" + srcImageMatches + "\""
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --featuresFolders \"" + srcFeatures + "\""
    cmdLine = cmdLine + " --output \"" + dstMatches + "\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_04_StructureFromMotion(baseDir,binDir):
    """Step 04: run aliceVision_incrementalSfm (structure-from-motion).

    Consumes the outputs of steps 00-03 and writes cameras.sfm, bundle.sfm
    plus extra info into <baseDir>/04_StructureFromMotion/.  The command is
    printed and executed via os.system; always returns 0.
    """
    SilentMkdir(baseDir + "/04_StructureFromMotion")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    srcFeatures = baseDir + "/01_FeatureExtraction/"
    srcImageMatches = baseDir + "/02_ImageMatching/imageMatches.txt"
    srcMatches = baseDir + "/03_FeatureMatching"
    dstDir = baseDir + "/04_StructureFromMotion"
    binName = binDir + "\\aliceVision_incrementalSfm.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --minAngleForLandmark 2.0 --minNumberOfObservationsForTriangulation 2 --maxAngleInitialPair 40.0 --maxNumberOfMatches 0 --localizerEstimator acransac --describerTypes sift --lockScenePreviouslyReconstructed False --localBAGraphDistance 1"
    # BUG FIX: --initialPairA "" / --initialPairB "" were written with bare
    # quotes inside the literal; Python treats that as adjacent-string
    # concatenation, silently dropping the empty values.  Escape them so the
    # flags actually receive empty arguments as intended.
    cmdLine = cmdLine + " --initialPairA \"\" --initialPairB \"\" --interFileExtension .ply --useLocalBA True"
    cmdLine = cmdLine + " --minInputTrackLength 2 --useOnlyMatchesFromInputFolder False --verboseLevel info --minAngleForTriangulation 3.0 --maxReprojectionError 4.0 --minAngleInitialPair 5.0"
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --featuresFolders \"" + srcFeatures + "\""
    cmdLine = cmdLine + " --matchesFolders \"" + srcMatches + "\""
    cmdLine = cmdLine + " --outputViewsAndPoses \"" + dstDir + "/cameras.sfm\""
    cmdLine = cmdLine + " --extraInfoFolder \"" + dstDir + "\""
    cmdLine = cmdLine + " --output \"" + dstDir + "/bundle.sfm\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_05_PrepareDenseScene(baseDir,binDir):
    """Step 05: run aliceVision_prepareDenseScene on the SfM result.

    Output goes into <baseDir>/05_PrepareDenseScene (later steps expect an
    mvs.ini file there -- TODO confirm this matches the installed
    aliceVision version).  Always returns 0.
    """
    SilentMkdir(baseDir + "/05_PrepareDenseScene")
    #srcSfm = baseDir + "/04_StructureFromMotion/cameras.sfm"
    # (alternative input above kept for reference; bundle.sfm is used)
    srcSfm = baseDir + "/04_StructureFromMotion/bundle.sfm"
    dstDir = baseDir + "/05_PrepareDenseScene"
    binName = binDir + "\\aliceVision_prepareDenseScene.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --verboseLevel info"
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --output \"" + dstDir +"\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_06_CameraConnection(baseDir,binDir):
    """Step 06: run aliceVision_cameraConnection on the step-05 mvs.ini.

    NOTE: this step slightly breaks the tidy per-step layout: it creates a
    camsPairsMatrixFromSeeds.bin file next to mvs.ini (inside
    05_PrepareDenseScene), not inside 06_CameraConnection.
    """
    SilentMkdir(baseDir + "/06_CameraConnection")
    srcIni = baseDir + "/05_PrepareDenseScene/mvs.ini"
    binName = binDir + "\\aliceVision_cameraConnection.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --verboseLevel info"
    cmdLine = cmdLine + " --ini \"" + srcIni + "\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_07_DepthMap(baseDir,binDir,numImages,groupSize):
    """Step 07: run aliceVision_depthMapEstimation in batches.

    The *numImages* views are processed in batches of *groupSize* (the last
    batch may be smaller).  One command per batch is printed and executed
    via os.system.  Always returns 0.
    """
    SilentMkdir(baseDir + "/07_DepthMap")
    # BUG FIX: use floor division.  Under Python 3 "/" yields a float, and
    # range(numGroups) would raise TypeError.
    numGroups = (numImages + (groupSize - 1)) // groupSize
    srcIni = baseDir + "/05_PrepareDenseScene/mvs.ini"
    binName = binDir + "\\aliceVision_depthMapEstimation.exe"
    dstDir = baseDir + "/07_DepthMap"
    cmdLine = binName
    cmdLine = cmdLine + " --sgmGammaC 5.5 --sgmWSH 4 --refineGammaP 8.0 --refineSigma 15 --refineNSamplesHalf 150 --sgmMaxTCams 10 --refineWSH 3 --downscale 2 --refineMaxTCams 6 --verboseLevel info --refineGammaC 15.5 --sgmGammaP 8.0"
    cmdLine = cmdLine + " --refineNiters 100 --refineNDepthsToRefine 31 --refineUseTcOrRcPixSize False"
    cmdLine = cmdLine + " --ini \"" + srcIni + "\""
    cmdLine = cmdLine + " --output \"" + dstDir + "\""
    for groupIter in range(numGroups):
        groupStart = groupSize * groupIter
        # BUG FIX: keep the per-batch size in its own variable.  The
        # original overwrote groupSize with min(...), which would corrupt
        # groupStart for any iteration after a short batch.
        thisGroupSize = min(groupSize, numImages - groupStart)
        print("DepthMap Group %d/%d: %d, %d" % (groupIter, numGroups, groupStart, thisGroupSize))
        cmd = cmdLine + (" --rangeStart %d --rangeSize %d" % (groupStart, thisGroupSize))
        print(cmd)
        os.system(cmd)
    # (Removed a stale block of commented-out sample command lines.)
    return 0
def Run_08_DepthMapFilter(baseDir,binDir):
    """Step 08: filter the raw depth maps for cross-view consistency.

    Builds and runs the aliceVision_depthMapFiltering command line; the
    child process return status is ignored (best-effort pipeline stage).
    Always returns 0.
    """
    SilentMkdir(baseDir + "/08_DepthMapFilter")
    parts = [
        binDir + "\\aliceVision_depthMapFiltering.exe",
        "--minNumOfConsistensCamsWithLowSimilarity 4",
        "--minNumOfConsistensCams 3 --verboseLevel info --pixSizeBall 0",
        "--pixSizeBallWithLowSimilarity 0 --nNearestCams 10",
        "--ini \"" + baseDir + "/05_PrepareDenseScene/mvs.ini\"",
        "--output \"" + baseDir + "/08_DepthMapFilter\"",
        "--depthMapFolder \"" + baseDir + "/07_DepthMap\"",
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_09_Meshing(baseDir,binDir):
    """Step 09: fuse the filtered depth maps into a dense mesh (mesh.obj).

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/09_Meshing")
    parts = [
        binDir + "\\aliceVision_meshing.exe",
        "--simGaussianSizeInit 10.0 --maxInputPoints 50000000 --repartition multiResolution",
        "--simGaussianSize 10.0 --simFactor 15.0 --voteMarginFactor 4.0 --contributeMarginFactor 2.0 --minStep 2 --pixSizeMarginFinalCoef 4.0 --maxPoints 5000000 --maxPointsPerVoxel 1000000 --angleFactor 15.0 --partitioning singleBlock",
        "--minAngleThreshold 1.0 --pixSizeMarginInitCoef 2.0 --refineFuse True --verboseLevel info",
        "--ini \"" + baseDir + "/05_PrepareDenseScene/mvs.ini\"",
        "--depthMapFilterFolder \"" + baseDir + "/08_DepthMapFilter\"",
        "--depthMapFolder \"" + baseDir + "/07_DepthMap\"",
        "--output \"" + baseDir + "/09_Meshing/mesh.obj\"",
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_10_MeshFiltering(baseDir,binDir):
    """Step 10: smooth/clean the dense mesh produced by step 09.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/10_MeshFiltering")
    parts = [
        binDir + "\\aliceVision_meshFiltering.exe",
        "--verboseLevel info --removeLargeTrianglesFactor 60.0 --iterations 5 --keepLargestMeshOnly True",
        "--lambda 1.0",
        "--input \"" + baseDir + "/09_Meshing/mesh.obj\"",
        "--output \"" + baseDir + "/10_MeshFiltering/mesh.obj\"",
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_11_Texturing(baseDir,binDir):
    """Step 11: project the source images onto the filtered mesh as textures.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/11_Texturing")
    parts = [
        binDir + "\\aliceVision_texturing.exe",
        "--textureSide 8192",
        "--downscale 2 --verboseLevel info --padding 15",
        "--unwrapMethod Basic --outputTextureFileType png --flipNormals False --fillHoles False",
        "--inputDenseReconstruction \"" + baseDir + "/09_Meshing/denseReconstruction.bin\"",
        "--inputMesh \"" + baseDir + "/10_MeshFiltering/mesh.obj\"",
        "--ini \"" + baseDir + "/05_PrepareDenseScene/mvs.ini\"",
        "--output \"" + baseDir + "/11_Texturing\"",
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def main():
    """Command-line driver for the AliceVision photogrammetry pipeline.

    Usage: python run_alicevision.py <baseDir> <imgDir> <binDir> <numImages> <runStep>
    where <runStep> is "runall" to execute every stage in order, or "runNN"
    (run00..run11) to execute a single stage. Always returns 0; exits with
    status 0 on a bad argument count.
    """
    print("Prepping Scan, v2.")
    print(sys.argv)
    print(len(sys.argv))
    if len(sys.argv) != 6:
        print("usage: python run_alicevision.py <baseDir> <imgDir> <binDir> <numImages> <runStep>")
        print("Must pass 6 arguments.")
        sys.exit(0)
    baseDir = sys.argv[1]
    srcImageDir = sys.argv[2]
    binDir = sys.argv[3]
    numImages = int(sys.argv[4])
    runStep = sys.argv[5]
    print("Base dir : %s" % baseDir)
    print("Image dir : %s" % srcImageDir)
    print("Bin dir : %s" % binDir)
    print("Num images: %d" % numImages)
    print("Step : %s" % runStep)
    SilentMkdir(baseDir)
    # Ordered stage table; replaces the original if/elif ladder.
    stages = [
        ("run00", lambda: Run_00_CameraInit(baseDir, binDir, srcImageDir)),
        ("run01", lambda: Run_01_FeatureExtraction(baseDir, binDir, numImages)),
        ("run02", lambda: Run_02_ImageMatching(baseDir, binDir)),
        ("run03", lambda: Run_03_FeatureMatching(baseDir, binDir)),
        ("run04", lambda: Run_04_StructureFromMotion(baseDir, binDir)),
        ("run05", lambda: Run_05_PrepareDenseScene(baseDir, binDir)),
        ("run06", lambda: Run_06_CameraConnection(baseDir, binDir)),
        ("run07", lambda: Run_07_DepthMap(baseDir, binDir, numImages, 3)),
        ("run08", lambda: Run_08_DepthMapFilter(baseDir, binDir)),
        ("run09", lambda: Run_09_Meshing(baseDir, binDir)),
        ("run10", lambda: Run_10_MeshFiltering(baseDir, binDir)),
        ("run11", lambda: Run_11_Texturing(baseDir, binDir)),
    ]
    if runStep == "runall":
        for _, stage in stages:
            stage()
    else:
        for name, stage in stages:
            if name == runStep:
                stage()
                break
        else:
            print("Invalid Step: %s" % runStep)
    return 0
# Script entry point. NOTE(review): there is no `if __name__ == "__main__":`
# guard, so merely importing this module runs the whole pipeline.
main()
| 36.165354
| 595
| 0.725016
|
import sys, os
import shutil
def SilentMkdir(theDir):
    """Create directory *theDir*, silently ignoring filesystem failures.

    Typical case: the directory already exists on a pipeline re-run.

    Fix vs. original: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; only OSError (what os.mkdir raises on failure,
    including FileExistsError) is ignored now.

    Returns:
        int: always 0.
    """
    try:
        os.mkdir(theDir)
    except OSError:
        # Best-effort only: existing directory / permission problems are
        # tolerated and surface later when the tools try to write output.
        pass
    return 0
def Run_00_CameraInit(baseDir,binDir,srcImageDir):
    """Step 00: initialise camera intrinsics from the raw image folder.

    Produces 00_CameraInit/cameraInit.sfm. Always returns 0; the child
    process exit status is not checked.
    """
    SilentMkdir(baseDir + "/00_CameraInit")
    parts = [
        binDir + "\\aliceVision_cameraInit.exe",
        '--defaultFieldOfView 45.0 --verboseLevel info --sensorDatabase "" --allowSingleView 1',
        '--imageFolder "' + srcImageDir + '"',
        '--output "' + baseDir + '/00_CameraInit/cameraInit.sfm"',
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_01_FeatureExtraction(baseDir,binDir, numImages):
    """Step 01: extract SIFT features for all *numImages* views (CPU only).

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/01_FeatureExtraction")
    parts = [
        binDir + "\\aliceVision_featureExtraction.exe",
        "--describerTypes sift --forceCpuExtraction True --verboseLevel info --describerPreset normal",
        "--rangeStart 0 --rangeSize " + str(numImages),
        '--input "' + baseDir + '/00_CameraInit/cameraInit.sfm"',
        '--output "' + baseDir + '/01_FeatureExtraction/"',
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_02_ImageMatching(baseDir,binDir):
    """Step 02: pick candidate image pairs for feature matching.

    Produces 02_ImageMatching/imageMatches.txt. Always returns 0; the
    child process exit status is not checked.

    Fix vs. original: the options line used ``" --tree "" --max..."`` —
    adjacent-string concatenation, which silently DROPPED the intended
    empty-quoted values and produced ``--tree --maxDescriptors ...``.
    The quotes are now escaped so the executable receives explicit empty
    arguments, matching how Run_00_CameraInit passes --sensorDatabase "".
    """
    SilentMkdir(baseDir + "/02_ImageMatching")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    srcFeatures = baseDir + "/01_FeatureExtraction/"
    dstMatches = baseDir + "/02_ImageMatching/imageMatches.txt"
    binName = binDir + "\\aliceVision_imageMatching.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --minNbImages 200 --tree \"\" --maxDescriptors 500 --verboseLevel info --weights \"\" --nbMatches 50"
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --featuresFolder \"" + srcFeatures + "\""
    cmdLine = cmdLine + " --output \"" + dstMatches + "\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_03_FeatureMatching(baseDir,binDir):
    """Step 03: match SIFT features between the candidate image pairs.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/03_FeatureMatching")
    parts = [
        binDir + "\\aliceVision_featureMatching.exe",
        "--verboseLevel info --describerTypes sift --maxMatches 0 --exportDebugFiles False --savePutativeMatches False --guidedMatching False",
        "--geometricEstimator acransac --geometricFilterType fundamental_matrix --maxIteration 2048 --distanceRatio 0.8",
        "--photometricMatchingMethod ANN_L2",
        '--imagePairsList "' + baseDir + '/02_ImageMatching/imageMatches.txt"',
        '--input "' + baseDir + '/00_CameraInit/cameraInit.sfm"',
        '--featuresFolders "' + baseDir + '/01_FeatureExtraction/"',
        '--output "' + baseDir + '/03_FeatureMatching"',
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_04_StructureFromMotion(baseDir,binDir):
    """Step 04: incremental structure-from-motion over the matched features.

    Produces 04_StructureFromMotion/{bundle.sfm,cameras.sfm}. Always
    returns 0; the child process exit status is not checked.

    Fixes vs. original:
    - ``" --initialPairA "" --initialPairB "" ..."`` was adjacent-string
      concatenation, silently dropping the intended empty-quoted values
      and yielding ``--initialPairA --initialPairB ...``; the quotes are
      now escaped so explicit empty arguments reach the executable.
    - removed the unused local ``srcImageMatches``.
    """
    SilentMkdir(baseDir + "/04_StructureFromMotion")
    srcSfm = baseDir + "/00_CameraInit/cameraInit.sfm"
    srcFeatures = baseDir + "/01_FeatureExtraction/"
    srcMatches = baseDir + "/03_FeatureMatching"
    dstDir = baseDir + "/04_StructureFromMotion"
    binName = binDir + "\\aliceVision_incrementalSfm.exe"
    cmdLine = binName
    cmdLine = cmdLine + " --minAngleForLandmark 2.0 --minNumberOfObservationsForTriangulation 2 --maxAngleInitialPair 40.0 --maxNumberOfMatches 0 --localizerEstimator acransac --describerTypes sift --lockScenePreviouslyReconstructed False --localBAGraphDistance 1"
    cmdLine = cmdLine + " --initialPairA \"\" --initialPairB \"\" --interFileExtension .ply --useLocalBA True"
    cmdLine = cmdLine + " --minInputTrackLength 2 --useOnlyMatchesFromInputFolder False --verboseLevel info --minAngleForTriangulation 3.0 --maxReprojectionError 4.0 --minAngleInitialPair 5.0"
    cmdLine = cmdLine + " --input \"" + srcSfm + "\""
    cmdLine = cmdLine + " --featuresFolders \"" + srcFeatures + "\""
    cmdLine = cmdLine + " --matchesFolders \"" + srcMatches + "\""
    cmdLine = cmdLine + " --outputViewsAndPoses \"" + dstDir + "/cameras.sfm\""
    cmdLine = cmdLine + " --extraInfoFolder \"" + dstDir + "\""
    cmdLine = cmdLine + " --output \"" + dstDir + "/bundle.sfm\""
    print(cmdLine)
    os.system(cmdLine)
    return 0
def Run_05_PrepareDenseScene(baseDir,binDir):
    """Step 05: convert the SfM bundle into the dense-reconstruction layout.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/05_PrepareDenseScene")
    parts = [
        binDir + "\\aliceVision_prepareDenseScene.exe",
        "--verboseLevel info",
        '--input "' + baseDir + '/04_StructureFromMotion/bundle.sfm"',
        '--output "' + baseDir + '/05_PrepareDenseScene"',
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_06_CameraConnection(baseDir,binDir):
    """Step 06: compute camera visibility/connectivity from mvs.ini.

    The output directory is created for consistency with the other steps,
    but this tool updates state referenced via the .ini file rather than
    taking an --output flag. Always returns 0.
    """
    SilentMkdir(baseDir + "/06_CameraConnection")
    parts = [
        binDir + "\\aliceVision_cameraConnection.exe",
        "--verboseLevel info",
        '--ini "' + baseDir + '/05_PrepareDenseScene/mvs.ini"',
    ]
    cmd = " ".join(parts)
    print(cmd)
    os.system(cmd)
    return 0
def Run_07_DepthMap(baseDir,binDir,numImages,groupSize):
    """Step 07: estimate a depth map per view, invoking the tool in batches.

    Args:
        baseDir: pipeline working directory.
        binDir: directory containing the AliceVision executables.
        numImages: total number of input views.
        groupSize: number of views per depthMapEstimation invocation.

    Fixes vs. original:
    - ``numGroups`` used true division; on Python 3 that yields a float
      and ``range(numGroups)`` raises TypeError. Ceil-divide with ``//``.
    - the loop clamped the last batch by overwriting the ``groupSize``
      parameter itself, which would corrupt the start offset of any later
      iteration; the clamped size is now a separate local.

    Always returns 0; child process exit statuses are not checked.
    """
    SilentMkdir(baseDir + "/07_DepthMap")
    numGroups = (numImages + (groupSize - 1)) // groupSize  # ceil division
    srcIni = baseDir + "/05_PrepareDenseScene/mvs.ini"
    binName = binDir + "\\aliceVision_depthMapEstimation.exe"
    dstDir = baseDir + "/07_DepthMap"
    cmdLine = binName
    cmdLine = cmdLine + " --sgmGammaC 5.5 --sgmWSH 4 --refineGammaP 8.0 --refineSigma 15 --refineNSamplesHalf 150 --sgmMaxTCams 10 --refineWSH 3 --downscale 2 --refineMaxTCams 6 --verboseLevel info --refineGammaC 15.5 --sgmGammaP 8.0"
    cmdLine = cmdLine + " --refineNiters 100 --refineNDepthsToRefine 31 --refineUseTcOrRcPixSize False"
    cmdLine = cmdLine + " --ini \"" + srcIni + "\""
    cmdLine = cmdLine + " --output \"" + dstDir + "\""
    for groupIter in range(numGroups):
        groupStart = groupSize * groupIter
        thisGroupSize = min(groupSize, numImages - groupStart)  # last batch may be short
        print("DepthMap Group %d/%d: %d, %d" % (groupIter, numGroups, groupStart, thisGroupSize))
        cmd = cmdLine + (" --rangeStart %d --rangeSize %d" % (groupStart, thisGroupSize))
        print(cmd)
        os.system(cmd)
    return 0
def Run_08_DepthMapFilter(baseDir,binDir):
    """Step 08: filter raw depth maps for consistency across cameras.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/08_DepthMapFilter")
    exe = binDir + "\\aliceVision_depthMapFiltering.exe"
    iniPath = baseDir + "/05_PrepareDenseScene/mvs.ini"
    outDir = baseDir + "/08_DepthMapFilter"
    depthDir = baseDir + "/07_DepthMap"
    cmd = exe
    for opt in (
        " --minNumOfConsistensCamsWithLowSimilarity 4",
        " --minNumOfConsistensCams 3 --verboseLevel info --pixSizeBall 0",
        " --pixSizeBallWithLowSimilarity 0 --nNearestCams 10",
        " --ini \"" + iniPath + "\"",
        " --output \"" + outDir + "\"",
        " --depthMapFolder \"" + depthDir + "\"",
    ):
        cmd += opt
    print(cmd)
    os.system(cmd)
    return 0
def Run_09_Meshing(baseDir,binDir):
    """Step 09: fuse filtered depth maps into a dense mesh (mesh.obj).

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/09_Meshing")
    exe = binDir + "\\aliceVision_meshing.exe"
    iniPath = baseDir + "/05_PrepareDenseScene/mvs.ini"
    filterDir = baseDir + "/08_DepthMapFilter"
    depthDir = baseDir + "/07_DepthMap"
    outDir = baseDir + "/09_Meshing"
    cmd = exe
    for opt in (
        " --simGaussianSizeInit 10.0 --maxInputPoints 50000000 --repartition multiResolution",
        " --simGaussianSize 10.0 --simFactor 15.0 --voteMarginFactor 4.0 --contributeMarginFactor 2.0 --minStep 2 --pixSizeMarginFinalCoef 4.0 --maxPoints 5000000 --maxPointsPerVoxel 1000000 --angleFactor 15.0 --partitioning singleBlock",
        " --minAngleThreshold 1.0 --pixSizeMarginInitCoef 2.0 --refineFuse True --verboseLevel info",
        " --ini \"" + iniPath + "\"",
        " --depthMapFilterFolder \"" + filterDir + "\"",
        " --depthMapFolder \"" + depthDir + "\"",
        " --output \"" + outDir + "/mesh.obj\"",
    ):
        cmd += opt
    print(cmd)
    os.system(cmd)
    return 0
def Run_10_MeshFiltering(baseDir,binDir):
    """Step 10: smooth and clean the dense mesh from step 09.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/10_MeshFiltering")
    exe = binDir + "\\aliceVision_meshFiltering.exe"
    meshIn = baseDir + "/09_Meshing/mesh.obj"
    meshOut = baseDir + "/10_MeshFiltering/mesh.obj"
    cmd = exe
    for opt in (
        " --verboseLevel info --removeLargeTrianglesFactor 60.0 --iterations 5 --keepLargestMeshOnly True",
        " --lambda 1.0",
        " --input \"" + meshIn + "\"",
        " --output \"" + meshOut + "\"",
    ):
        cmd += opt
    print(cmd)
    os.system(cmd)
    return 0
def Run_11_Texturing(baseDir,binDir):
    """Step 11: bake image textures onto the filtered mesh.

    Always returns 0; the child process exit status is not checked.
    """
    SilentMkdir(baseDir + "/11_Texturing")
    exe = binDir + "\\aliceVision_texturing.exe"
    meshIn = baseDir + "/10_MeshFiltering/mesh.obj"
    reconIn = baseDir + "/09_Meshing/denseReconstruction.bin"
    iniPath = baseDir + "/05_PrepareDenseScene/mvs.ini"
    outDir = baseDir + "/11_Texturing"
    cmd = exe
    for opt in (
        " --textureSide 8192",
        " --downscale 2 --verboseLevel info --padding 15",
        " --unwrapMethod Basic --outputTextureFileType png --flipNormals False --fillHoles False",
        " --inputDenseReconstruction \"" + reconIn + "\"",
        " --inputMesh \"" + meshIn + "\"",
        " --ini \"" + iniPath + "\"",
        " --output \"" + outDir + "\"",
    ):
        cmd += opt
    print(cmd)
    os.system(cmd)
    return 0
def main():
    """Command-line driver for the AliceVision photogrammetry pipeline.

    Usage: python run_alicevision.py <baseDir> <imgDir> <binDir> <numImages> <runStep>
    where <runStep> is "runall" to execute every stage in order, or "runNN"
    (run00..run11) for a single stage. Always returns 0; exits with status
    0 on a bad argument count.
    """
    print("Prepping Scan, v2.")
    print(sys.argv)
    print(len(sys.argv))
    if len(sys.argv) != 6:
        print("usage: python run_alicevision.py <baseDir> <imgDir> <binDir> <numImages> <runStep>")
        print("Must pass 6 arguments.")
        sys.exit(0)
    baseDir = sys.argv[1]
    srcImageDir = sys.argv[2]
    binDir = sys.argv[3]
    numImages = int(sys.argv[4])
    runStep = sys.argv[5]
    print("Base dir : %s" % baseDir)
    print("Image dir : %s" % srcImageDir)
    print("Bin dir : %s" % binDir)
    print("Num images: %d" % numImages)
    print("Step : %s" % runStep)
    SilentMkdir(baseDir)
    # Ordered stage table; replaces the original if/elif ladder.
    stages = [
        ("run00", lambda: Run_00_CameraInit(baseDir, binDir, srcImageDir)),
        ("run01", lambda: Run_01_FeatureExtraction(baseDir, binDir, numImages)),
        ("run02", lambda: Run_02_ImageMatching(baseDir, binDir)),
        ("run03", lambda: Run_03_FeatureMatching(baseDir, binDir)),
        ("run04", lambda: Run_04_StructureFromMotion(baseDir, binDir)),
        ("run05", lambda: Run_05_PrepareDenseScene(baseDir, binDir)),
        ("run06", lambda: Run_06_CameraConnection(baseDir, binDir)),
        ("run07", lambda: Run_07_DepthMap(baseDir, binDir, numImages, 3)),
        ("run08", lambda: Run_08_DepthMapFilter(baseDir, binDir)),
        ("run09", lambda: Run_09_Meshing(baseDir, binDir)),
        ("run10", lambda: Run_10_MeshFiltering(baseDir, binDir)),
        ("run11", lambda: Run_11_Texturing(baseDir, binDir)),
    ]
    if runStep == "runall":
        for _, stage in stages:
            stage()
    else:
        for name, stage in stages:
            if name == runStep:
                stage()
                break
        else:
            print("Invalid Step: %s" % runStep)
    return 0
# Script entry point. NOTE(review): no `if __name__ == "__main__":` guard —
# importing this module runs the whole pipeline immediately.
main()
| true
| true
|
f71707097c636c343b5861abc9032e2e393f7ac3
| 2,560
|
py
|
Python
|
lib/bullet/src/examples/pybullet/inverse_kinematics.py
|
mtesseracttech/CustomEngine
|
1a9ed564408ae29fe49681a810b851403d71f486
|
[
"Apache-2.0"
] | 5
|
2016-07-04T15:00:01.000Z
|
2021-01-19T02:42:48.000Z
|
lib/bullet/src/examples/pybullet/inverse_kinematics.py
|
mtesseracttech/CustomEngine
|
1a9ed564408ae29fe49681a810b851403d71f486
|
[
"Apache-2.0"
] | null | null | null |
lib/bullet/src/examples/pybullet/inverse_kinematics.py
|
mtesseracttech/CustomEngine
|
1a9ed564408ae29fe49681a810b851403d71f486
|
[
"Apache-2.0"
] | 1
|
2016-04-23T07:47:32.000Z
|
2016-04-23T07:47:32.000Z
|
# PyBullet demo: drive a KUKA iiwa arm along a circular target path using
# inverse kinematics, drawing debug trails for the target (black) and the
# actual end-effector (red). Runs forever; close the GUI / Ctrl-C to stop.
import pybullet as p
import time  # NOTE(review): unused in this script
import math
from datetime import datetime
#clid = p.connect(p.SHARED_MEMORY)
p.connect(p.GUI)
p.loadURDF("plane.urdf",[0,0,-0.3])
kukaId = p.loadURDF("kuka_iiwa/model.urdf",[0,0,0])
p.resetBasePositionAndOrientation(kukaId,[0,0,0],[0,0,0,1])
# Link index of the arm's end effector in the KUKA iiwa URDF.
kukaEndEffectorIndex = 6
numJoints = p.getNumJoints(kukaId)
# The rest of the script hard-codes 7 joints; bail out on any other model.
if (numJoints!=7):
	exit()
#lower limits for null space
ll=[-.967,-2 ,-2.96,0.19,-2.96,-2.09,-3.05]
#upper limits for null space
ul=[.967,2 ,2.96,2.29,2.96,2.09,3.05]
#joint ranges for null space
jr=[5.8,4,5.8,4,5.8,4,6]
#restposes for null space
rp=[0,0,0,0.5*math.pi,0,-math.pi*0.5*0.66,0]
# Start from the rest pose.
for i in range (numJoints):
	p.resetJointState(kukaId,i,rp[i])
p.setGravity(0,0,0)
t=0.
prevPose=[0,0,0]
prevPose1=[0,0,0]
hasPrevPose = 0
# Feature toggles: 0/1 flags selecting the IK variant and stepping mode.
useNullSpace = 0
useOrientation = 1
useSimulation = 1
useRealTimeSimulation = 1
p.setRealTimeSimulation(useRealTimeSimulation)
#trailDuration is duration (in seconds) after debug lines will be removed automatically
#use 0 for no-removal
trailDuration = 15
while 1:
	if (useRealTimeSimulation):
		# Derive the path parameter from the wall clock: one full circle
		# per minute (second-of-minute mapped onto 0..2*pi).
		dt = datetime.now()
		t = (dt.second/60.)*2.*math.pi
	else:
		t=t+0.1
	if (useSimulation and useRealTimeSimulation==0):
		p.stepSimulation()
	for i in range (1):
		# Circular target in the y-z plane at fixed x.
		pos = [-0.4,0.2*math.cos(t),0.+0.2*math.sin(t)]
		#end effector points down, not up (in case useOrientation==1)
		orn = p.getQuaternionFromEuler([0,-math.pi,0])
		if (useNullSpace==1):
			if (useOrientation==1):
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,orn,ll,ul,jr,rp)
			else:
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,lowerLimits=ll, upperLimits=ul, jointRanges=jr, restPoses=rp)
		else:
			if (useOrientation==1):
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,orn)
			else:
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos)
	if (useSimulation):
		# Track the IK solution with position-controlled joint motors.
		for i in range (numJoints):
			p.setJointMotorControl2(bodyIndex=kukaId,jointIndex=i,controlMode=p.POSITION_CONTROL,targetPosition=jointPoses[i],targetVelocity=0,force=500,positionGain=0.03,velocityGain=1)
	else:
		#reset the joint state (ignoring all dynamics, not recommended to use during simulation)
		for i in range (numJoints):
			p.resetJointState(kukaId,i,jointPoses[i])
	ls = p.getLinkState(kukaId,kukaEndEffectorIndex)
	if (hasPrevPose):
		# Black trail: commanded target; red trail: actual link position
		# (ls[4] is the link's world frame position per getLinkState).
		p.addUserDebugLine(prevPose,pos,[0,0,0.3],1,trailDuration)
		p.addUserDebugLine(prevPose1,ls[4],[1,0,0],1,trailDuration)
	prevPose=pos
	prevPose1=ls[4]
	hasPrevPose = 1
| 29.767442
| 178
| 0.732813
|
# PyBullet demo (comment-stripped copy): KUKA iiwa follows a circular target
# via inverse kinematics, with debug trails for target and end effector.
# Runs forever; close the GUI / Ctrl-C to stop.
import pybullet as p
import time  # NOTE(review): unused in this script
import math
from datetime import datetime
p.connect(p.GUI)
p.loadURDF("plane.urdf",[0,0,-0.3])
kukaId = p.loadURDF("kuka_iiwa/model.urdf",[0,0,0])
p.resetBasePositionAndOrientation(kukaId,[0,0,0],[0,0,0,1])
# End-effector link index in the KUKA iiwa URDF.
kukaEndEffectorIndex = 6
numJoints = p.getNumJoints(kukaId)
# Script assumes exactly 7 joints (all the limit arrays below have 7 entries).
if (numJoints!=7):
	exit()
# Null-space IK parameters: lower limits, upper limits, joint ranges, rest poses.
ll=[-.967,-2 ,-2.96,0.19,-2.96,-2.09,-3.05]
ul=[.967,2 ,2.96,2.29,2.96,2.09,3.05]
jr=[5.8,4,5.8,4,5.8,4,6]
rp=[0,0,0,0.5*math.pi,0,-math.pi*0.5*0.66,0]
# Start the arm at its rest pose.
for i in range (numJoints):
	p.resetJointState(kukaId,i,rp[i])
p.setGravity(0,0,0)
t=0.
prevPose=[0,0,0]
prevPose1=[0,0,0]
hasPrevPose = 0
# Feature toggles (0/1) selecting IK variant and stepping mode.
useNullSpace = 0
useOrientation = 1
useSimulation = 1
useRealTimeSimulation = 1
p.setRealTimeSimulation(useRealTimeSimulation)
# Seconds before the debug trail lines are removed automatically.
trailDuration = 15
while 1:
	if (useRealTimeSimulation):
		# Path parameter from the wall clock: one circle per minute.
		dt = datetime.now()
		t = (dt.second/60.)*2.*math.pi
	else:
		t=t+0.1
	if (useSimulation and useRealTimeSimulation==0):
		p.stepSimulation()
	for i in range (1):
		# Circular target in the y-z plane at fixed x; end effector points down.
		pos = [-0.4,0.2*math.cos(t),0.+0.2*math.sin(t)]
		orn = p.getQuaternionFromEuler([0,-math.pi,0])
		if (useNullSpace==1):
			if (useOrientation==1):
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,orn,ll,ul,jr,rp)
			else:
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,lowerLimits=ll, upperLimits=ul, jointRanges=jr, restPoses=rp)
		else:
			if (useOrientation==1):
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos,orn)
			else:
				jointPoses = p.calculateInverseKinematics(kukaId,kukaEndEffectorIndex,pos)
	if (useSimulation):
		# Track the IK solution with position-controlled joint motors.
		for i in range (numJoints):
			p.setJointMotorControl2(bodyIndex=kukaId,jointIndex=i,controlMode=p.POSITION_CONTROL,targetPosition=jointPoses[i],targetVelocity=0,force=500,positionGain=0.03,velocityGain=1)
	else:
		# Teleport joints directly, ignoring dynamics (not for live simulation).
		for i in range (numJoints):
			p.resetJointState(kukaId,i,jointPoses[i])
	ls = p.getLinkState(kukaId,kukaEndEffectorIndex)
	if (hasPrevPose):
		# Dark trail = commanded target; red trail = actual link position (ls[4]).
		p.addUserDebugLine(prevPose,pos,[0,0,0.3],1,trailDuration)
		p.addUserDebugLine(prevPose1,ls[4],[1,0,0],1,trailDuration)
	prevPose=pos
	prevPose1=ls[4]
	hasPrevPose = 1
| true
| true
|
f717072f58bacbac6ee72e5f50a62ccb40155d02
| 688
|
py
|
Python
|
bdlb/diabetic_retinopathy_diagnosis/__init__.py
|
jrubin01/bdl-benchmarks
|
1a6118750006febcd73a68af488a61673b6924b1
|
[
"Apache-2.0"
] | 1
|
2020-10-30T09:13:49.000Z
|
2020-10-30T09:13:49.000Z
|
bdlb/diabetic_retinopathy_diagnosis/__init__.py
|
jrubin01/bdl-benchmarks
|
1a6118750006febcd73a68af488a61673b6924b1
|
[
"Apache-2.0"
] | 5
|
2019-10-26T19:05:04.000Z
|
2022-03-30T08:07:46.000Z
|
bdlb/diabetic_retinopathy_diagnosis/__init__.py
|
jrubin01/bdl-benchmarks
|
1a6118750006febcd73a68af488a61673b6924b1
|
[
"Apache-2.0"
] | 2
|
2020-09-12T13:06:14.000Z
|
2020-12-01T13:22:45.000Z
|
# Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| 49.142857
| 80
| 0.677326
| true
| true
|
|
f7170733d9d5dfa19bfa97dde2fa1c168c81130c
| 9,133
|
py
|
Python
|
pysnmp-with-texts/V-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/V-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/V-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module V-BRIDGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/V-BRIDGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:33:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated pysmi output — `mibBuilder` is injected by
# the pysnmp MIB loader when this module is executed; the file is not
# importable standalone. Do not hand-edit beyond comments.
# --- Symbols imported from other MIB modules, resolved via mibBuilder ---
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
dot1dBridge, dot1dBasePort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBridge", "dot1dBasePort")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, Counter64, IpAddress, ModuleIdentity, Counter32, ObjectIdentity, Gauge32, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "Counter64", "IpAddress", "ModuleIdentity", "Counter32", "ObjectIdentity", "Gauge32", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- Module identity: IEEE 802.1v extension of the Bridge MIB (.1.3.6.1.2.1.17.13) ---
vBridgeMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 17, 13))
vBridgeMIB.setRevisions(('2001-07-13 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: vBridgeMIB.setRevisionsDescriptions(('Draft 0',))
if mibBuilder.loadTexts: vBridgeMIB.setLastUpdated('200107130000Z')
if mibBuilder.loadTexts: vBridgeMIB.setOrganization('IETF Bridge MIB Working Group')
if mibBuilder.loadTexts: vBridgeMIB.setContactInfo('Email: Bridge-mib@ietf.org')
if mibBuilder.loadTexts: vBridgeMIB.setDescription('The Bridge MIB Extension module for managing devices that support VLAN Classification by Protocol and Port defined in IEEE 802.1v.')
# --- dot1vProtocolGroupTable: maps protocol templates to protocol group IDs ---
vBridgeMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 1))
dot1vProtocol = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 1, 1))
dot1vProtocolGroupTable = MibTable((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1), )
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setReference('IEEE 802.1v clause 8.6.4')
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setDescription('A table that contains mappings from Protocol Templates to Protocol Group Identifiers used for Port-and-Protocol-based VLAN Classification.')
dot1vProtocolGroupEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1), ).setIndexNames((0, "V-BRIDGE-MIB", "dot1vProtocolTemplateFrameType"), (0, "V-BRIDGE-MIB", "dot1vProtocolTemplateProtocolValue"))
if mibBuilder.loadTexts: dot1vProtocolGroupEntry.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupEntry.setDescription('A mapping from a Protocol Template to a Protocol Group Identifier.')
dot1vProtocolTemplateFrameType = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ethernet", 1), ("rfc1042", 2), ("snap8021H", 3), ("snapOther", 4), ("llcOther", 5))))
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setReference('IEEE 802.1v clause 8.6.2')
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setDescription("The data-link encapsulation format or the 'detagged_frame_type' in a Protocol Template.")
dot1vProtocolTemplateProtocolValue = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 2), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(2, 2), ValueSizeConstraint(5, 5), )))
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setReference('IEEE 802.1v clause 8.6.2')
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setDescription("The identification of the protocol above the data-link layer in a Protocol Template. Depending on the frame type, the octet string will have one of the following values: For 'ethernet', 'rfc1042' and 'snap8021H', this is the 16-bit (2-octet) IEEE 802.3 Type Field. For 'snapOther', this is the 40-bit (5-octet) PID. For 'llcOther', this is the 2-octet IEEE 802.2 LSAP pair: first octet for DSAP and second octet for SSAP.")
dot1vProtocolGroupId = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dot1vProtocolGroupId.setReference('IEEE 802.1v clause 8.6.3, 12.10.2.1')
if mibBuilder.loadTexts: dot1vProtocolGroupId.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupId.setDescription('Represents a group of protocols that are associated together when assigning a VID to a frame.')
# --- dot1vProtocolPortTable: per-port VID assignment for each protocol group ---
dot1vProtocolPortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2), )
if mibBuilder.loadTexts: dot1vProtocolPortTable.setReference('IEEE 802.1v clause 8.4.4')
if mibBuilder.loadTexts: dot1vProtocolPortTable.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortTable.setDescription('A table that contains VID sets used for Port-and-Protocol-based VLAN Classification.')
dot1vProtocolPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"), (0, "V-BRIDGE-MIB", "dot1vProtocolPortGroupId"))
if mibBuilder.loadTexts: dot1vProtocolPortEntry.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortEntry.setDescription('A VID set for a port.')
dot1vProtocolPortGroupId = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setReference('IEEE 802.1v clause 8.6.3, 12.10.1.2')
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setDescription('Designates a group of protocols in the Protocol Group Database.')
dot1vProtocolPortGroupVid = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setReference('IEEE 802.1v clause 8.4.4, 12.10.1.2')
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setDescription('The VID associated with a group of protocols for each port.')
# --- Conformance: object groups and module compliance statement ---
vBridgeConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2))
vBridgeGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2, 1))
vBridgeCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2, 2))
vBridgeDeviceGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 17, 13, 2, 1, 1)).setObjects(("V-BRIDGE-MIB", "dot1vProtocolGroupId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgeDeviceGroup = vBridgeDeviceGroup.setStatus('current')
if mibBuilder.loadTexts: vBridgeDeviceGroup.setDescription('VLAN classification information for the bridge.')
vBridgePortGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 17, 13, 2, 1, 2)).setObjects(("V-BRIDGE-MIB", "dot1vProtocolPortGroupVid"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgePortGroup = vBridgePortGroup.setStatus('current')
if mibBuilder.loadTexts: vBridgePortGroup.setDescription('VLAN classification information for individual ports.')
vBridgeCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 17, 13, 2, 2, 1)).setObjects(("V-BRIDGE-MIB", "vBridgeDeviceGroup"), ("V-BRIDGE-MIB", "vBridgePortGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgeCompliance = vBridgeCompliance.setStatus('current')
if mibBuilder.loadTexts: vBridgeCompliance.setDescription('The compliance statement for device support of bridging services.')
# Export everything so other MIB modules can import these symbols by name.
mibBuilder.exportSymbols("V-BRIDGE-MIB", vBridgeGroups=vBridgeGroups, dot1vProtocolGroupId=dot1vProtocolGroupId, vBridgePortGroup=vBridgePortGroup, vBridgeConformance=vBridgeConformance, dot1vProtocolGroupTable=dot1vProtocolGroupTable, dot1vProtocolPortEntry=dot1vProtocolPortEntry, vBridgeDeviceGroup=vBridgeDeviceGroup, dot1vProtocolPortGroupVid=dot1vProtocolPortGroupVid, PYSNMP_MODULE_ID=vBridgeMIB, vBridgeCompliance=vBridgeCompliance, dot1vProtocolPortGroupId=dot1vProtocolPortGroupId, vBridgeMIBObjects=vBridgeMIBObjects, vBridgeMIB=vBridgeMIB, dot1vProtocolTemplateFrameType=dot1vProtocolTemplateFrameType, dot1vProtocol=dot1vProtocol, dot1vProtocolPortTable=dot1vProtocolPortTable, vBridgeCompliances=vBridgeCompliances, dot1vProtocolGroupEntry=dot1vProtocolGroupEntry, dot1vProtocolTemplateProtocolValue=dot1vProtocolTemplateProtocolValue)
| 118.61039
| 849
| 0.783861
|
# V-BRIDGE-MIB: SNMP objects for IEEE 802.1v Port-and-Protocol-based VLAN
# classification (apparently emitted by the pysmi/pysnmp MIB compiler;
# `mibBuilder` is supplied by the pysnmp runtime that loads this module).
# Pull base ASN.1 / SMI types from the already-loaded core MIB modules.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
dot1dBridge, dot1dBasePort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBridge", "dot1dBasePort")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, Counter64, IpAddress, ModuleIdentity, Counter32, ObjectIdentity, Gauge32, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "Counter64", "IpAddress", "ModuleIdentity", "Counter32", "ObjectIdentity", "Gauge32", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: 1.3.6.1.2.1.17.13 (under dot1dBridge).
vBridgeMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 17, 13))
vBridgeMIB.setRevisions(('2001-07-13 00:00',))
# Per-revision descriptions are only supported by pysnmp > 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: vBridgeMIB.setRevisionsDescriptions(('Draft 0',))
if mibBuilder.loadTexts: vBridgeMIB.setLastUpdated('200107130000Z')
if mibBuilder.loadTexts: vBridgeMIB.setOrganization('IETF Bridge MIB Working Group')
if mibBuilder.loadTexts: vBridgeMIB.setContactInfo('Email: Bridge-mib@ietf.org')
if mibBuilder.loadTexts: vBridgeMIB.setDescription('The Bridge MIB Extension module for managing devices that support VLAN Classification by Protocol and Port defined in IEEE 802.1v.')
vBridgeMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 1))
dot1vProtocol = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 1, 1))
# dot1vProtocolGroupTable: maps (frame type, protocol value) templates to
# Protocol Group Identifiers.
dot1vProtocolGroupTable = MibTable((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1), )
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setReference('IEEE 802.1v clause 8.6.4')
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupTable.setDescription('A table that contains mappings from Protocol Templates to Protocol Group Identifiers used for Port-and-Protocol-based VLAN Classification.')
dot1vProtocolGroupEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1), ).setIndexNames((0, "V-BRIDGE-MIB", "dot1vProtocolTemplateFrameType"), (0, "V-BRIDGE-MIB", "dot1vProtocolTemplateProtocolValue"))
if mibBuilder.loadTexts: dot1vProtocolGroupEntry.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupEntry.setDescription('A mapping from a Protocol Template to a Protocol Group Identifier.')
dot1vProtocolTemplateFrameType = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ethernet", 1), ("rfc1042", 2), ("snap8021H", 3), ("snapOther", 4), ("llcOther", 5))))
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setReference('IEEE 802.1v clause 8.6.2')
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolTemplateFrameType.setDescription("The data-link encapsulation format or the 'detagged_frame_type' in a Protocol Template.")
# Protocol value is either a 2-octet or a 5-octet string (see description).
dot1vProtocolTemplateProtocolValue = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 2), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(2, 2), ValueSizeConstraint(5, 5), )))
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setReference('IEEE 802.1v clause 8.6.2')
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolTemplateProtocolValue.setDescription("The identification of the protocol above the data-link layer in a Protocol Template. Depending on the frame type, the octet string will have one of the following values: For 'ethernet', 'rfc1042' and 'snap8021H', this is the 16-bit (2-octet) IEEE 802.3 Type Field. For 'snapOther', this is the 40-bit (5-octet) PID. For 'llcOther', this is the 2-octet IEEE 802.2 LSAP pair: first octet for DSAP and second octet for SSAP.")
dot1vProtocolGroupId = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dot1vProtocolGroupId.setReference('IEEE 802.1v clause 8.6.3, 12.10.2.1')
if mibBuilder.loadTexts: dot1vProtocolGroupId.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolGroupId.setDescription('Represents a group of protocols that are associated together when assigning a VID to a frame.')
# dot1vProtocolPortTable: per-port VID assignment for each protocol group,
# indexed by (dot1dBasePort, dot1vProtocolPortGroupId).
dot1vProtocolPortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2), )
if mibBuilder.loadTexts: dot1vProtocolPortTable.setReference('IEEE 802.1v clause 8.4.4')
if mibBuilder.loadTexts: dot1vProtocolPortTable.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortTable.setDescription('A table that contains VID sets used for Port-and-Protocol-based VLAN Classification.')
dot1vProtocolPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort"), (0, "V-BRIDGE-MIB", "dot1vProtocolPortGroupId"))
if mibBuilder.loadTexts: dot1vProtocolPortEntry.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortEntry.setDescription('A VID set for a port.')
dot1vProtocolPortGroupId = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setReference('IEEE 802.1v clause 8.6.3, 12.10.1.2')
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortGroupId.setDescription('Designates a group of protocols in the Protocol Group Database.')
dot1vProtocolPortGroupVid = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 13, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setReference('IEEE 802.1v clause 8.4.4, 12.10.1.2')
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setStatus('current')
if mibBuilder.loadTexts: dot1vProtocolPortGroupVid.setDescription('The VID associated with a group of protocols for each port.')
# Conformance subtree: object groups plus the module compliance statement.
vBridgeConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2))
vBridgeGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2, 1))
vBridgeCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 13, 2, 2))
vBridgeDeviceGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 17, 13, 2, 1, 1)).setObjects(("V-BRIDGE-MIB", "dot1vProtocolGroupId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgeDeviceGroup = vBridgeDeviceGroup.setStatus('current')
if mibBuilder.loadTexts: vBridgeDeviceGroup.setDescription('VLAN classification information for the bridge.')
vBridgePortGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 17, 13, 2, 1, 2)).setObjects(("V-BRIDGE-MIB", "dot1vProtocolPortGroupVid"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgePortGroup = vBridgePortGroup.setStatus('current')
if mibBuilder.loadTexts: vBridgePortGroup.setDescription('VLAN classification information for individual ports.')
vBridgeCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 17, 13, 2, 2, 1)).setObjects(("V-BRIDGE-MIB", "vBridgeDeviceGroup"), ("V-BRIDGE-MIB", "vBridgePortGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    vBridgeCompliance = vBridgeCompliance.setStatus('current')
if mibBuilder.loadTexts: vBridgeCompliance.setDescription('The compliance statement for device support of bridging services.')
# Publish all definitions under the "V-BRIDGE-MIB" module name.
mibBuilder.exportSymbols("V-BRIDGE-MIB", vBridgeGroups=vBridgeGroups, dot1vProtocolGroupId=dot1vProtocolGroupId, vBridgePortGroup=vBridgePortGroup, vBridgeConformance=vBridgeConformance, dot1vProtocolGroupTable=dot1vProtocolGroupTable, dot1vProtocolPortEntry=dot1vProtocolPortEntry, vBridgeDeviceGroup=vBridgeDeviceGroup, dot1vProtocolPortGroupVid=dot1vProtocolPortGroupVid, PYSNMP_MODULE_ID=vBridgeMIB, vBridgeCompliance=vBridgeCompliance, dot1vProtocolPortGroupId=dot1vProtocolPortGroupId, vBridgeMIBObjects=vBridgeMIBObjects, vBridgeMIB=vBridgeMIB, dot1vProtocolTemplateFrameType=dot1vProtocolTemplateFrameType, dot1vProtocol=dot1vProtocol, dot1vProtocolPortTable=dot1vProtocolPortTable, vBridgeCompliances=vBridgeCompliances, dot1vProtocolGroupEntry=dot1vProtocolGroupEntry, dot1vProtocolTemplateProtocolValue=dot1vProtocolTemplateProtocolValue)
| true
| true
|
f71708f6e5870838a55c5650db911c0b554c69fb
| 10,593
|
py
|
Python
|
oneflow/python/test/models/vgg16.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | 1
|
2020-12-04T03:06:16.000Z
|
2020-12-04T03:06:16.000Z
|
oneflow/python/test/models/vgg16.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/models/vgg16.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
from datetime import datetime
import numpy
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
# Default dataset / checkpoint locations for the test cluster.
_DATA_DIR = "/dataset/PNGS/PNG224/of_record_repeated"
_SINGLE_DATA_DIR = "/dataset/PNGS/PNG224/of_record"
_MODEL_LOAD_DIR = "/dataset/PNGS/cnns_model_for_test/vgg16/models/of_model"
# Time-stamped so repeated runs do not overwrite each other's checkpoints.
_MODEL_SAVE_DIR = "./model_save-{}".format(
    str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
)
# Comma-separated node addresses used for the default multi-node run.
NODE_LIST = "192.168.1.12,192.168.1.14"
class DLNetSpec(object):
    """Plain bag of run settings mirroring the command-line flags below."""

    def __init__(self, enable_auto_mixed_precision):
        # Execution resources.
        self.num_nodes = 1
        self.gpu_num_per_node = 1
        self.iter_num = 10
        # Input pipeline.
        self.batch_size = 8
        self.data_part_num = 32
        self.eval_dir = _DATA_DIR
        self.train_dir = _DATA_DIR
        # Checkpoint locations.
        self.model_save_dir = _MODEL_SAVE_DIR
        self.model_load_dir = _MODEL_LOAD_DIR
        self.enable_auto_mixed_precision = enable_auto_mixed_precision
# Command-line flags; the defaults reproduce a single-node, single-GPU run.
parser = argparse.ArgumentParser(description="flags for multi-node and resource")
parser.add_argument("-g", "--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("-i", "--iter_num", type=int, default=10, required=False)
parser.add_argument(
    "-m", "--multinode", default=False, action="store_true", required=False
)
parser.add_argument("-n", "--node_list", type=str, default=NODE_LIST, required=False)
# How the worker binary gets onto remote nodes: scp (with/without a uuid
# directory) or started by hand by the user.
parser.add_argument(
    "-s", "--skip_scp_binary", default=False, action="store_true", required=False
)
parser.add_argument(
    "-c",
    "--scp_binary_without_uuid",
    default=False,
    action="store_true",
    required=False,
)
parser.add_argument(
    "-r", "--remote_by_hand", default=False, action="store_true", required=False
)
parser.add_argument("-e", "--eval_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument("-t", "--train_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument(
    "-load", "--model_load_dir", type=str, default=_MODEL_LOAD_DIR, required=False
)
parser.add_argument(
    "-save", "--model_save_dir", type=str, default=_MODEL_SAVE_DIR, required=False
)
parser.add_argument("-dn", "--data_part_num", type=int, default=32, required=False)
parser.add_argument("-b", "--batch_size", type=int, default=8, required=False)
def _conv2d_layer(
    name,
    input,
    filters,
    kernel_size=3,
    strides=1,
    padding="VALID",
    data_format="NCHW",
    dilation_rate=1,
    activation=op_conf_util.kRelu,
    use_bias=True,
    weight_initializer=flow.random_uniform_initializer(),
    bias_initializer=flow.constant_initializer(),
):
    """Conv2d with optional bias-add and ReLU; returns the output blob.

    Only kRelu (or None) is accepted for `activation`.  The weight variable
    is named "<name>-weight" and the bias "<name>-bias".
    """
    in_channels = input.shape[1]  # NCHW layout: channels at dim 1
    kernel = flow.get_variable(
        name + "-weight",
        shape=(filters, in_channels, kernel_size, kernel_size),
        dtype=input.dtype,
        initializer=weight_initializer,
    )
    out = flow.nn.conv2d(
        input, kernel, strides, padding, data_format, dilation_rate, name=name
    )
    if use_bias:
        out = flow.nn.bias_add(
            out,
            flow.get_variable(
                name + "-bias",
                shape=(filters,),
                dtype=input.dtype,
                initializer=bias_initializer,
            ),
            "NCHW",
        )
    if activation is None:
        return out
    if activation != op_conf_util.kRelu:
        raise NotImplementedError
    return flow.math.relu(out)
def _data_load_layer(args, data_dir):
    """Decode one global batch of OFRecord data.

    Returns (label, image): int32 scalar labels and float NCHW images,
    resized to 224x224 and mean-normalized per RGB channel.
    """
    # The global batch spans every device on every node.
    global_batch = args.batch_size * args.gpu_num_per_node * args.num_nodes
    records = flow.data.ofrecord_reader(
        data_dir,
        batch_size=global_batch,
        data_part_num=args.data_part_num,
        name="decode",
    )
    raw_images = flow.data.ofrecord_image_decoder(records, "encoded", color_space="RGB")
    class_labels = flow.data.ofrecord_raw_decoder(
        records, "class/label", shape=(), dtype=flow.int32
    )
    resized = flow.image.resize(raw_images, resize_x=224, resize_y=224, color_space="RGB")
    normalized = flow.image.crop_mirror_normalize(
        resized,
        color_space="RGB",
        output_layout="NCHW",
        mean=[123.68, 116.78, 103.94],  # per-channel RGB means
        output_dtype=flow.float,
    )
    return class_labels, normalized
def _conv_block(in_blob, index, filters, conv_times):
    """Stack `conv_times` 3x3 stride-1 convs on top of `in_blob`.

    Layer names run "conv<index>", "conv<index+1>", ... so callers control
    the global numbering.  Returns [in_blob, conv_1, ..., conv_n]; the last
    element is the block output.
    """
    blobs = [in_blob]
    for _ in range(conv_times):
        blobs.append(
            _conv2d_layer(
                name="conv{}".format(index),
                input=blobs[-1],
                filters=filters,
                kernel_size=3,
                strides=1,
            )
        )
        index += 1
    return blobs
def vgg(images, labels, trainable=True):
    """Build the VGG16 forward graph and return a tuple of output blobs.

    The tuple currently holds a single element: the sparse softmax
    cross-entropy loss of the 1001-way logits against `labels`.
    `trainable` is forwarded to the dense layers only; the conv stages
    built via _conv_block are always trainable.
    """
    to_return = []
    # Five conv stages (2, 2, 3, 3, 3 conv layers), each followed by 2x2
    # max-pooling.  Start indices 0, 2, 4, 7, 10 make the generated layer
    # names conv0..conv12 consecutive -- presumably to match the pretrained
    # checkpoint in _MODEL_LOAD_DIR; verify before renumbering.
    conv1 = _conv_block(images, 0, 64, 2)
    pool1 = flow.nn.max_pool2d(conv1[-1], 2, 2, "VALID", "NCHW", name="pool1")
    conv2 = _conv_block(pool1, 2, 128, 2)
    pool2 = flow.nn.max_pool2d(conv2[-1], 2, 2, "VALID", "NCHW", name="pool2")
    conv3 = _conv_block(pool2, 4, 256, 3)
    pool3 = flow.nn.max_pool2d(conv3[-1], 2, 2, "VALID", "NCHW", name="pool3")
    conv4 = _conv_block(pool3, 7, 512, 3)
    pool4 = flow.nn.max_pool2d(conv4[-1], 2, 2, "VALID", "NCHW", name="pool4")
    conv5 = _conv_block(pool4, 10, 512, 3)
    pool5 = flow.nn.max_pool2d(conv5[-1], 2, 2, "VALID", "NCHW", name="pool5")

    def _get_kernel_initializer():
        # Truncated normal with a fixed std for the dense kernels.
        kernel_initializer = op_conf_util.InitializerConf()
        kernel_initializer.truncated_normal_conf.std = 0.816496580927726
        return kernel_initializer

    def _get_bias_initializer():
        # Dense biases start at zero.
        bias_initializer = op_conf_util.InitializerConf()
        bias_initializer.constant_conf.value = 0.0
        return bias_initializer

    # With 224x224 inputs the VALID convs/pools above reduce the feature map
    # to 1x1, so this flatten to (N, 512) keeps the batch dimension intact.
    pool5 = flow.reshape(pool5, [-1, 512])
    fc6 = flow.layers.dense(
        inputs=pool5,
        units=4096,
        activation=flow.math.relu,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc1",
    )
    fc7 = flow.layers.dense(
        inputs=fc6,
        units=4096,
        activation=flow.math.relu,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc2",
    )
    # Final classifier: 1001-way logits, no activation.
    fc8 = flow.layers.dense(
        inputs=fc7,
        units=1001,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc_final",
    )
    loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
        labels, fc8, name="softmax_loss"
    )
    to_return.append(loss)
    return tuple(to_return)
def main(args):
    """Train VGG16 for args.iter_num iterations and save the loss curve.

    Builds a training job and an evaluation job, loads or initializes a
    checkpoint, runs the training loop (checkpointing every 100 iters),
    and dumps per-iteration losses to ./of_loss/vgg16/<n>n<c>c.npy.
    """
    flow.config.machine_num(args.num_nodes)
    flow.config.gpu_device_num(args.gpu_num_per_node)
    # Training job: consistent (synchronous) view, naive SGD, lr = 1e-5.
    train_config = flow.FunctionConfig()
    train_config.default_distribute_strategy(flow.scope.consistent_view())
    train_config.default_data_type(flow.float)
    train_config.train.primary_lr(0.00001)
    train_config.train.model_update_conf(dict(naive_conf={}))
    train_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)

    @flow.global_function(train_config)
    def vgg_train_job():
        (labels, images) = _data_load_layer(args, args.train_dir)
        to_return = vgg(images, labels)
        loss = to_return[-1]
        flow.losses.add_loss(loss)
        return loss

    # Evaluation job: defined for parity but not invoked in the loop below.
    eval_config = flow.FunctionConfig()
    eval_config.default_distribute_strategy(flow.scope.consistent_view())
    eval_config.default_data_type(flow.float)
    eval_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)

    @flow.global_function(eval_config)
    def vgg_eval_job():
        (labels, images) = _data_load_layer(args, args.eval_dir)
        return vgg(images, labels, False)

    # Start from the given checkpoint when provided, else random init.
    check_point = flow.train.CheckPoint()
    if not args.model_load_dir:
        check_point.init()
    else:
        check_point.load(args.model_load_dir)
    num_nodes = args.num_nodes
    print(
        "Traning vgg16: num_gpu_per_node = {}, num_nodes = {}.".format(
            args.gpu_num_per_node, num_nodes
        )
    )
    print("{:>12} {:>12} {:>12}".format("iter", "loss type", "loss value"))
    loss = []
    for i in range(args.iter_num):
        # .get() blocks until the async job finishes; mean() scalarizes.
        train_loss = vgg_train_job().get().mean()
        loss.append(train_loss)
        fmt_str = "{:>12} {:>12} {:>12.6f}"
        print(fmt_str.format(i, "train loss:", train_loss))
        # Periodic checkpointing.
        if (i + 1) % 100 == 0:
            check_point.save(_MODEL_SAVE_DIR + str(i))
    # save loss to file
    loss_file = "{}n{}c.npy".format(
        str(num_nodes), str(args.gpu_num_per_node * num_nodes)
    )
    loss_path = "./of_loss/vgg16"
    if not os.path.exists(loss_path):
        os.makedirs(loss_path)
    numpy.save(os.path.join(loss_path, loss_file), loss)
if __name__ == "__main__":
    args = parser.parse_args()
    flow.env.grpc_use_no_signal()
    flow.env.log_dir("./log")
    if args.multinode:
        # Multi-node: register every node's control address, then start the
        # remote workers unless the user launched them by hand (-r).
        flow.env.ctrl_port(12138)
        nodes = []
        for n in args.node_list.strip().split(","):
            addr_dict = {}
            addr_dict["addr"] = n
            nodes.append(addr_dict)
        flow.env.machine(nodes)
        if args.remote_by_hand is False:
            if args.scp_binary_without_uuid:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=False)
            elif args.skip_scp_binary:
                flow.deprecated.init_worker(scp_binary=False, use_uuid=False)
            else:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=True)
    main(args)
    # Only tear down workers that this process started itself.
    if (
        args.multinode
        and args.skip_scp_binary is False
        and args.scp_binary_without_uuid is False
    ):
        flow.deprecated.delete_worker()
| 31.906627
| 85
| 0.657604
|
import argparse
import os
from datetime import datetime
import numpy
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
# Default dataset / checkpoint locations for the test cluster.
_DATA_DIR = "/dataset/PNGS/PNG224/of_record_repeated"
_SINGLE_DATA_DIR = "/dataset/PNGS/PNG224/of_record"
_MODEL_LOAD_DIR = "/dataset/PNGS/cnns_model_for_test/vgg16/models/of_model"
# Time-stamped so repeated runs do not overwrite each other's checkpoints.
_MODEL_SAVE_DIR = "./model_save-{}".format(
    str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
)
# Comma-separated node addresses used for the default multi-node run.
NODE_LIST = "192.168.1.12,192.168.1.14"
class DLNetSpec(object):
    """Plain bag of run settings mirroring the command-line flags below."""

    def __init__(self, enable_auto_mixed_precision):
        # Execution resources.
        self.num_nodes = 1
        self.gpu_num_per_node = 1
        self.iter_num = 10
        # Input pipeline.
        self.batch_size = 8
        self.data_part_num = 32
        self.eval_dir = _DATA_DIR
        self.train_dir = _DATA_DIR
        # Checkpoint locations.
        self.model_save_dir = _MODEL_SAVE_DIR
        self.model_load_dir = _MODEL_LOAD_DIR
        self.enable_auto_mixed_precision = enable_auto_mixed_precision
# Command-line flags; the defaults reproduce a single-node, single-GPU run.
parser = argparse.ArgumentParser(description="flags for multi-node and resource")
parser.add_argument("-g", "--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("-i", "--iter_num", type=int, default=10, required=False)
parser.add_argument(
    "-m", "--multinode", default=False, action="store_true", required=False
)
parser.add_argument("-n", "--node_list", type=str, default=NODE_LIST, required=False)
# How the worker binary gets onto remote nodes: scp (with/without a uuid
# directory) or started by hand by the user.
parser.add_argument(
    "-s", "--skip_scp_binary", default=False, action="store_true", required=False
)
parser.add_argument(
    "-c",
    "--scp_binary_without_uuid",
    default=False,
    action="store_true",
    required=False,
)
parser.add_argument(
    "-r", "--remote_by_hand", default=False, action="store_true", required=False
)
parser.add_argument("-e", "--eval_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument("-t", "--train_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument(
    "-load", "--model_load_dir", type=str, default=_MODEL_LOAD_DIR, required=False
)
parser.add_argument(
    "-save", "--model_save_dir", type=str, default=_MODEL_SAVE_DIR, required=False
)
parser.add_argument("-dn", "--data_part_num", type=int, default=32, required=False)
parser.add_argument("-b", "--batch_size", type=int, default=8, required=False)
def _conv2d_layer(
    name,
    input,
    filters,
    kernel_size=3,
    strides=1,
    padding="VALID",
    data_format="NCHW",
    dilation_rate=1,
    activation=op_conf_util.kRelu,
    use_bias=True,
    weight_initializer=flow.random_uniform_initializer(),
    bias_initializer=flow.constant_initializer(),
):
    """Conv2d with optional bias-add and ReLU; returns the output blob.

    Only kRelu (or None) is accepted for `activation`.  The weight variable
    is named "<name>-weight" and the bias "<name>-bias".
    """
    in_channels = input.shape[1]  # NCHW layout: channels at dim 1
    kernel = flow.get_variable(
        name + "-weight",
        shape=(filters, in_channels, kernel_size, kernel_size),
        dtype=input.dtype,
        initializer=weight_initializer,
    )
    out = flow.nn.conv2d(
        input, kernel, strides, padding, data_format, dilation_rate, name=name
    )
    if use_bias:
        out = flow.nn.bias_add(
            out,
            flow.get_variable(
                name + "-bias",
                shape=(filters,),
                dtype=input.dtype,
                initializer=bias_initializer,
            ),
            "NCHW",
        )
    if activation is None:
        return out
    if activation != op_conf_util.kRelu:
        raise NotImplementedError
    return flow.math.relu(out)
def _data_load_layer(args, data_dir):
    """Decode one global batch of OFRecord data.

    Returns (label, image): int32 scalar labels and float NCHW images,
    resized to 224x224 and mean-normalized per RGB channel.
    """
    # The global batch spans every device on every node.
    global_batch = args.batch_size * args.gpu_num_per_node * args.num_nodes
    records = flow.data.ofrecord_reader(
        data_dir,
        batch_size=global_batch,
        data_part_num=args.data_part_num,
        name="decode",
    )
    raw_images = flow.data.ofrecord_image_decoder(records, "encoded", color_space="RGB")
    class_labels = flow.data.ofrecord_raw_decoder(
        records, "class/label", shape=(), dtype=flow.int32
    )
    resized = flow.image.resize(raw_images, resize_x=224, resize_y=224, color_space="RGB")
    normalized = flow.image.crop_mirror_normalize(
        resized,
        color_space="RGB",
        output_layout="NCHW",
        mean=[123.68, 116.78, 103.94],  # per-channel RGB means
        output_dtype=flow.float,
    )
    return class_labels, normalized
def _conv_block(in_blob, index, filters, conv_times):
    """Stack `conv_times` 3x3 stride-1 convs on top of `in_blob`.

    Layer names run "conv<index>", "conv<index+1>", ... so callers control
    the global numbering.  Returns [in_blob, conv_1, ..., conv_n]; the last
    element is the block output.
    """
    blobs = [in_blob]
    for _ in range(conv_times):
        blobs.append(
            _conv2d_layer(
                name="conv{}".format(index),
                input=blobs[-1],
                filters=filters,
                kernel_size=3,
                strides=1,
            )
        )
        index += 1
    return blobs
def vgg(images, labels, trainable=True):
    """Build the VGG16 forward graph and return a tuple of output blobs.

    The tuple currently holds a single element: the sparse softmax
    cross-entropy loss of the 1001-way logits against `labels`.
    `trainable` is forwarded to the dense layers only; the conv stages
    built via _conv_block are always trainable.
    """
    to_return = []
    # Five conv stages (2, 2, 3, 3, 3 conv layers), each followed by 2x2
    # max-pooling.  Start indices 0, 2, 4, 7, 10 make the generated layer
    # names conv0..conv12 consecutive -- presumably to match the pretrained
    # checkpoint in _MODEL_LOAD_DIR; verify before renumbering.
    conv1 = _conv_block(images, 0, 64, 2)
    pool1 = flow.nn.max_pool2d(conv1[-1], 2, 2, "VALID", "NCHW", name="pool1")
    conv2 = _conv_block(pool1, 2, 128, 2)
    pool2 = flow.nn.max_pool2d(conv2[-1], 2, 2, "VALID", "NCHW", name="pool2")
    conv3 = _conv_block(pool2, 4, 256, 3)
    pool3 = flow.nn.max_pool2d(conv3[-1], 2, 2, "VALID", "NCHW", name="pool3")
    conv4 = _conv_block(pool3, 7, 512, 3)
    pool4 = flow.nn.max_pool2d(conv4[-1], 2, 2, "VALID", "NCHW", name="pool4")
    conv5 = _conv_block(pool4, 10, 512, 3)
    pool5 = flow.nn.max_pool2d(conv5[-1], 2, 2, "VALID", "NCHW", name="pool5")

    def _get_kernel_initializer():
        # Truncated normal with a fixed std for the dense kernels.
        kernel_initializer = op_conf_util.InitializerConf()
        kernel_initializer.truncated_normal_conf.std = 0.816496580927726
        return kernel_initializer

    def _get_bias_initializer():
        # Dense biases start at zero.
        bias_initializer = op_conf_util.InitializerConf()
        bias_initializer.constant_conf.value = 0.0
        return bias_initializer

    # With 224x224 inputs the VALID convs/pools above reduce the feature map
    # to 1x1, so this flatten to (N, 512) keeps the batch dimension intact.
    pool5 = flow.reshape(pool5, [-1, 512])
    fc6 = flow.layers.dense(
        inputs=pool5,
        units=4096,
        activation=flow.math.relu,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc1",
    )
    fc7 = flow.layers.dense(
        inputs=fc6,
        units=4096,
        activation=flow.math.relu,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc2",
    )
    # Final classifier: 1001-way logits, no activation.
    fc8 = flow.layers.dense(
        inputs=fc7,
        units=1001,
        use_bias=True,
        kernel_initializer=_get_kernel_initializer(),
        bias_initializer=_get_bias_initializer(),
        trainable=trainable,
        name="fc_final",
    )
    loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
        labels, fc8, name="softmax_loss"
    )
    to_return.append(loss)
    return tuple(to_return)
def main(args):
    """Train VGG16 for args.iter_num iterations and save the loss curve.

    Builds a training job and an evaluation job, loads or initializes a
    checkpoint, runs the training loop (checkpointing every 100 iters),
    and dumps per-iteration losses to ./of_loss/vgg16/<n>n<c>c.npy.
    """
    flow.config.machine_num(args.num_nodes)
    flow.config.gpu_device_num(args.gpu_num_per_node)
    # Training job: consistent (synchronous) view, naive SGD, lr = 1e-5.
    train_config = flow.FunctionConfig()
    train_config.default_distribute_strategy(flow.scope.consistent_view())
    train_config.default_data_type(flow.float)
    train_config.train.primary_lr(0.00001)
    train_config.train.model_update_conf(dict(naive_conf={}))
    train_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)

    @flow.global_function(train_config)
    def vgg_train_job():
        (labels, images) = _data_load_layer(args, args.train_dir)
        to_return = vgg(images, labels)
        loss = to_return[-1]
        flow.losses.add_loss(loss)
        return loss

    # Evaluation job: defined for parity but not invoked in the loop below.
    eval_config = flow.FunctionConfig()
    eval_config.default_distribute_strategy(flow.scope.consistent_view())
    eval_config.default_data_type(flow.float)
    eval_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)

    @flow.global_function(eval_config)
    def vgg_eval_job():
        (labels, images) = _data_load_layer(args, args.eval_dir)
        return vgg(images, labels, False)

    # Start from the given checkpoint when provided, else random init.
    check_point = flow.train.CheckPoint()
    if not args.model_load_dir:
        check_point.init()
    else:
        check_point.load(args.model_load_dir)
    num_nodes = args.num_nodes
    print(
        "Traning vgg16: num_gpu_per_node = {}, num_nodes = {}.".format(
            args.gpu_num_per_node, num_nodes
        )
    )
    print("{:>12} {:>12} {:>12}".format("iter", "loss type", "loss value"))
    loss = []
    for i in range(args.iter_num):
        # .get() blocks until the async job finishes; mean() scalarizes.
        train_loss = vgg_train_job().get().mean()
        loss.append(train_loss)
        fmt_str = "{:>12} {:>12} {:>12.6f}"
        print(fmt_str.format(i, "train loss:", train_loss))
        # Periodic checkpointing.
        if (i + 1) % 100 == 0:
            check_point.save(_MODEL_SAVE_DIR + str(i))
    # Save the collected loss curve for offline comparison.
    loss_file = "{}n{}c.npy".format(
        str(num_nodes), str(args.gpu_num_per_node * num_nodes)
    )
    loss_path = "./of_loss/vgg16"
    if not os.path.exists(loss_path):
        os.makedirs(loss_path)
    numpy.save(os.path.join(loss_path, loss_file), loss)
if __name__ == "__main__":
    args = parser.parse_args()
    flow.env.grpc_use_no_signal()
    flow.env.log_dir("./log")
    if args.multinode:
        # Multi-node: register every node's control address, then start the
        # remote workers unless the user launched them by hand (-r).
        flow.env.ctrl_port(12138)
        nodes = []
        for n in args.node_list.strip().split(","):
            addr_dict = {}
            addr_dict["addr"] = n
            nodes.append(addr_dict)
        flow.env.machine(nodes)
        if args.remote_by_hand is False:
            if args.scp_binary_without_uuid:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=False)
            elif args.skip_scp_binary:
                flow.deprecated.init_worker(scp_binary=False, use_uuid=False)
            else:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=True)
    main(args)
    # Only tear down workers that this process started itself.
    if (
        args.multinode
        and args.skip_scp_binary is False
        and args.scp_binary_without_uuid is False
    ):
        flow.deprecated.delete_worker()
| true
| true
|
f71709855ef2b9332e7d2df3b90c353b07b8c069
| 4,134
|
py
|
Python
|
workflow/scripts/FilterUncorrectabledPEfastq.py
|
osvaldoreisss/transcriptome_assembly_workflow
|
bef17c05cb0287e2b46f6018cac757be58b508c5
|
[
"MIT"
] | null | null | null |
workflow/scripts/FilterUncorrectabledPEfastq.py
|
osvaldoreisss/transcriptome_assembly_workflow
|
bef17c05cb0287e2b46f6018cac757be58b508c5
|
[
"MIT"
] | null | null | null |
workflow/scripts/FilterUncorrectabledPEfastq.py
|
osvaldoreisss/transcriptome_assembly_workflow
|
bef17c05cb0287e2b46f6018cac757be58b508c5
|
[
"MIT"
] | null | null | null |
"""
author: adam h freedman
afreedman405 at gmail.com
data: Fri Aug 26 10:55:18 EDT 2016
This script takes as an input Rcorrector error corrected Illumina paired-reads
in fastq format and:
1. Removes any reads that Rcorrector indentifes as containing an error,
but can't be corrected, typically low complexity sequences. For these,
the header contains 'unfixable'.
2. Strips the ' cor' from headers of reads that Rcorrector fixed, to avoid
issues created by certain header formats for downstream tools.
3. Write a log with counts of (a) read pairs that were removed because one end
was unfixable, (b) corrected left and right reads, (c) total number of
read pairs containing at least one corrected read.
Currently, this script only handles paired-end data, and handle either unzipped
or gzipped files on the fly, so long as the gzipped files end with 'gz'.
"""
import sys
import gzip
from itertools import izip,izip_longest
import argparse
from os.path import basename
def get_input_streams(r1file, r2file):
    """Open the paired fastq files, transparently handling gzipped inputs.

    A file is treated as gzipped when its name ends with 'gz'; both files
    are assumed to be compressed the same way (only r1file is inspected).
    """
    if r1file.endswith('gz'):
        return gzip.open(r1file, 'rb'), gzip.open(r2file, 'rb')
    return open(r1file, 'r'), open(r2file, 'r')
def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    # NOTE: izip_longest is the Python 2 spelling (itertools.zip_longest in
    # Python 3); this script is Python 2 only.
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
if __name__=="__main__":
parser = argparse.ArgumentParser(description="options for filtering and logging rCorrector fastq outputs")
parser.add_argument('-1','--left_reads',dest='leftreads',type=str,help='R1 fastq file')
parser.add_argument('-2','--right_reads',dest='rightreads',type=str,help='R2 fastq file')
parser.add_argument('-s','--sample_id',dest='id',type=str,help='sample name to write to log file')
opts = parser.parse_args()
r1out=open('unfixrm_%s' % basename(opts.leftreads).replace('.gz',''),'w')
r2out=open('unfixrm_%s' % basename(opts.rightreads).replace('.gz','') ,'w')
r1_cor_count=0
r2_cor_count=0
pair_cor_count=0
unfix_r1_count=0
unfix_r2_count=0
unfix_both_count=0
r1_stream,r2_stream=get_input_streams(opts.leftreads,opts.rightreads)
with r1_stream as f1, r2_stream as f2:
R1=grouper(f1,4)
R2=grouper(f2,4)
counter=0
for entry in R1:
counter+=1
if counter%100000==0:
print "%s reads processed" % counter
head1,seq1,placeholder1,qual1=[i.strip() for i in entry]
head2,seq2,placeholder2,qual2=[j.strip() for j in R2.next()]
if 'unfixable' in head1 and 'unfixable' not in head2:
unfix_r1_count+=1
elif 'unfixable' in head2 and 'unfixable' not in head1:
unfix_r2_count+=1
elif 'unfixable' in head1 and 'unfixable' in head2:
unfix_both_count+=1
else:
if 'cor' in head1:
r1_cor_count+=1
if 'cor' in head2:
r2_cor_count+=1
if 'cor' in head1 or 'cor' in head2:
pair_cor_count+=1
head1=head1.split('l:')[0][:-1]
head2=head2.split('l:')[0][:-1]
r1out.write('%s\n' % '\n'.join([head1,seq1,placeholder1,qual1]))
r2out.write('%s\n' % '\n'.join([head2,seq2,placeholder2,qual2]))
total_unfixable = unfix_r1_count+unfix_r2_count+unfix_both_count
total_retained = counter - total_unfixable
unfix_log=open('rmunfixable_%s.log' % opts.id,'w')
unfix_log.write('total PE reads:%s\nremoved PE reads:%s\nretained PE reads:%s\nR1 corrected:%s\nR2 corrected:%s\npairs corrected:%s\nR1 unfixable:%s\nR2 unfixable:%s\nboth reads unfixable:%s\n' % (counter,total_unfixable,total_retained,r1_cor_count,r2_cor_count,pair_cor_count,unfix_r1_count,unfix_r2_count,unfix_both_count))
r1out.close()
r2out.close()
unfix_log.close()
| 38.277778
| 329
| 0.648766
|
"""
author: adam h freedman
afreedman405 at gmail.com
data: Fri Aug 26 10:55:18 EDT 2016
This script takes as an input Rcorrector error corrected Illumina paired-reads
in fastq format and:
1. Removes any reads that Rcorrector indentifes as containing an error,
but can't be corrected, typically low complexity sequences. For these,
the header contains 'unfixable'.
2. Strips the ' cor' from headers of reads that Rcorrector fixed, to avoid
issues created by certain header formats for downstream tools.
3. Write a log with counts of (a) read pairs that were removed because one end
was unfixable, (b) corrected left and right reads, (c) total number of
read pairs containing at least one corrected read.
Currently, this script only handles paired-end data, and handle either unzipped
or gzipped files on the fly, so long as the gzipped files end with 'gz'.
"""
import sys
import gzip
from itertools import izip,izip_longest
import argparse
from os.path import basename
def get_input_streams(r1file, r2file):
    """Open the paired fastq files, transparently handling gzipped input.

    Files whose names end in 'gz' are opened with gzip in binary mode;
    anything else is opened as plain text. Returns (r1_handle, r2_handle).
    """
    if r1file[-2:] == 'gz':
        open_one = lambda path: gzip.open(path, 'rb')
    else:
        open_one = lambda path: open(path, 'r')
    return open_one(r1file), open_one(r2file)
def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    # n references to the SAME iterator: izip_longest then pulls n
    # consecutive items into each output tuple, padding the final short
    # chunk with fillvalue.  (izip_longest is the Python 2 name; it is
    # itertools.zip_longest on Python 3.)
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)
if __name__=="__main__":
    # Command-line interface: the two paired fastq files plus a sample id
    # that is only used to name the summary log file.
    parser = argparse.ArgumentParser(description="options for filtering and logging rCorrector fastq outputs")
    parser.add_argument('-1','--left_reads',dest='leftreads',type=str,help='R1 fastq file')
    parser.add_argument('-2','--right_reads',dest='rightreads',type=str,help='R2 fastq file')
    parser.add_argument('-s','--sample_id',dest='id',type=str,help='sample name to write to log file')
    opts = parser.parse_args()

    # Filtered reads are written uncompressed to the working directory with
    # an 'unfixrm_' prefix; a trailing '.gz' is dropped from the base name.
    r1out=open('unfixrm_%s' % basename(opts.leftreads).replace('.gz',''),'w')
    r2out=open('unfixrm_%s' % basename(opts.rightreads).replace('.gz','') ,'w')

    # Per-category counters reported in the summary log.
    r1_cor_count=0
    r2_cor_count=0
    pair_cor_count=0
    unfix_r1_count=0
    unfix_r2_count=0
    unfix_both_count=0

    r1_stream,r2_stream=get_input_streams(opts.leftreads,opts.rightreads)

    with r1_stream as f1, r2_stream as f2:
        # Iterate both files one fastq record (4 lines) at a time, in
        # lockstep: each R1 record is matched with the next R2 record.
        R1=grouper(f1,4)
        R2=grouper(f2,4)
        counter=0
        for entry in R1:
            counter+=1
            if counter%100000==0:
                # NOTE: Python 2 print statement; this script is Python 2 only.
                print "%s reads processed" % counter
            head1,seq1,placeholder1,qual1=[i.strip() for i in entry]
            # R2.next() is the Python 2 iterator protocol (next(R2) in py3).
            head2,seq2,placeholder2,qual2=[j.strip() for j in R2.next()]
            # Drop the whole pair when either mate was flagged 'unfixable'
            # by Rcorrector; track which side(s) triggered the removal.
            if 'unfixable' in head1 and 'unfixable' not in head2:
                unfix_r1_count+=1
            elif 'unfixable' in head2 and 'unfixable' not in head1:
                unfix_r2_count+=1
            elif 'unfixable' in head1 and 'unfixable' in head2:
                unfix_both_count+=1
            else:
                # Count corrected reads ('cor' substring in the header),
                # then strip Rcorrector's trailing annotation (everything
                # from 'l:' on, plus the preceding space) before writing.
                if 'cor' in head1:
                    r1_cor_count+=1
                if 'cor' in head2:
                    r2_cor_count+=1
                if 'cor' in head1 or 'cor' in head2:
                    pair_cor_count+=1
                head1=head1.split('l:')[0][:-1]
                head2=head2.split('l:')[0][:-1]
                r1out.write('%s\n' % '\n'.join([head1,seq1,placeholder1,qual1]))
                r2out.write('%s\n' % '\n'.join([head2,seq2,placeholder2,qual2]))

    # Write the summary log with totals for every removal/correction category.
    total_unfixable = unfix_r1_count+unfix_r2_count+unfix_both_count
    total_retained = counter - total_unfixable
    unfix_log=open('rmunfixable_%s.log' % opts.id,'w')
    unfix_log.write('total PE reads:%s\nremoved PE reads:%s\nretained PE reads:%s\nR1 corrected:%s\nR2 corrected:%s\npairs corrected:%s\nR1 unfixable:%s\nR2 unfixable:%s\nboth reads unfixable:%s\n' % (counter,total_unfixable,total_retained,r1_cor_count,r2_cor_count,pair_cor_count,unfix_r1_count,unfix_r2_count,unfix_both_count))
    r1out.close()
    r2out.close()
    unfix_log.close()
| false
| true
|
f71709ad2911d4c2d64410be79854cf9a92e3003
| 13,191
|
py
|
Python
|
pommerman/envs/v0.py
|
xysun/playground
|
20f9a7e0eb3d24e7cd32d8afd94b767b8fcc00b4
|
[
"Apache-2.0"
] | null | null | null |
pommerman/envs/v0.py
|
xysun/playground
|
20f9a7e0eb3d24e7cd32d8afd94b767b8fcc00b4
|
[
"Apache-2.0"
] | null | null | null |
pommerman/envs/v0.py
|
xysun/playground
|
20f9a7e0eb3d24e7cd32d8afd94b767b8fcc00b4
|
[
"Apache-2.0"
] | null | null | null |
"""The baseline Pommerman environment.
This evironment acts as game manager for Pommerman. Further environments,
such as in v1.py, will inherit from this.
"""
import json
import os
import numpy as np
import time
from gym import spaces
from gym.utils import seeding
import gym
from .. import characters
from .. import constants
from .. import forward_model
from .. import graphics
from .. import utility
class Pomme(gym.Env):
    '''The base pommerman env.

    Acts as the game manager for a four-agent Pommerman match; further
    environments (e.g. v1) inherit from this.
    '''
    metadata = {
        'render.modes': ['human', 'rgb_array', 'rgb_pixel'],
    }

    def __init__(self,
                 render_fps=None,
                 game_type=None,
                 board_size=None,
                 agent_view_size=None,
                 num_rigid=None,
                 num_wood=None,
                 num_items=None,
                 max_steps=1000,
                 is_partially_observable=False,
                 env=None,
                 **kwargs):
        # NOTE(review): render() divides by _render_fps when do_sleep is
        # True, so it must be non-None/non-zero before rendering with sleep.
        self._render_fps = render_fps
        self._agents = None
        self._game_type = game_type
        self._board_size = board_size
        self._agent_view_size = agent_view_size
        self._num_rigid = num_rigid
        self._num_wood = num_wood
        self._num_items = num_items
        self._max_steps = max_steps
        self._viewer = None
        self._is_partially_observable = is_partially_observable
        self._env = env
        # Id of an externally-trained agent; its actions are NOT produced
        # by act() and it is excluded from done-checking in _get_done().
        self.training_agent = None
        self.model = forward_model.ForwardModel()
        # This can be changed through set_render_mode
        # or from the cli tool using '--render_mode=MODE_TYPE'
        self._mode = 'human'
        # Observation and Action Spaces. These are both geared towards a single
        # agent even though the environment expects actions and returns
        # observations for all four agents. We do this so that it's clear what
        # the actions and obs are for a single agent. Wrt the observations,
        # they are actually returned as a dict for easier understanding.
        self._set_action_space()
        self._set_observation_space()

    def _set_action_space(self):
        # Six discrete actions (stop/up/down/left/right/bomb per constants.Action).
        self.action_space = spaces.Discrete(6)

    def set_render_mode(self, mode):
        self._mode = mode

    def _set_observation_space(self):
        """The Observation Space for each agent.

        There are a total of 3*board_size^2+12 observations:
        - all of the board (board_size^2)
        - bomb blast strength (board_size^2).
        - bomb life (board_size^2)
        - agent's position (2)
        - player ammo counts (1)
        - blast strength (1)
        - can_kick (1)
        - teammate (one of {AgentDummy.value, Agent3.value}).
        - enemies (three of {AgentDummy.value, Agent3.value}).
        """
        bss = self._board_size**2
        min_obs = [0] * 3 * bss + [0] * 5 + [constants.Item.AgentDummy.value] * 4
        # Upper bounds: item ids for the board plane, board_size for bomb
        # blast strength, 25 for bomb life, then the scalar features.
        max_obs = [len(constants.Item)] * bss + [self._board_size] * bss + [25] * bss
        max_obs += [self._board_size] * 2 + [self._num_items] * 2 + [1]
        max_obs += [constants.Item.Agent3.value] * 4
        self.observation_space = spaces.Box(
            np.array(min_obs), np.array(max_obs))

    def set_agents(self, agents):
        self._agents = agents

    def set_training_agent(self, agent_id):
        self.training_agent = agent_id

    def set_init_game_state(self, game_state_file):
        """Set the initial game state.

        The expected game_state_file JSON format is:
        - agents: list of agents serialized (agent_id, is_alive, position,
          ammo, blast_strength, can_kick)
        - board: board matrix topology (board_size^2)
        - board_size: board size
        - bombs: list of bombs serialized (position, bomber_id, life,
          blast_strength, moving_direction)
        - flames: list of flames serialized (position, life)
        - items: list of item by position
        - step_count: step count

        Args:
          game_state_file: JSON File input.
        """
        # NOTE(review): reset() reads self._init_game_state, so this method
        # (even with game_state_file=None) must be called before reset().
        self._init_game_state = None
        if game_state_file:
            with open(game_state_file, 'r') as f:
                self._init_game_state = json.loads(f.read())

    def make_board(self):
        # Fresh random board with the configured rigid/wood wall counts.
        self._board = utility.make_board(self._board_size, self._num_rigid,
                                         self._num_wood)

    def make_items(self):
        # Hide power-up items under the wooden walls of the current board.
        self._items = utility.make_items(self._board, self._num_items)

    def act(self, obs):
        # Collect actions from every agent except the externally-trained one.
        agents = [agent for agent in self._agents \
                  if agent.agent_id != self.training_agent]
        return self.model.act(agents, obs, self.action_space)

    def get_observations(self):
        self.observations = self.model.get_observations(
            self._board, self._agents, self._bombs,
            self._is_partially_observable, self._agent_view_size,
            self._game_type, self._env)
        return self.observations

    def _get_rewards(self):
        return self.model.get_rewards(self._agents, self._game_type,
                                      self._step_count, self._max_steps)

    def _get_done(self):
        return self.model.get_done(self._agents, self._step_count,
                                   self._max_steps, self._game_type,
                                   self.training_agent)

    def _get_info(self, done, rewards):
        return self.model.get_info(done, rewards, self._game_type, self._agents)

    def reset(self):
        # Agents must be attached via set_agents() before the first reset.
        assert (self._agents is not None)

        if self._init_game_state is not None:
            # Restore a saved game snapshot instead of generating a board.
            self.set_json_info()
        else:
            self._step_count = 0
            self.make_board()
            self.make_items()
            self._bombs = []
            self._flames = []
            self._powerups = []
            for agent_id, agent in enumerate(self._agents):
                # Each agent starts on the board cell carrying its own id.
                pos = np.where(self._board == utility.agent_value(agent_id))
                row = pos[0][0]
                col = pos[1][0]
                agent.set_start_position((row, col))
                agent.reset()

        return self.get_observations()

    def seed(self, seed=None):
        # NOTE(review): gym.spaces.prng was removed in newer gym releases;
        # this call ties the code to the gym version the project pins.
        gym.spaces.prng.seed(seed)
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, actions):
        # NOTE(review): blast strength is capped by the agent view size here
        # (falling back to 10) — presumably intentional; confirm upstream.
        max_blast_strength = self._agent_view_size or 10
        result = self.model.step(
            actions,
            self._board,
            self._agents,
            self._bombs,
            self._items,
            self._flames,
            max_blast_strength=max_blast_strength)
        self._board, self._agents, self._bombs, self._items, self._flames = \
            result[:5]

        done = self._get_done()
        obs = self.get_observations()
        reward = self._get_rewards()
        info = self._get_info(done, reward)
        self._step_count += 1
        return obs, reward, done, info

    def render(self,
               mode=None,
               close=False,
               record_pngs_dir=None,
               record_json_dir=None,
               do_sleep=True):
        if close:
            self.close()
            return

        mode = mode or self._mode or 'human'

        if mode == 'rgb_array':
            # Headless path: return the pixel array without opening a viewer.
            rgb_array = graphics.PixelViewer.rgb_array(
                self._board, self._board_size, self._agents,
                self._is_partially_observable, self._agent_view_size)
            return rgb_array[0]

        if self._viewer is None:
            # Lazily create the viewer on first render.
            if mode == 'rgb_pixel':
                self._viewer = graphics.PixelViewer(
                    board_size=self._board_size,
                    agents=self._agents,
                    agent_view_size=self._agent_view_size,
                    partially_observable=self._is_partially_observable)
            else:
                self._viewer = graphics.PommeViewer(
                    board_size=self._board_size,
                    agents=self._agents,
                    partially_observable=self._is_partially_observable,
                    agent_view_size=self._agent_view_size,
                    game_type=self._game_type)

            self._viewer.set_board(self._board)
            self._viewer.set_agents(self._agents)
            self._viewer.set_step(self._step_count)
            self._viewer.render()

            # Register all agents which need human input with Pyglet.
            # This needs to be done here as the first `imshow` creates the
            # window. Using `push_handlers` allows for easily creating agents
            # that use other Pyglet inputs such as joystick, for example.
            for agent in self._agents:
                if agent.has_user_input():
                    self._viewer.window.push_handlers(agent)
        else:
            self._viewer.set_board(self._board)
            self._viewer.set_agents(self._agents)
            self._viewer.set_step(self._step_count)
            self._viewer.render()

        if record_pngs_dir:
            self._viewer.save(record_pngs_dir)
        if record_json_dir:
            self.save_json(record_json_dir)

        if do_sleep:
            # Throttle rendering to the configured frame rate.
            time.sleep(1.0 / self._render_fps)

    def close(self):
        if self._viewer is not None:
            self._viewer.close()
            self._viewer = None

        for agent in self._agents:
            agent.shutdown()

    @staticmethod
    def featurize(obs):
        # Flatten one agent's observation dict into a single float vector
        # (matches the layout documented in _set_observation_space).
        board = obs["board"].reshape(-1).astype(np.float32)
        bomb_blast_strength = obs["bomb_blast_strength"].reshape(-1) \
                              .astype(np.float32)
        bomb_life = obs["bomb_life"].reshape(-1).astype(np.float32)
        position = utility.make_np_float(obs["position"])
        ammo = utility.make_np_float([obs["ammo"]])
        blast_strength = utility.make_np_float([obs["blast_strength"]])
        can_kick = utility.make_np_float([obs["can_kick"]])
        teammate = utility.make_np_float([obs["teammate"].value])
        enemies = utility.make_np_float([e.value for e in obs["enemies"]])
        return np.concatenate(
            (board, bomb_blast_strength, bomb_life, position, ammo,
             blast_strength, can_kick, teammate, enemies))

    def save_json(self, record_json_dir):
        # One file per step, zero-padded so files sort chronologically.
        info = self.get_json_info()
        count = "{0:0=3d}".format(self._step_count)
        suffix = count + '.json'
        path = os.path.join(record_json_dir, suffix)
        with open(path, 'w') as f:
            f.write(json.dumps(info, sort_keys=True, indent=4))

    def get_json_info(self):
        """Returns a json snapshot of the current game state."""
        ret = {
            'board_size': self._board_size,
            'step_count': self._step_count,
            'board': self._board,
            'agents': self._agents,
            'bombs': self._bombs,
            'flames': self._flames,
            'items': [[k, i] for k, i in self._items.items()]
        }
        # Serialize every value individually (PommermanJSONEncoder knows how
        # to encode boards, agents, bombs and flames).
        for key, value in ret.items():
            ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)
        return ret

    def set_json_info(self):
        """Sets the game state as the init_game_state."""
        board_size = int(self._init_game_state['board_size'])
        self._board_size = board_size
        self._step_count = int(self._init_game_state['step_count'])

        # Rebuild the board array from the serialized matrix, starting from
        # an all-Passage board.
        board_array = json.loads(self._init_game_state['board'])
        self._board = np.ones((board_size, board_size)).astype(np.uint8)
        self._board *= constants.Item.Passage.value
        for x in range(self._board_size):
            for y in range(self._board_size):
                self._board[x, y] = board_array[x][y]

        self._items = {}
        item_array = json.loads(self._init_game_state['items'])
        for i in item_array:
            self._items[tuple(i[0])] = i[1]

        # Restore each serialized agent onto the matching live agent object.
        agent_array = json.loads(self._init_game_state['agents'])
        for a in agent_array:
            agent = next(x for x in self._agents \
                         if x.agent_id == a['agent_id'])
            agent.set_start_position((a['position'][0], a['position'][1]))
            agent.reset(
                int(a['ammo']), bool(a['is_alive']), int(a['blast_strength']),
                bool(a['can_kick']))

        self._bombs = []
        bomb_array = json.loads(self._init_game_state['bombs'])
        for b in bomb_array:
            bomber = next(x for x in self._agents \
                          if x.agent_id == b['bomber_id'])
            moving_direction = b['moving_direction']
            if moving_direction is not None:
                moving_direction = constants.Action(moving_direction)
            self._bombs.append(
                characters.Bomb(bomber, tuple(b['position']), int(b['life']),
                                int(b['blast_strength']), moving_direction))

        self._flames = []
        flame_array = json.loads(self._init_game_state['flames'])
        for f in flame_array:
            self._flames.append(
                characters.Flame(tuple(f['position']), f['life']))
| 37.157746
| 80
| 0.578197
|
import json
import os
import numpy as np
import time
from gym import spaces
from gym.utils import seeding
import gym
from .. import characters
from .. import constants
from .. import forward_model
from .. import graphics
from .. import utility
class Pomme(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array', 'rgb_pixel'],
}
def __init__(self,
render_fps=None,
game_type=None,
board_size=None,
agent_view_size=None,
num_rigid=None,
num_wood=None,
num_items=None,
max_steps=1000,
is_partially_observable=False,
env=None,
**kwargs):
self._render_fps = render_fps
self._agents = None
self._game_type = game_type
self._board_size = board_size
self._agent_view_size = agent_view_size
self._num_rigid = num_rigid
self._num_wood = num_wood
self._num_items = num_items
self._max_steps = max_steps
self._viewer = None
self._is_partially_observable = is_partially_observable
self._env = env
self.training_agent = None
self.model = forward_model.ForwardModel()
self._mode = 'human'
# the actions and obs are for a single agent. Wrt the observations,
# they are actually returned as a dict for easier understanding.
self._set_action_space()
self._set_observation_space()
def _set_action_space(self):
self.action_space = spaces.Discrete(6)
def set_render_mode(self, mode):
self._mode = mode
def _set_observation_space(self):
bss = self._board_size**2
min_obs = [0] * 3 * bss + [0] * 5 + [constants.Item.AgentDummy.value
] * 4
max_obs = [len(constants.Item)] * bss + [self._board_size
] * bss + [25] * bss
max_obs += [self._board_size] * 2 + [self._num_items] * 2 + [1]
max_obs += [constants.Item.Agent3.value] * 4
self.observation_space = spaces.Box(
np.array(min_obs), np.array(max_obs))
def set_agents(self, agents):
self._agents = agents
def set_training_agent(self, agent_id):
self.training_agent = agent_id
def set_init_game_state(self, game_state_file):
self._init_game_state = None
if game_state_file:
with open(game_state_file, 'r') as f:
self._init_game_state = json.loads(f.read())
def make_board(self):
self._board = utility.make_board(self._board_size, self._num_rigid,
self._num_wood)
def make_items(self):
self._items = utility.make_items(self._board, self._num_items)
def act(self, obs):
agents = [agent for agent in self._agents \
if agent.agent_id != self.training_agent]
return self.model.act(agents, obs, self.action_space)
def get_observations(self):
self.observations = self.model.get_observations(
self._board, self._agents, self._bombs,
self._is_partially_observable, self._agent_view_size,
self._game_type, self._env)
return self.observations
def _get_rewards(self):
return self.model.get_rewards(self._agents, self._game_type,
self._step_count, self._max_steps)
def _get_done(self):
return self.model.get_done(self._agents, self._step_count,
self._max_steps, self._game_type,
self.training_agent)
def _get_info(self, done, rewards):
return self.model.get_info(done, rewards, self._game_type, self._agents)
def reset(self):
assert (self._agents is not None)
if self._init_game_state is not None:
self.set_json_info()
else:
self._step_count = 0
self.make_board()
self.make_items()
self._bombs = []
self._flames = []
self._powerups = []
for agent_id, agent in enumerate(self._agents):
pos = np.where(self._board == utility.agent_value(agent_id))
row = pos[0][0]
col = pos[1][0]
agent.set_start_position((row, col))
agent.reset()
return self.get_observations()
def seed(self, seed=None):
gym.spaces.prng.seed(seed)
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, actions):
max_blast_strength = self._agent_view_size or 10
result = self.model.step(
actions,
self._board,
self._agents,
self._bombs,
self._items,
self._flames,
max_blast_strength=max_blast_strength)
self._board, self._agents, self._bombs, self._items, self._flames = \
result[:5]
done = self._get_done()
obs = self.get_observations()
reward = self._get_rewards()
info = self._get_info(done, reward)
self._step_count += 1
return obs, reward, done, info
def render(self,
mode=None,
close=False,
record_pngs_dir=None,
record_json_dir=None,
do_sleep=True):
if close:
self.close()
return
mode = mode or self._mode or 'human'
if mode == 'rgb_array':
rgb_array = graphics.PixelViewer.rgb_array(
self._board, self._board_size, self._agents,
self._is_partially_observable, self._agent_view_size)
return rgb_array[0]
if self._viewer is None:
if mode == 'rgb_pixel':
self._viewer = graphics.PixelViewer(
board_size=self._board_size,
agents=self._agents,
agent_view_size=self._agent_view_size,
partially_observable=self._is_partially_observable)
else:
self._viewer = graphics.PommeViewer(
board_size=self._board_size,
agents=self._agents,
partially_observable=self._is_partially_observable,
agent_view_size=self._agent_view_size,
game_type=self._game_type)
self._viewer.set_board(self._board)
self._viewer.set_agents(self._agents)
self._viewer.set_step(self._step_count)
self._viewer.render()
# Register all agents which need human input with Pyglet.
# This needs to be done here as the first `imshow` creates the
# window. Using `push_handlers` allows for easily creating agents
# that use other Pyglet inputs such as joystick, for example.
for agent in self._agents:
if agent.has_user_input():
self._viewer.window.push_handlers(agent)
else:
self._viewer.set_board(self._board)
self._viewer.set_agents(self._agents)
self._viewer.set_step(self._step_count)
self._viewer.render()
if record_pngs_dir:
self._viewer.save(record_pngs_dir)
if record_json_dir:
self.save_json(record_json_dir)
if do_sleep:
time.sleep(1.0 / self._render_fps)
def close(self):
if self._viewer is not None:
self._viewer.close()
self._viewer = None
for agent in self._agents:
agent.shutdown()
@staticmethod
def featurize(obs):
board = obs["board"].reshape(-1).astype(np.float32)
bomb_blast_strength = obs["bomb_blast_strength"].reshape(-1) \
.astype(np.float32)
bomb_life = obs["bomb_life"].reshape(-1).astype(np.float32)
position = utility.make_np_float(obs["position"])
ammo = utility.make_np_float([obs["ammo"]])
blast_strength = utility.make_np_float([obs["blast_strength"]])
can_kick = utility.make_np_float([obs["can_kick"]])
teammate = utility.make_np_float([obs["teammate"].value])
enemies = utility.make_np_float([e.value for e in obs["enemies"]])
return np.concatenate(
(board, bomb_blast_strength, bomb_life, position, ammo,
blast_strength, can_kick, teammate, enemies))
def save_json(self, record_json_dir):
info = self.get_json_info()
count = "{0:0=3d}".format(self._step_count)
suffix = count + '.json'
path = os.path.join(record_json_dir, suffix)
with open(path, 'w') as f:
f.write(json.dumps(info, sort_keys=True, indent=4))
def get_json_info(self):
ret = {
'board_size': self._board_size,
'step_count': self._step_count,
'board': self._board,
'agents': self._agents,
'bombs': self._bombs,
'flames': self._flames,
'items': [[k, i] for k, i in self._items.items()]
}
for key, value in ret.items():
ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)
return ret
def set_json_info(self):
board_size = int(self._init_game_state['board_size'])
self._board_size = board_size
self._step_count = int(self._init_game_state['step_count'])
board_array = json.loads(self._init_game_state['board'])
self._board = np.ones((board_size, board_size)).astype(np.uint8)
self._board *= constants.Item.Passage.value
for x in range(self._board_size):
for y in range(self._board_size):
self._board[x, y] = board_array[x][y]
self._items = {}
item_array = json.loads(self._init_game_state['items'])
for i in item_array:
self._items[tuple(i[0])] = i[1]
agent_array = json.loads(self._init_game_state['agents'])
for a in agent_array:
agent = next(x for x in self._agents \
if x.agent_id == a['agent_id'])
agent.set_start_position((a['position'][0], a['position'][1]))
agent.reset(
int(a['ammo']), bool(a['is_alive']), int(a['blast_strength']),
bool(a['can_kick']))
self._bombs = []
bomb_array = json.loads(self._init_game_state['bombs'])
for b in bomb_array:
bomber = next(x for x in self._agents \
if x.agent_id == b['bomber_id'])
moving_direction = b['moving_direction']
if moving_direction is not None:
moving_direction = constants.Action(moving_direction)
self._bombs.append(
characters.Bomb(bomber, tuple(b['position']), int(b['life']),
int(b['blast_strength']), moving_direction))
self._flames = []
flame_array = json.loads(self._init_game_state['flames'])
for f in flame_array:
self._flames.append(
characters.Flame(tuple(f['position']), f['life']))
| true
| true
|
f7170a4a00a9b5ea431fcc159ff237369b376a27
| 5,708
|
py
|
Python
|
syft/frameworks/torch/fl/utils.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | 1
|
2020-05-25T13:44:29.000Z
|
2020-05-25T13:44:29.000Z
|
syft/frameworks/torch/fl/utils.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | 2
|
2020-03-09T09:17:06.000Z
|
2020-04-09T13:33:12.000Z
|
syft/frameworks/torch/fl/utils.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | 1
|
2022-03-06T06:22:21.000Z
|
2022-03-06T06:22:21.000Z
|
import syft as sy
import torch
from typing import Dict
from typing import Any
import logging
logger = logging.getLogger(__name__)
def extract_batches_per_worker(federated_train_loader: sy.FederatedDataLoader):
    """Extracts the batches from the federated_train_loader and stores them
    in a dictionary (keys = data.location).

    Args:
        federated_train_loader: the connection object we use to send responses.
        back to the client.
    """
    logging_interval = 100
    # One (initially empty) bucket per worker holding part of the dataset.
    datasets = federated_train_loader.federated_dataset.datasets
    batches = {
        datasets[worker_id].location: []
        for worker_id in federated_train_loader.workers
    }
    for batch_idx, (data, target) in enumerate(federated_train_loader):
        if batch_idx % logging_interval == 0:
            logger.debug("Extracted %s batches from federated_train_loader", batch_idx)
        # Route each batch to the bucket of the worker that holds the data.
        batches[data.location].append((data, target))
    return batches
def add_model(dst_model, src_model):
    """Add the parameters of two models.

    Args:
        dst_model (torch.nn.Module): the model to which the src_model will be added.
        src_model (torch.nn.Module): the model to be added to dst_model.
    Returns:
        torch.nn.Module: the resulting model of the addition.
    """
    # Sum happens in place on dst_model's parameters, keyed by name;
    # parameters of src_model with no matching name are skipped.
    dst_params = dict(dst_model.named_parameters())
    with torch.no_grad():
        for name, src_param in src_model.named_parameters():
            if name in dst_params:
                dst_params[name].set_(src_param.data + dst_params[name].data)
    return dst_model
def scale_model(model, scale):
    """Scale the parameters of a model.

    Args:
        model (torch.nn.Module): the models whose parameters will be scaled.
        scale (float): the scaling factor.
    Returns:
        torch.nn.Module: the module with scaled parameters.
    """
    # Scaling happens in place on the model's own parameter tensors.
    with torch.no_grad():
        for _, param in dict(model.named_parameters()).items():
            param.set_(param.data * scale)
    return model
def federated_avg(models: Dict[Any, torch.nn.Module]) -> torch.nn.Module:
    """Calculate the federated average of a dictionary containing models.
       The models are extracted from the dictionary
       via the models.values() command.

    Args:
        models (Dict[Any, torch.nn.Module]): a dictionary of models
        for which the federated average is calculated.

    Returns:
        torch.nn.Module: the module with averaged parameters.
    """
    # NOTE: the first model in the dict is modified in place and returned.
    entries = list(models.values())
    aggregate = entries[0]
    for other in entries[1:]:
        aggregate = add_model(aggregate, other)
    aggregate = scale_model(aggregate, 1.0 / len(models))
    return aggregate
def accuracy(pred_softmax, target):
    """Calculate the accuray of a given prediction.

    This functions assumes pred_softmax to be converted into the final prediction by taking the argmax.

    Args:
        pred_softmax: array type(float), providing nr_classes values per element in target.
        target: array type(int), correct classes, taking values in range [0, nr_classes).

    Returns:
        accuracy: float, fraction of correct predictions.
    """
    # Final class = argmax over the class axis; compare against targets
    # reshaped to match, then divide hit count by the number of samples.
    predictions = pred_softmax.argmax(dim=1)
    hits = (predictions.float() == target.view(predictions.shape).float()).sum()
    return hits.numpy() / float(len(target))
def create_gaussian_mixture_toy_data(nr_samples: int):  # pragma: no cover
    """ Create a simple toy data for binary classification

    The data is drawn from two normal distributions
    target = 1: mu = 2, sigma = 1
    target = 0: mu = 0, sigma = 1
    The dataset is balanced with an equal number of positive and negative samples

    Args:
        nr_samples: number of samples to generate

    Returns:
        data, targets
    """
    sample_dim = 2
    half = int(nr_samples / 2)
    # Negative class centred at -5, positive class centred at +5.
    negatives = torch.randn(half, sample_dim, requires_grad=True) - 5
    positives = torch.randn(half, sample_dim, requires_grad=True) + 5
    features = torch.cat([negatives, positives], dim=0)
    labels = torch.cat(
        [
            torch.zeros(half, requires_grad=False).long(),
            torch.ones(half, requires_grad=False).long(),
        ],
        dim=0,
    )
    return features, labels
def iris_data_partial():
    """Return 30 samples from the iris data set.

    Source: https://archive.ics.uci.edu/ml/datasets/iris

    Ten samples per class, in class order (0 = Iris-setosa,
    1 = Iris-versicolor, 2 = Iris-virginica); each sample has the four
    iris measurements.

    Returns:
        (data, targets): a (30, 4) float tensor and a length-30 long tensor.
    """
    # Fix: dropped the unused `target_to_string` local and built the target
    # vector with list multiplication instead of three literal lists.
    setosa = [
        [5.1, 3.5, 1.4, 0.2],
        [4.9, 3.0, 1.4, 0.2],
        [4.7, 3.2, 1.3, 0.2],
        [4.6, 3.1, 1.5, 0.2],
        [5.0, 3.6, 1.4, 0.2],
        [5.4, 3.9, 1.7, 0.4],
        [4.6, 3.4, 1.4, 0.3],
        [5.0, 3.4, 1.5, 0.2],
        [4.4, 2.9, 1.4, 0.2],
        [4.9, 3.1, 1.5, 0.1],
    ]
    versicolor = [
        [7.0, 3.2, 4.7, 1.4],
        [6.4, 3.2, 4.5, 1.5],
        [6.9, 3.1, 4.9, 1.5],
        [5.5, 2.3, 4.0, 1.3],
        [6.5, 2.8, 4.6, 1.5],
        [5.7, 2.8, 4.5, 1.3],
        [6.3, 3.3, 4.7, 1.6],
        [4.9, 2.4, 3.3, 1.0],
        [6.6, 2.9, 4.6, 1.3],
        [5.2, 2.7, 3.9, 1.4],
    ]
    virginica = [
        [6.3, 3.3, 6.0, 2.5],
        [5.8, 2.7, 5.1, 1.9],
        [7.1, 3.0, 5.9, 2.1],
        [6.3, 2.9, 5.6, 1.8],
        [6.5, 3.0, 5.8, 2.2],
        [7.6, 3.0, 6.6, 2.1],
        [4.9, 2.5, 4.5, 1.7],
        [7.3, 2.9, 6.3, 1.8],
        [6.7, 2.5, 5.8, 1.8],
        [7.2, 3.6, 6.1, 2.5],
    ]
    data = setosa + versicolor + virginica
    targets = [0] * 10 + [1] * 10 + [2] * 10
    return torch.tensor(data), torch.tensor(targets)
| 29.884817
| 103
| 0.596181
|
import syft as sy
import torch
from typing import Dict
from typing import Any
import logging
logger = logging.getLogger(__name__)
def extract_batches_per_worker(federated_train_loader: sy.FederatedDataLoader):
logging_interval = 100
batches = {}
for worker_id in federated_train_loader.workers:
worker = federated_train_loader.federated_dataset.datasets[worker_id].location
batches[worker] = []
for batch_idx, (data, target) in enumerate(federated_train_loader):
if batch_idx % logging_interval == 0:
logger.debug("Extracted %s batches from federated_train_loader", batch_idx)
batches[data.location].append((data, target))
return batches
def add_model(dst_model, src_model):
params1 = src_model.named_parameters()
params2 = dst_model.named_parameters()
dict_params2 = dict(params2)
with torch.no_grad():
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].set_(param1.data + dict_params2[name1].data)
return dst_model
def scale_model(model, scale):
params = model.named_parameters()
dict_params = dict(params)
with torch.no_grad():
for name, param in dict_params.items():
dict_params[name].set_(dict_params[name].data * scale)
return model
def federated_avg(models: Dict[Any, torch.nn.Module]) -> torch.nn.Module:
nr_models = len(models)
model_list = list(models.values())
model = model_list[0]
for i in range(1, nr_models):
model = add_model(model, model_list[i])
model = scale_model(model, 1.0 / nr_models)
return model
def accuracy(pred_softmax, target):
nr_elems = len(target)
pred = pred_softmax.argmax(dim=1)
return (pred.float() == target.view(pred.shape).float()).sum().numpy() / float(nr_elems)
def create_gaussian_mixture_toy_data(nr_samples: int):
sample_dim = 2
one_half = int(nr_samples / 2)
X1 = torch.randn(one_half, sample_dim, requires_grad=True) - 5
X2 = torch.randn(one_half, sample_dim, requires_grad=True) + 5
X = torch.cat([X1, X2], dim=0)
Y1 = torch.zeros(one_half, requires_grad=False).long()
Y2 = torch.ones(one_half, requires_grad=False).long()
Y = torch.cat([Y1, Y2], dim=0)
return X, Y
def iris_data_partial():
data = [
[5.1, 3.5, 1.4, 0.2],
[4.9, 3.0, 1.4, 0.2],
[4.7, 3.2, 1.3, 0.2],
[4.6, 3.1, 1.5, 0.2],
[5.0, 3.6, 1.4, 0.2],
[5.4, 3.9, 1.7, 0.4],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[4.4, 2.9, 1.4, 0.2],
[4.9, 3.1, 1.5, 0.1],
]
target_to_string = {0: "Iris-setosa", 1: "Iris-versicolor", 2: "Iris-virginica"}
targets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
data += [
[7.0, 3.2, 4.7, 1.4],
[6.4, 3.2, 4.5, 1.5],
[6.9, 3.1, 4.9, 1.5],
[5.5, 2.3, 4.0, 1.3],
[6.5, 2.8, 4.6, 1.5],
[5.7, 2.8, 4.5, 1.3],
[6.3, 3.3, 4.7, 1.6],
[4.9, 2.4, 3.3, 1.0],
[6.6, 2.9, 4.6, 1.3],
[5.2, 2.7, 3.9, 1.4],
]
targets += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
data += [
[6.3, 3.3, 6.0, 2.5],
[5.8, 2.7, 5.1, 1.9],
[7.1, 3.0, 5.9, 2.1],
[6.3, 2.9, 5.6, 1.8],
[6.5, 3.0, 5.8, 2.2],
[7.6, 3.0, 6.6, 2.1],
[4.9, 2.5, 4.5, 1.7],
[7.3, 2.9, 6.3, 1.8],
[6.7, 2.5, 5.8, 1.8],
[7.2, 3.6, 6.1, 2.5],
]
targets += [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
return torch.tensor(data), torch.tensor(targets)
| true
| true
|
f7170ac6132a5c5e78915df4b9f637eb7aa8674a
| 6,819
|
py
|
Python
|
pool/models.py
|
tammer/pool
|
bacf2da7106ba9eaff7d4bde3b6e04823cd6a515
|
[
"MIT"
] | null | null | null |
pool/models.py
|
tammer/pool
|
bacf2da7106ba9eaff7d4bde3b6e04823cd6a515
|
[
"MIT"
] | null | null | null |
pool/models.py
|
tammer/pool
|
bacf2da7106ba9eaff7d4bde3b6e04823cd6a515
|
[
"MIT"
] | null | null | null |
# -*- coding: future_fstrings -*-
from django.db import models
from django.contrib.auth.models import User
import datetime
from pytz import timezone
def now():
    """Return the pool's notion of the current moment.

    Currently just the local naive datetime.  (An earlier revision read the
    reference time from the Main singleton instead — see set_now.)
    """
    return datetime.datetime.now()
def set_now(d):
    """Persist *d* as the 'now' value on the Main singleton row."""
    main_row = Main.objects.all().first()
    main_row.now = d
    main_row.save()
class Team(models.Model):
    # A football team (HOU/ARI examples appear elsewhere in this file).
    # Name variants for different display granularities:
    full_name = models.CharField(max_length=50)  # e.g. city + nickname
    short_name = models.CharField(max_length=3)  # 2-3 letter abbreviation, e.g. "HOU"
    nick_name = models.CharField(max_length=50)  # team nickname only
    city_name = models.CharField(max_length=50)  # city only
class Game(models.Model):
    """One scheduled game: favourite vs underdog with a point spread.

    ``spread`` is stored as a non-negative integer; the effective line is
    spread + 0.5 (see as_string), so a pick can never push.
    ``fav_score``/``udog_score`` stay NULL until the game has been scored.
    The home side is marked by ``fav_is_home`` and the name helpers render
    the home team in UPPER CASE, the away team in lower case.
    """
    week_number = models.IntegerField()
    game_number = models.IntegerField()
    fav = models.ForeignKey(Team, related_name='fav_games', on_delete=models.CASCADE)
    udog = models.ForeignKey(Team, related_name='udog_games', on_delete=models.CASCADE)
    spread = models.IntegerField( null=True )
    game_date = models.DateTimeField()
    fav_score = models.IntegerField( null=True )
    udog_score = models.IntegerField( null=True )
    fav_is_home = models.BooleanField()
    class Meta:
        constraints = [
            models.UniqueConstraint(fields=['week_number', 'game_number'], name='unique_week_game'),
            #spread >=0
        ]
    def totalPoints(self):
        """Combined final score, or None while the game is unscored."""
        if self.fav_score is None or self.udog_score is None:
            return None
        else:
            return self.fav_score+self.udog_score
    # if HOU is 3.5 points over ARI, then setFav(HOU,3)
    # where HOU is_a Team object
    def setFav(self,fav,spread):
        """Make *fav* the favourite at *spread*, swapping sides if needed.

        Raises NameError for a negative spread, a string argument, or a
        team that is not playing in this game.  Does not save.
        """
        if spread < 0:
            raise(NameError('spread must be positive'))
        if type(fav) is str:
            raise(NameError('you sent a string as fav to setFav. Send a Team object'))
        if fav != self.fav and fav != self.udog:
            raise(NameError(f'{fav.nick_name} not playing in this game! (I am game {self.game_number}, {self.fav.nick_name} v {self.udog.nick_name})'))
        self.spread = spread
        if self.fav != fav:
            # swap sides; the home flag travels with the swap
            temp = self.fav
            self.fav = self.udog
            self.udog = temp
            self.fav_is_home = not(self.fav_is_home)
    def save(self, *args, **kwargs):
        """Normalize before saving: a negative spread means the sides are
        reversed, so flip fav/udog and make the spread positive."""
        if not(self.spread is None) and self.spread < 0:
            self.spread = -self.spread
            temp = self.fav
            self.fav = self.udog
            self.udog = temp
            self.fav_is_home = not(self.fav_is_home)
        super(Game, self).save(*args, **kwargs)
    def favFullName(self):
        """Favourite's full name: UPPER if at home, lower if away."""
        if self.fav_is_home:
            return self.fav.full_name.upper()
        else:
            return self.fav.full_name.lower()
    def udogFullName(self):
        """Underdog's full name: UPPER if at home, lower if away."""
        if not(self.fav_is_home):
            return self.udog.full_name.upper()
        else:
            return self.udog.full_name.lower()
    def favShortName(self):
        """Favourite's short (<=3 char) name, cased by home/away."""
        if self.fav_is_home:
            return self.fav.short_name.upper()
        else:
            return self.fav.short_name.lower()
    def udogShortName(self):
        """Underdog's short (<=3 char) name, cased by home/away."""
        if not(self.fav_is_home):
            return self.udog.short_name.upper()
        else:
            return self.udog.short_name.lower()
    def favNickName(self):
        """Favourite's nickname, cased by home/away."""
        if self.fav_is_home:
            return self.fav.nick_name.upper()
        else:
            return self.fav.nick_name.lower()
    def udogNickName(self):
        """Underdog's nickname, cased by home/away."""
        if not(self.fav_is_home):
            return self.udog.nick_name.upper()
        else:
            return self.udog.nick_name.lower()
    def homeNickName(self):
        """Nickname of whichever side is at home (original casing)."""
        if self.fav_is_home:
            return self.fav.nick_name
        else:
            return self.udog.nick_name
    def awayNickName(self):
        """Nickname of whichever side is away (original casing)."""
        if self.fav_is_home:
            return self.udog.nick_name
        else:
            return self.fav.nick_name
    def isClosed(self, current_time = None):
        """True once picks are locked for this game.

        Locks at the earlier of the game's own kickoff or 1pm on the
        Sunday that ends its week; Monday games are grouped with the
        *previous* Sunday (distance -1).  NOTE(review): datetimes appear
        to be naive — pytz is imported by the module but unused here;
        timezone behavior unverified.
        """
        if current_time is None:
            current_time = now()
        if self.game_date.weekday() == 0: # Monday
            distance_to_sunday = -1
        else:
            distance_to_sunday = 6 - self.game_date.weekday()
        current_sunday = self.game_date + datetime.timedelta(distance_to_sunday)
        current_sunday = current_sunday.replace(hour=13, minute=0, second=0)
        if current_time > current_sunday or current_time > self.game_date:
            return True
        else:
            return False
    def isOver(self):
        """True once both final scores have been entered."""
        if self.fav_score is None or self.udog_score is None:
            return False
        else:
            return True
    def isOpen(self, current_time = None):
        """Convenience inverse of isClosed()."""
        return not(self.isClosed(current_time = current_time))
    def favWins(self):
        """True iff the favourite covered: winning margin exceeds the spread.

        Raises TypeError if called before scores are entered (fav_score /
        udog_score are still None) — see comment below.
        """
        # throw exception if scores are not filled in
        if self.fav_score - self.udog_score > self.spread:
            return True
        else:
            return False
    def as_string(self):
        """Multi-line debug rendering: week/game, kickoff, line and scores."""
        return f'{self.week_number}/{self.game_number}\n{self.game_date.strftime("%m/%d/%Y, %H:%M:%S")}\n{self.favNickName()} {self.fav_score}\t{self.spread}.5\t{self.udogNickName()} {self.udog_score}'
class Pick(models.Model):
    """A player's pick (favourite or underdog) for one game of one week."""
    player = models.ForeignKey(User,on_delete=models.CASCADE)
    week_number = models.IntegerField()
    game_number = models.IntegerField()
    picked_fav = models.BooleanField()
    def save(self, *args, **kwargs):
        """Save the pick, refusing (with a console message) once the game
        is closed, unless called as save(force=True)."""
        # pop with a default replaces the old bare try/except, which
        # silently swallowed *every* exception, not just the missing key.
        force = kwargs.pop('force', False)
        if not force and Game.objects.get(game_number=self.game_number, week_number=self.week_number).isClosed():
            # You can't change this pick!
            err = f'Not actually saving. You are trying to change a pick for a game that isClosed. week: {self.week_number} game:{self.game_number}. If you want to do this use force=True'
            print(err)
        else:
            super(Pick, self).save(*args, **kwargs)
    def game(self):
        """Return the Game object this pick refers to."""
        return Game.objects.get(week_number=self.week_number, game_number=self.game_number)
    def whoShortName(self):
        """Short name of the picked team (upper-cased when at home)."""
        if self.picked_fav:
            return self.game().favShortName()
        else:
            return self.game().udogShortName()
    def isCorrect(self):
        """True iff the game is over and the picked side beat the spread."""
        game = Game.objects.get(week_number=self.week_number, game_number=self.game_number)
        if game.isOver():
            return self.picked_fav and game.favWins() or not(self.picked_fav) and not(game.favWins())
        else:
            return False
class Monday(models.Model):
    """A player's guess at the total points in the week's Monday game."""
    player = models.ForeignKey(User,on_delete=models.CASCADE)
    week_number = models.IntegerField()
    total_points = models.IntegerField(null=True)
    def bonus(self):
        """Tie-break bonus that grows as the guess nears the actual total.

        The Monday game is the highest game_number of the week.  Returns
        0.0 until that game has been scored.  The 0.1 offset inside abs()
        is presumably a deliberate asymmetric tie-breaker — TODO confirm.
        """
        monday_game = Game.objects.filter(week_number=self.week_number).order_by('game_number').last()
        tp = monday_game.totalPoints()
        if tp is None:
            return 0.0
        else:
            return 1 / ( 1 + abs( tp - self.total_points - 0.1 ) )
    def save(self, *args, **kwargs):
        """Save, refusing once the Monday game is closed unless force=True."""
        # pop with a default replaces the old bare try/except, which
        # silently swallowed *every* exception, not just the missing key.
        force = kwargs.pop('force', False)
        if not force and Game.objects.filter(week_number=self.week_number).order_by('game_number').last().isClosed():
            err = f'Not actually saving. You are trying to change MNTP for a game that isClosed. week: {self.week_number}. If you want to do this use force=True'
            print(err)
        else:
            super(Monday, self).save(*args, **kwargs)
class Bank(models.Model):
    """One money movement for a player; presumably negative amounts are
    payouts — TODO confirm against callers."""
    player = models.ForeignKey(User,on_delete=models.CASCADE)
    deposit_amount = models.FloatField()
    note = models.CharField(max_length=50, default='')
    transaction_date = models.DateTimeField( auto_now=True, blank=False)
class Blog(models.Model):
    """A free-text announcement entry, timestamped on every save (auto_now)."""
    entry_date = models.DateTimeField( auto_now=True, blank=False)
    entry = models.CharField(max_length=2048, default='')
# only used in development
class Main(models.Model):
    """Holds a simulated 'now' timestamp used by now()/set_now() in dev."""
    now = models.DateTimeField( auto_now=False, blank=False)
| 29.519481
| 195
| 0.723713
|
from django.db import models
from django.contrib.auth.models import User
import datetime
from pytz import timezone
def now():
return datetime.datetime.now()
def set_now(d):
m = Main.objects.all().first()
m.now = d
m.save()
class Team(models.Model):
full_name = models.CharField(max_length=50)
short_name = models.CharField(max_length=3)
nick_name = models.CharField(max_length=50)
city_name = models.CharField(max_length=50)
class Game(models.Model):
week_number = models.IntegerField()
game_number = models.IntegerField()
fav = models.ForeignKey(Team, related_name='fav_games', on_delete=models.CASCADE)
udog = models.ForeignKey(Team, related_name='udog_games', on_delete=models.CASCADE)
spread = models.IntegerField( null=True )
game_date = models.DateTimeField()
fav_score = models.IntegerField( null=True )
udog_score = models.IntegerField( null=True )
fav_is_home = models.BooleanField()
class Meta:
constraints = [
models.UniqueConstraint(fields=['week_number', 'game_number'], name='unique_week_game'),
]
def totalPoints(self):
if self.fav_score is None or self.udog_score is None:
return None
else:
return self.fav_score+self.udog_score
def setFav(self,fav,spread):
if spread < 0:
raise(NameError('spread must be positive'))
if type(fav) is str:
raise(NameError('you sent a string as fav to setFav. Send a Team object'))
if fav != self.fav and fav != self.udog:
raise(NameError(f'{fav.nick_name} not playing in this game! (I am game {self.game_number}, {self.fav.nick_name} v {self.udog.nick_name})'))
self.spread = spread
if self.fav != fav:
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
def save(self, *args, **kwargs):
if not(self.spread is None) and self.spread < 0:
self.spread = -self.spread
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
super(Game, self).save(*args, **kwargs)
def favFullName(self):
if self.fav_is_home:
return self.fav.full_name.upper()
else:
return self.fav.full_name.lower()
def udogFullName(self):
if not(self.fav_is_home):
return self.udog.full_name.upper()
else:
return self.udog.full_name.lower()
def favShortName(self):
if self.fav_is_home:
return self.fav.short_name.upper()
else:
return self.fav.short_name.lower()
def udogShortName(self):
if not(self.fav_is_home):
return self.udog.short_name.upper()
else:
return self.udog.short_name.lower()
def favNickName(self):
if self.fav_is_home:
return self.fav.nick_name.upper()
else:
return self.fav.nick_name.lower()
def udogNickName(self):
if not(self.fav_is_home):
return self.udog.nick_name.upper()
else:
return self.udog.nick_name.lower()
def homeNickName(self):
if self.fav_is_home:
return self.fav.nick_name
else:
return self.udog.nick_name
def awayNickName(self):
if self.fav_is_home:
return self.udog.nick_name
else:
return self.fav.nick_name
def isClosed(self, current_time = None):
if current_time is None:
current_time = now()
if self.game_date.weekday() == 0:
distance_to_sunday = -1
else:
distance_to_sunday = 6 - self.game_date.weekday()
current_sunday = self.game_date + datetime.timedelta(distance_to_sunday)
current_sunday = current_sunday.replace(hour=13, minute=0, second=0)
if current_time > current_sunday or current_time > self.game_date:
return True
else:
return False
def isOver(self):
if self.fav_score is None or self.udog_score is None:
return False
else:
return True
def isOpen(self, current_time = None):
return not(self.isClosed(current_time = current_time))
def favWins(self):
if self.fav_score - self.udog_score > self.spread:
return True
else:
return False
def as_string(self):
return f'{self.week_number}/{self.game_number}\n{self.game_date.strftime("%m/%d/%Y, %H:%M:%S")}\n{self.favNickName()} {self.fav_score}\t{self.spread}.5\t{self.udogNickName()} {self.udog_score}'
class Pick(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
game_number = models.IntegerField()
picked_fav = models.BooleanField()
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.get(game_number=self.game_number,week_number=self.week_number).isClosed():
err = f'Not actually saving. You are trying to change a pick for a game that isClosed. week: {self.week_number} game:{self.game_number}. If you want to do this use force=True'
print(err)
else:
super(Pick, self).save(*args, **kwargs)
def game(self):
return Game.objects.get(week_number=self.week_number, game_number=self.game_number)
def whoShortName(self):
if self.picked_fav:
return self.game().favShortName()
else:
return self.game().udogShortName()
def isCorrect(self):
game = Game.objects.get(week_number=self.week_number, game_number=self.game_number)
if game.isOver():
return self.picked_fav and game.favWins() or not(self.picked_fav) and not(game.favWins())
else:
return False;
class Monday(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
total_points = models.IntegerField(null=True)
def bonus(self):
monday_game = Game.objects.filter(week_number=self.week_number).order_by('game_number').last()
tp = monday_game.totalPoints()
if tp is None:
return 0.0
else:
return 1 / ( 1 + abs( tp - self.total_points - 0.1 ) )
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.filter(week_number=self.week_number).order_by('game_number').last().isClosed():
err = f'Not actually saving. You are trying to change MNTP for a game that isClosed. week: {self.week_number}. If you want to do this use force=True'
print(err)
else:
super(Monday, self).save(*args, **kwargs)
class Bank(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
deposit_amount = models.FloatField()
note = models.CharField(max_length=50, default='')
transaction_date = models.DateTimeField( auto_now=True, blank=False)
class Blog(models.Model):
entry_date = models.DateTimeField( auto_now=True, blank=False)
entry = models.CharField(max_length=2048, default='')
# only used in development
class Main(models.Model):
now = models.DateTimeField( auto_now=False, blank=False)
| true
| true
|
f7170ac8f8e6a649b111e7906e5760a225c20807
| 619
|
py
|
Python
|
flexible_reports/migrations/0009_auto_20171025_0558.py
|
mpasternak/django-flexible-reports
|
cdf62590efb2937b30e19952a67afbc3a3e1c192
|
[
"MIT"
] | 2
|
2017-08-31T11:55:26.000Z
|
2018-07-14T19:39:05.000Z
|
flexible_reports/migrations/0009_auto_20171025_0558.py
|
mpasternak/django-flexible-reports
|
cdf62590efb2937b30e19952a67afbc3a3e1c192
|
[
"MIT"
] | 1
|
2017-08-24T07:04:46.000Z
|
2017-09-23T14:39:06.000Z
|
flexible_reports/migrations/0009_auto_20171025_0558.py
|
mpasternak/django-flexible-reports
|
cdf62590efb2937b30e19952a67afbc3a3e1c192
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-25 10:58
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ReportElement.datasource optional
    (blank=True, null=True) while keeping CASCADE deletion."""
    dependencies = [
        ('flexible_reports', '0008_auto_20171025_0553'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reportelement',
            name='datasource',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='flexible_reports.Datasource', verbose_name='Datasource'),
        ),
    ]
| 28.136364
| 165
| 0.678514
|
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flexible_reports', '0008_auto_20171025_0553'),
]
operations = [
migrations.AlterField(
model_name='reportelement',
name='datasource',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='flexible_reports.Datasource', verbose_name='Datasource'),
),
]
| true
| true
|
f7170bc3e6ea26e09566be6cd362d8f830bcb772
| 4,016
|
py
|
Python
|
InvenTree/InvenTree/urls.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | null | null | null |
InvenTree/InvenTree/urls.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | 8
|
2020-06-06T01:14:46.000Z
|
2022-03-12T00:14:35.000Z
|
InvenTree/InvenTree/urls.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | null | null | null |
"""
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from qr_code import urls as qr_code_urls
from company.urls import company_urls
from company.urls import supplier_part_urls
from company.urls import price_break_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import po_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import IndexView, SearchView, SettingsView, EditUserView, SetPasswordView
from .views import InfoView
from users.urls import user_urls, user_api_urls
from access.urls import access_urls
admin.site.site_header = "InvenTree Admin"
# REST API patterns; mounted under /api/ in urlpatterns further down
apipatterns = [
    url(r'^common/', include(common_api_urls)),
    url(r'^part/', include(part_api_urls)),
    url(r'^bom/', include(bom_api_urls)),
    url(r'^company/', include(company_api_urls)),
    url(r'^stock/', include(stock_api_urls)),
    url(r'^build/', include(build_api_urls)),
    url(r'^po/', include(po_api_urls)),
    # User URLs
    url(r'^user/', include(user_api_urls)),
    # InvenTree information endpoint
    url(r'^$', InfoView.as_view(), name='inventree-info'),
]
settings_urls = [
url(r'^user/?', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings-user'),
url(r'^currency/?', SettingsView.as_view(template_name='InvenTree/settings/currency.html'), name='settings-currency'),
url(r'^part/?', SettingsView.as_view(template_name='InvenTree/settings/part.html'), name='settings-part'),
url(r'^other/?', SettingsView.as_view(template_name='InvenTree/settings/other.html'), name='settings-other'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings'),
]
urlpatterns = [
# User URLs
url(r'^user/', include(user_urls)),
url(r'^access/', include(access_urls)),
url(r'^part/', include(part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^price-break/', include(price_break_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', auth_views.LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
url(r'^qr_code/', include(qr_code_urls, namespace='qr_code')),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
]
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Send any unknown URLs to the parts page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| 34.033898
| 122
| 0.728088
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from qr_code import urls as qr_code_urls
from company.urls import company_urls
from company.urls import supplier_part_urls
from company.urls import price_break_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import po_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import IndexView, SearchView, SettingsView, EditUserView, SetPasswordView
from .views import InfoView
from users.urls import user_urls, user_api_urls
from access.urls import access_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^common/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^po/', include(po_api_urls)),
url(r'^user/', include(user_api_urls)),
url(r'^$', InfoView.as_view(), name='inventree-info'),
]
settings_urls = [
url(r'^user/?', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings-user'),
url(r'^currency/?', SettingsView.as_view(template_name='InvenTree/settings/currency.html'), name='settings-currency'),
url(r'^part/?', SettingsView.as_view(template_name='InvenTree/settings/part.html'), name='settings-part'),
url(r'^other/?', SettingsView.as_view(template_name='InvenTree/settings/other.html'), name='settings-other'),
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings'),
]
urlpatterns = [
url(r'^user/', include(user_urls)),
url(r'^access/', include(access_urls)),
url(r'^part/', include(part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^price-break/', include(price_break_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', auth_views.LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
url(r'^qr_code/', include(qr_code_urls, namespace='qr_code')),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| true
| true
|
f7170c3b4209a882e4b52313adf31afa74155a64
| 2,107
|
py
|
Python
|
main.py
|
flace/text-generator
|
4392a4071a0d203b0b5f814c77fb7b0943acad79
|
[
"MIT"
] | null | null | null |
main.py
|
flace/text-generator
|
4392a4071a0d203b0b5f814c77fb7b0943acad79
|
[
"MIT"
] | null | null | null |
main.py
|
flace/text-generator
|
4392a4071a0d203b0b5f814c77fb7b0943acad79
|
[
"MIT"
] | null | null | null |
import sys
import os
import json
import time
from nlp_tools import tokenize_words, build_prob_language_model, generate_text_using_trigrams
from files_io import load_text, serialize, deserialize
if __name__ == '__main__':
    # CLI: main.py N_WORDS  ->  prints JSON {'error': ..., 'data': generated_text}
    args = sys.argv[1:]
    if not args:
        # Robustness: a missing argument previously crashed with IndexError;
        # report it in the same JSON envelope the success path uses.
        print(json.dumps({'error': 'true', 'data': 'usage: main.py N_WORDS'}))
        sys.exit(1)
    # BUG FIX: sys.argv entries are strings; the word count is used as a
    # count by generate_text_using_trigrams, so convert it to int here.
    N_WORDS = int(args[0])
    FILES_FOLDER = 'files'
    TMP_FOLDER = os.path.join(FILES_FOLDER, 'tmp')
    if not os.path.exists(TMP_FOLDER):
        os.makedirs(TMP_FOLDER)
    source_text_filename = 'eng_wiki.txt'
    source_text_path = os.path.join(FILES_FOLDER, source_text_filename)
    en_text = load_text(source_text_path)
    tmp_folder_content = os.listdir(TMP_FOLDER)
    # If the temp folder doesn't yet contain a serialized token list,
    # tokenize the text and cache the tokens for the next run.
    if len(tmp_folder_content) == 0:
        tokens = tokenize_words(en_text)
        tokenized_words_filename = 'tokenized_' + source_text_filename
        tokenized_words_path = os.path.join(TMP_FOLDER, tokenized_words_filename)
        serialize(tokens, tokenized_words_path)
    # Otherwise deserialize the cached tokens (much faster than re-tokenizing).
    else:
        tokenized_words_filename = tmp_folder_content[0]
        tokenized_words_path = os.path.join(TMP_FOLDER, tokenized_words_filename)
        tokens = deserialize(tokenized_words_path)
    language_model = build_prob_language_model(tokens)
    final_text = generate_text_using_trigrams(language_model, N_WORDS)
    response = {'error': 'false', 'data': final_text}
    print(json.dumps(response))
| 36.964912
| 98
| 0.6972
|
import sys
import os
import json
import time
from nlp_tools import tokenize_words, build_prob_language_model, generate_text_using_trigrams
from files_io import load_text, serialize, deserialize
if __name__ == '__main__':
args = sys.argv[1:]
N_WORDS = args[0]
FILES_FOLDER = 'files'
TMP_FOLDER = os.path.join(FILES_FOLDER, 'tmp')
if not os.path.exists(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
T0 = time.time()
source_text_filename = 'eng_wiki.txt'
source_text_path = os.path.join(FILES_FOLDER, source_text_filename)
en_text = load_text(source_text_path)
T1 = time.time()
tmp_folder_content = os.listdir(TMP_FOLDER)
if len(tmp_folder_content) == 0:
#print("tokenizing words..")
tokens = tokenize_words(en_text)
tokenized_words_filename = 'tokenized_' + source_text_filename
tokenized_words_path = os.path.join(TMP_FOLDER, tokenized_words_filename)
serialize(tokens, tokenized_words_path)
# Otherwise, deserialize tokens list. Much faster, as no need to wait tokenization time
else:
#print("temporary file found. Deserializing words..")
tokenized_words_filename = tmp_folder_content[0]
tokenized_words_path = os.path.join(TMP_FOLDER, tokenized_words_filename)
tokens = deserialize(tokenized_words_path)
T2 = time.time()
#print("\tdone in {0:.2f} sec".format(T2-T1))
#print("building probabilistic language model..")
language_model = build_prob_language_model(tokens)
T3 = time.time()
#print("\tdone in {0:.2f} sec".format(T3-T2))
#print("generating output text..")
final_text = generate_text_using_trigrams(language_model, N_WORDS)
T4 = time.time()
#print("\tdone in {0:.2f} sec".format(T4-T3))
#print("\ntotal execution time: {0:.2f} sec".format(T4-T0))
response = {'error': 'false', 'data': final_text}
print(json.dumps(response))
| true
| true
|
f7170d095793806b6b0978bf391117e084b45d00
| 789
|
py
|
Python
|
freezer_api/__init__.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | 22
|
2015-10-18T02:53:47.000Z
|
2021-09-19T10:38:12.000Z
|
freezer_api/__init__.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
freezer_api/__init__.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | 20
|
2016-03-08T08:34:56.000Z
|
2020-10-13T06:50:05.000Z
|
# (c) Copyright 2016 Hewlett-Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Freezer-api Version
import pbr.version

# Build the VersionInfo once and derive the version string from it; the
# original constructed two identical VersionInfo objects.
version_info = pbr.version.VersionInfo('freezer-api')
__version__ = version_info.version_string()
| 35.863636
| 74
| 0.768061
|
import pbr.version
__version__ = pbr.version.VersionInfo('freezer-api').version_string()
version_info = pbr.version.VersionInfo('freezer-api')
| true
| true
|
f7170dafbded2faa0073fa4e84b225d8c30b922d
| 9,839
|
py
|
Python
|
ising_low.py
|
kartik-gatsby/optimized-ising-model
|
1a9b0210deb26d73f93aec5b0804baaebf9c6ff9
|
[
"MIT"
] | 1
|
2020-06-07T00:11:17.000Z
|
2020-06-07T00:11:17.000Z
|
ising_low.py
|
kartik-gatsby/optimized-ising-model
|
1a9b0210deb26d73f93aec5b0804baaebf9c6ff9
|
[
"MIT"
] | null | null | null |
ising_low.py
|
kartik-gatsby/optimized-ising-model
|
1a9b0210deb26d73f93aec5b0804baaebf9c6ff9
|
[
"MIT"
] | null | null | null |
import numpy as np
from random import random
import matplotlib.pyplot as plt
import time
import logging
logging.basicConfig(level=logging.INFO,filename='simulation.log', filemode='w',format='%(asctime)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')
np.seterr(all='warn')
#################################################
# #
# SIMULATION MACROS #
# #
#################################################
"""__________________________________________
Simulation MACROs:
T_max and T_min is range of temperature.
nt is number of Temperature points.
sweeps are number of mc steps per spin.
min_meas is minimum number Measurement.
j_knife_factor is jack knife factor is used when number of measurement interval < 2 x Correlation time.
All some_variables0 are default value.
------------------------------------------"""
logging.info("Starting Ising Model Simulation")
T_min = 1.5; T_max = 3
nt = int((T_max-T_min)*10+1)
sweeps0 = 1000
max_sweeps = sweeps0*10
min_meas = 100
j_knife_factor0 = 1
startTime = time.time()
T = np.linspace(T_min, T_max, nt)
"""
We will work with expanding lattices. We will store expanded lattice for particular temperature. Stored lattice would be used as initial configuration for higher dimenssion lattic size. We have two methods for expanding lattice: zooming and stacking. We recommend stacking for use.
"""
states = {_: None for _ in T}
#lattice_sizes = 3**(np.arange(2,5))
################OR##################
lattice_sizes = 2**(np.arange(4,8))
#################################################
# #
# FUNCTIONS #
# #
#################################################
"""Onsagar's solutions"""
def onsagar_specific_heat(X):
    """Onsager's exact near-critical specific heat per spin.

    Logarithmic divergence at Tc = 2.269 (in units of J/k_B):
    C(T) = -(2/Tc)^2 * (2/pi) * ln|1 - T/Tc|, evaluated elementwise
    on the temperature array X.
    """
    prefactor = -2.0 * (2.0 / 2.269) ** 2 / np.pi
    return prefactor * np.log(np.abs(1.0 - X / 2.269))
def onsagar_mag(X):
    """Onsager/Yang exact spontaneous magnetization per spin.

    Below Tc = 2.269: m = (1 - sinh(ln(1+sqrt(2)) * Tc/T)^-4)^(1/8);
    zero at and above Tc.  Assumes X is sorted ascending so the two
    pieces can simply be concatenated — TODO confirm callers pass linspace.
    """
    below_tc = X[X < 2.269]
    arg = np.log(1.0 + np.sqrt(2.0)) * 2.269 / below_tc
    ordered = (1.0 - np.sinh(arg) ** (-4.0)) ** (1.0 / 8.0)
    disordered = 0.0 * X[X >= 2.269]
    return np.concatenate((ordered, disordered))
"""Monte Carlo Metropolis algorithm"""
def monteCarlo(n, state, energy, mag, beta, sweeps,max_sweeps):
    """Run Metropolis single-spin-flip sweeps on an n x n Ising lattice.

    Mutates ``state`` in place and returns (energies, mags): the running
    total energy and magnetization recorded after each sweep (one sweep =
    n*n attempted flips).  ``sweeps`` is clamped to ``max_sweeps``.
    Periodic boundary conditions via the %n index wrapping.
    """
    if sweeps > max_sweeps:
        sweeps = max_sweeps
    # Precompute acceptance factors exp(-beta * dE).  dE = 2*s*neighbour_sum
    # is in {-8,-4,0,4,8}; the table is only indexed when dE >= 0, so the
    # odd entries are never used.
    exp_betas = np.exp(-beta*np.arange(0,9))
    energies, mags = np.zeros(sweeps), np.zeros(sweeps)
    # random state indices
    J = np.random.randint(0, n, size=(sweeps, n*n))
    K = np.random.randint(0, n, size=(sweeps, n*n))
    #loop
    for t in range(sweeps):
        for tt in range(n*n):
            # random indices
            j, k = J[t, tt], K[t, tt]
            s = state[j,k]
            neighbour_sum = (state[(j-1)%n, k] +
                state[j, (k-1)%n] + state[j, (k+1)%n] +
                state[(j+1)%n, k])
            energy_diff = 2*s*neighbour_sum
            # Metropolis: always accept energy-lowering flips, otherwise
            # accept with probability exp(-beta * dE).
            if energy_diff < 0 or random() < exp_betas[energy_diff]:
                s *= -1
                energy += energy_diff
                mag += 2*s
                state[j, k] = s
        energies[t], mags[t] = energy, mag
    return energies, mags
"""Calculation of auto-correlation"""
def autocorrelation(M):
    """Estimate the autocorrelation time tau (in sweeps) of the series M.

    Builds the unnormalized autocorrelation function chi(t) over the
    overlapping window, truncates it at the first non-positive value
    (the noisy, oscillating tail), normalizes by chi(0), then fits a
    straight line to log chi(t) weighted by sqrt(chi) to extract the
    exponential decay time.  Returns tau >= 1.

    Fix: removed the unused ``start_time = time.time()`` local.
    """
    tau = 1
    sweeps = len(M)
    auto = np.zeros(sweeps)
    for t in range(sweeps):
        some_time = sweeps-t
        # <M(0)M(t)> - <M(0)><M(t)> over the overlapping window
        first_term = np.average(M[:some_time]*M[t:sweeps])
        S1 = np.average(M[:some_time])
        S2 = np.average(M[t:sweeps])
        auto_temp = first_term - S1*S2
        if auto_temp > 0:
            auto[t] = auto_temp
        else:  # remove oscillating part
            break
    if auto[0] != 0:
        auto = auto[auto>0]
        auto = auto/auto[0]  # normalization
        len_auto = len(auto)
        if len_auto > 1:  # need at least two points to fit a line
            tau = int(-1/np.polyfit(np.arange(len_auto), np.log(auto), 1, w=np.sqrt(auto))[0])
            tau = max(tau,1)
    logging.info(f"Correlation time = {tau}")
    return tau
"""
Calculation of specific heat or Susceptibility and errorbar.
CX is Specific Heat or Susceptibility.
CX_i is Specific Heat or Susceptibility without i-th measurement.
"""
def jackKnife(EM, factor=1):
    """Jack-knife estimate of Var(EM) and its statistical error bar.

    Returns (CX, CX_err) where CX is the full-sample variance (specific
    heat / susceptibility up to beta factors) and CX_err is built from
    the leave-one-out variances, inflated by *factor* when measurement
    spacing is shorter than twice the correlation time.
    """
    n_samples = len(EM)
    full_var = np.var(EM)
    leave_one_out = np.array([np.var(np.delete(EM, i)) for i in range(n_samples)])
    spread_sq = np.sum((full_var - leave_one_out) ** 2)
    return full_var, np.sqrt(spread_sq * factor)
"""
Stacking Lattices: Stacking z lattice and taking advantage of periodic boundary condition. The energy and magnetization would also increase as system size increase as they are extensive state variables. Other trick to explore is Zoom.
"""
def stackLattice(z, state, energy, mag):
    """Tile an n x n lattice into a (z*n) x (z*n) lattice of z*z copies.

    Valid because of periodic boundary conditions.  Energy and
    magnetization are extensive, so both scale by z*z.  Replaces the
    hand-rolled hstack/vstack loops with the equivalent np.tile call
    (note: for z == 1 this now returns a copy rather than the original
    array object; values are identical).
    """
    return (np.tile(state, (z, z)), z*z*energy, z*z*mag)
#################################################
#                                               #
#                   MAIN                        #
#                                               #
#################################################
"""we will plot the following wrt temperature, T"""
# Per-temperature observables accumulated over the temperature sweep.
plotEnergy = np.zeros(nt)
plotMag = np.zeros(nt)
plotChi = np.zeros(nt)
plotChi_err = np.zeros(nt)
plotSH = np.zeros(nt)
plotSH_err = np.zeros(nt)
plotCorrelation = np.zeros(nt)
"""
Preparing n x n lattice with all spins up.
Here, z is a zoom factor or a stacking factor.
"""
n = min(lattice_sizes)
N = n*n
# Ratio between consecutive lattice sizes; used to expand the final
# lattice of one size into the starting lattice of the next size.
z = lattice_sizes[1]//lattice_sizes[0]
state = np.ones((n,n),dtype="int")
# All spins up: energy = -N (fully aligned bonds), magnetization = N.
energy, mag = -N, N
"""lattice size loop"""
for n in lattice_sizes:
    logging.info(f"Lattice size is {n}x{n}")
    print(f"Lattice size is {n}x{n}")
    N = n*n
    """temperature loop"""
    for k in range(nt):
        temp = T[k]
        Beta=1/temp
        # Reuse the equilibrated state stored for this temperature at the
        # previous (smaller) lattice size, if one exists.
        # NOTE(review): `!= None` should be `is not None`; also assumes
        # `states` maps every temp to None or a (state, energy, mag)
        # triple — a missing key would raise KeyError. Confirm upstream.
        if states[temp] != None:
            (state,energy,mag) = states[temp]
        logging.info("_"*35)
        logging.info("Temperature is %0.2f, time elapsed %d" %(temp,time.time()-startTime))
        sweeps = sweeps0; j_knife_factor = j_knife_factor0; measurements = 0
        E, M = np.zeros(0), np.zeros(0)
        # Keep sweeping until enough statistically independent samples
        # (spaced by the autocorrelation-derived interval) are available.
        while measurements < min_meas:
            energies, mags = monteCarlo(n, state, energy, mag, Beta, sweeps, max_sweeps//10)
            energy, mag = energies[-1], mags[-1]
            E = np.concatenate((E,energies))
            M = np.concatenate((M,mags))
            # Equilibration time and sampling interval are both taken as
            # twice the integrated autocorrelation time of M.
            delta_int = eq_time = 2*autocorrelation(M)
            measurements = len(E[eq_time::delta_int])
            logging.info(f"{measurements} measurements are possible")
            if measurements < min_meas:
                _energies_ = len(E)
                if _energies_ < max_sweeps:
                    sweeps = delta_int*(min_meas-measurements)
                    logging.info(f"\tdoing {sweeps} more sweeps")
                else:
                    # Sweep budget exhausted: accept correlated samples by
                    # shrinking the sampling interval instead.
                    delta_int = (_energies_-eq_time)//min_meas
                    j_knife_factor = eq_time/delta_int
                    measurements = len(E[eq_time::delta_int])
        logging.info(f"We will do {measurements} measurements")
        #doing measurements
        E = E[eq_time::delta_int]
        M = M[eq_time::delta_int]
        plotMag[k] = np.average(M)/N
        # Jackknife error estimates; Chi and specific heat come from the
        # fluctuation-dissipation relations (variance times Beta powers).
        Chi, Chi_err = jackKnife(M,j_knife_factor)
        plotChi[k] =Chi*Beta/N
        plotChi_err[k] =Chi_err*Beta/N
        plotEnergy[k] = np.average(E)/N
        sp_heat, sp_heat_err = jackKnife(E,j_knife_factor)
        plotSH[k] = sp_heat*Beta*Beta/N
        plotSH_err[k] = sp_heat_err*Beta*Beta/N
        plotCorrelation[k] = eq_time//2
        #lattice expansion
        # Store the enlarged lattice as the warm start for the next size.
        states[temp] = stackLattice(z,state,energy,mag)
        #states[temp] = zoomLattice(z,state,energy,mag)
    #PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS#
    # One 2x2 figure per lattice size: energy, |M|, specific heat, Chi.
    f = plt.figure(figsize=(16, 9));
    title_name = "Size:"+str(n)+"x"+str(n)
    plt.title(title_name, color='b');
    sp = f.add_subplot(2, 2, 1 );
    plt.scatter(T, plotEnergy, s=50, marker='o', color='IndianRed')
    plt.xlabel("Temperature (T)", fontsize=20);
    plt.ylabel("Energy ", fontsize=20); plt.axis('tight');
    sp = f.add_subplot(2, 2, 2 );
    plt.scatter(T, abs(np.array(plotMag)), s=50, marker='o', color='IndianRed', label = "data")
    temp_list = np.linspace(T_min, T_max, 10000)
    plt.plot(temp_list, onsagar_mag(temp_list) , color='blue', label = "Onsager Solution")
    plt.legend()
    plt.xlabel("Temperature (T)", fontsize=20);
    plt.ylabel("Magnetization ", fontsize=20); plt.axis('tight');
    sp = f.add_subplot(2, 2, 3 );
    plt.errorbar(T, plotSH, yerr = plotSH_err, fmt='o', color='IndianRed', label = "data")
    plt.plot(temp_list, onsagar_specific_heat(temp_list), color='RoyalBlue', label = "Onsager Solution")
    plt.legend()
    plt.xlabel("Temperature (T)", fontsize=20);
    plt.ylabel("Specific Heat ", fontsize=20); plt.axis('tight');
    sp = f.add_subplot(2, 2, 4 );
    plt.errorbar(T, plotChi, yerr = plotChi_err, fmt='o', color='IndianRed', label = "data")
    plt.xlabel("Temperature (T)", fontsize=20);
    plt.ylabel("Susceptibility", fontsize=20); plt.axis('tight');
    # File name is the wall-clock time, so reruns don't overwrite plots.
    timeIs = time.strftime("%H-%M-%S")
    plt.savefig(timeIs+'.pdf')
    #storing measurements in in a file
    with open(str(n)+"data","w") as file:
        file.write("##Temp\tEnergy\tMag\tSp_ht\tSp_ht_err\tChi\tChi_err\ttau\n")
        for i in range(nt):
            file.write(str(T[i])+"\t"+str(plotEnergy[i])+"\t"+str(plotMag[i])+"\t"+str(plotSH[i])+"\t"+str(plotSH_err[i])+"\t"+str(plotChi[i])+"\t"+str(plotChi_err[i])+"\t"+str(plotCorrelation[i])+"\t"+"\n")
| 38.73622
| 281
| 0.57018
|
import numpy as np
from random import random
import matplotlib.pyplot as plt
import time
import logging
logging.basicConfig(level=logging.INFO,filename='simulation.log', filemode='w',format='%(asctime)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')
np.seterr(all='warn')
| true
| true
|
f7170de985b4d2849b9f1917151cdfb0f523b4b6
| 247
|
py
|
Python
|
test.py
|
namitkewat/cy_aho_corasick
|
e9483f7efe60d3a1f9904d059dedbd021858f31c
|
[
"MIT"
] | 2
|
2021-08-10T03:14:43.000Z
|
2021-08-10T03:17:39.000Z
|
test.py
|
namitkewat/cy_aho_corasick
|
e9483f7efe60d3a1f9904d059dedbd021858f31c
|
[
"MIT"
] | null | null | null |
test.py
|
namitkewat/cy_aho_corasick
|
e9483f7efe60d3a1f9904d059dedbd021858f31c
|
[
"MIT"
] | 2
|
2019-04-26T09:47:35.000Z
|
2021-08-10T03:18:03.000Z
|
# Smoke test for the cy_aho_corasick extension: build a trie with one
# pattern and print the matches found in a sample byte string.
import cy_aho_corasick as cy_aho
# print(dir(cyAhoCorasick))
# remove_overlaps=True keeps only non-overlapping matches.
t = cy_aho.Trie(remove_overlaps=True)
t.insert(b"sugar")
print(t.parse_text(b"sugarcane sugarcane sugar canesugar"))
print(t.parse_text_values(b"sugarcane sugarcane sugar canesugar"))
| 27.444444
| 66
| 0.801619
|
import cy_aho_corasick as cy_aho
t = cy_aho.Trie(remove_overlaps=True)
t.insert(b"sugar")
print(t.parse_text(b"sugarcane sugarcane sugar canesugar"))
print(t.parse_text_values(b"sugarcane sugarcane sugar canesugar"))
| true
| true
|
f7170eb00fc11965e5ade80c62bfb984df7625cd
| 14,665
|
py
|
Python
|
fairseq/fairseq_cli/preprocess.py
|
shalei120/AutoInsert
|
89ec5dadd252d79586fe3e44f1d315c5ec938c1f
|
[
"MIT"
] | 28
|
2021-09-15T01:25:00.000Z
|
2022-03-01T20:21:28.000Z
|
fairseq/fairseq_cli/preprocess.py
|
shalei120/AutoInsert
|
89ec5dadd252d79586fe3e44f1d315c5ec938c1f
|
[
"MIT"
] | 2
|
2022-03-25T07:55:12.000Z
|
2022-03-28T12:46:05.000Z
|
fairseq/fairseq_cli/preprocess.py
|
shalei120/AutoInsert
|
89ec5dadd252d79586fe3e44f1d315c5ec938c1f
|
[
"MIT"
] | 2
|
2021-09-14T07:20:11.000Z
|
2021-09-23T08:00:24.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
from fairseq.file_chunker_utils import find_offsets
# Root logging config: timestamped records to stdout; level is
# overridable via the LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
def main(args):
    """Build vocabularies and binarize train/valid/test data per *args*.

    Side effects: creates args.destdir and writes dictionaries,
    binarized datasets, optional alignment data, and preprocess.log
    into it. Returns early after saving dictionaries if args.dict_only.
    """
    utils.import_user_module(args)
    os.makedirs(args.destdir, exist_ok=True)
    # Mirror all log output into a file inside the destination directory.
    logger.addHandler(
        logging.FileHandler(
            filename=os.path.join(args.destdir, "preprocess.log"),
        )
    )
    logger.info(args)
    task = tasks.get_task(args.task)
    # --- small path helpers, all closing over *args* ---
    def train_path(lang):
        # e.g. trainpref "data/train" + lang "en" -> "data/train.en"
        return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
    def file_name(prefix, lang):
        fname = prefix
        if lang is not None:
            fname += ".{lang}".format(lang=lang)
        return fname
    def dest_path(prefix, lang):
        return os.path.join(args.destdir, file_name(prefix, lang))
    def dict_path(lang):
        return dest_path("dict", lang) + ".txt"
    def build_dictionary(filenames, src=False, tgt=False):
        # Exactly one of src/tgt must be set: thresholds/nwords differ
        # per side.
        assert src ^ tgt
        return task.build_dictionary(
            filenames,
            workers=args.workers,
            threshold=args.thresholdsrc if src else args.thresholdtgt,
            nwords=args.nwordssrc if src else args.nwordstgt,
            padding_factor=args.padding_factor,
        )
    target = not args.only_source
    # Refuse to clobber dictionaries that already exist on disk unless an
    # explicit dictionary path was supplied.
    if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
        raise FileExistsError(dict_path(args.source_lang))
    if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
        raise FileExistsError(dict_path(args.target_lang))
    # --- load or build the source/target dictionaries ---
    if args.joined_dictionary:
        assert (
            not args.srcdict or not args.tgtdict
        ), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
        if args.srcdict:
            src_dict = task.load_dictionary(args.srcdict)
        elif args.tgtdict:
            src_dict = task.load_dictionary(args.tgtdict)
        else:
            assert (
                args.trainpref
            ), "--trainpref must be set if --srcdict is not specified"
            # Joint dictionary: built over both languages' training files.
            src_dict = build_dictionary(
                {train_path(lang) for lang in [args.source_lang, args.target_lang]},
                src=True,
            )
        tgt_dict = src_dict
    else:
        if args.srcdict:
            src_dict = task.load_dictionary(args.srcdict)
        else:
            assert (
                args.trainpref
            ), "--trainpref must be set if --srcdict is not specified"
            src_dict = build_dictionary([train_path(args.source_lang)], src=True)
        if target:
            if args.tgtdict:
                tgt_dict = task.load_dictionary(args.tgtdict)
            else:
                assert (
                    args.trainpref
                ), "--trainpref must be set if --tgtdict is not specified"
                tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
        else:
            tgt_dict = None
    src_dict.save(dict_path(args.source_lang))
    if target and tgt_dict is not None:
        tgt_dict.save(dict_path(args.target_lang))
    if args.dict_only:
        return
    def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
        # Binarize one text file in parallel: workers 1..N-1 each handle a
        # byte-range chunk; the main process handles the first chunk and
        # then merges the workers' temporary outputs.
        logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
        # Mutable accumulators shared with merge_result (list so the
        # closure can mutate them).
        n_seq_tok = [0, 0]
        replaced = Counter()
        def merge_result(worker_result):
            replaced.update(worker_result["replaced"])
            n_seq_tok[0] += worker_result["nseq"]
            n_seq_tok[1] += worker_result["ntok"]
        input_file = "{}{}".format(
            input_prefix, ("." + lang) if lang is not None else ""
        )
        # Chunk boundaries aligned to line breaks, one chunk per worker.
        offsets = find_offsets(input_file, num_workers)
        (first_chunk, *more_chunks) = zip(offsets, offsets[1:])
        pool = None
        if num_workers > 1:
            pool = Pool(processes=num_workers - 1)
            for worker_id, (start_offset, end_offset) in enumerate(
                more_chunks, start=1
            ):
                # Each worker writes to its own "<output_prefix><id>" files.
                prefix = "{}{}".format(output_prefix, worker_id)
                pool.apply_async(
                    binarize,
                    (
                        args,
                        input_file,
                        vocab,
                        prefix,
                        lang,
                        start_offset,
                        end_offset,
                    ),
                    callback=merge_result,
                )
            pool.close()
        ds = indexed_dataset.make_builder(
            dataset_dest_file(args, output_prefix, lang, "bin"),
            impl=args.dataset_impl,
            vocab_size=len(vocab),
        )
        # Main process binarizes the first chunk while workers run.
        merge_result(
            Binarizer.binarize(
                input_file,
                vocab,
                lambda t: ds.add_item(t),
                offset=first_chunk[0],
                end=first_chunk[1],
            )
        )
        if num_workers > 1:
            pool.join()
            # Merge worker outputs in worker-id order, then delete them.
            for worker_id in range(1, num_workers):
                prefix = "{}{}".format(output_prefix, worker_id)
                temp_file_path = dataset_dest_prefix(args, prefix, lang)
                ds.merge_file_(temp_file_path)
                os.remove(indexed_dataset.data_file_path(temp_file_path))
                os.remove(indexed_dataset.index_file_path(temp_file_path))
        ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
        logger.info(
            "[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
                lang,
                input_file,
                n_seq_tok[0],
                n_seq_tok[1],
                100 * sum(replaced.values()) / n_seq_tok[1],
                vocab.unk_word,
            )
        )
    def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
        # Same chunked parallel scheme as make_binary_dataset, but for
        # alignment files (no vocabulary).
        nseq = [0]
        def merge_result(worker_result):
            nseq[0] += worker_result["nseq"]
        input_file = input_prefix
        offsets = find_offsets(input_file, num_workers)
        (first_chunk, *more_chunks) = zip(offsets, offsets[1:])
        pool = None
        if num_workers > 1:
            pool = Pool(processes=num_workers - 1)
            for worker_id, (start_offset, end_offset) in enumerate(
                more_chunks, start=1
            ):
                prefix = "{}{}".format(output_prefix, worker_id)
                pool.apply_async(
                    binarize_alignments,
                    (
                        args,
                        input_file,
                        utils.parse_alignment,
                        prefix,
                        start_offset,
                        end_offset,
                    ),
                    callback=merge_result,
                )
            pool.close()
        ds = indexed_dataset.make_builder(
            dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
        )
        merge_result(
            Binarizer.binarize_alignments(
                input_file,
                utils.parse_alignment,
                lambda t: ds.add_item(t),
                offset=first_chunk[0],
                end=first_chunk[1],
            )
        )
        if num_workers > 1:
            pool.join()
            for worker_id in range(1, num_workers):
                prefix = "{}{}".format(output_prefix, worker_id)
                temp_file_path = dataset_dest_prefix(args, prefix, None)
                ds.merge_file_(temp_file_path)
                os.remove(indexed_dataset.data_file_path(temp_file_path))
                os.remove(indexed_dataset.index_file_path(temp_file_path))
        ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
        logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
    def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
        if args.dataset_impl == "raw":
            # Copy original text file to destination folder
            output_text_file = dest_path(
                output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
                lang,
            )
            shutil.copyfile(file_name(input_prefix, lang), output_text_file)
        else:
            make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
    def make_all(lang, vocab):
        # Process every configured split; extra valid/test files (comma
        # separated) get numbered suffixes: valid, valid1, valid2, ...
        if args.trainpref:
            make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
        if args.validpref:
            for k, validpref in enumerate(args.validpref.split(",")):
                outprefix = "valid{}".format(k) if k > 0 else "valid"
                make_dataset(
                    vocab, validpref, outprefix, lang, num_workers=args.workers
                )
        if args.testpref:
            for k, testpref in enumerate(args.testpref.split(",")):
                outprefix = "test{}".format(k) if k > 0 else "test"
                make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
    def make_all_alignments():
        # Alignment files are optional per split; only process those that
        # exist on disk.
        if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
            make_binary_alignment_dataset(
                args.trainpref + "." + args.align_suffix,
                "train.align",
                num_workers=args.workers,
            )
        if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
            make_binary_alignment_dataset(
                args.validpref + "." + args.align_suffix,
                "valid.align",
                num_workers=args.workers,
            )
        if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
            make_binary_alignment_dataset(
                args.testpref + "." + args.align_suffix,
                "test.align",
                num_workers=args.workers,
            )
    make_all(args.source_lang, src_dict)
    if target:
        make_all(args.target_lang, tgt_dict)
    if args.align_suffix:
        make_all_alignments()
    logger.info("Wrote preprocessed data to {}".format(args.destdir))
    if args.alignfile:
        # Build a word-alignment dictionary: for each source token, the
        # target token it is most frequently aligned to.
        assert args.trainpref, "--trainpref must be set if --alignfile is specified"
        src_file_name = train_path(args.source_lang)
        tgt_file_name = train_path(args.target_lang)
        freq_map = {}
        with open(args.alignfile, "r", encoding="utf-8") as align_file:
            with open(src_file_name, "r", encoding="utf-8") as src_file:
                with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
                    for a, s, t in zip_longest(align_file, src_file, tgt_file):
                        si = src_dict.encode_line(s, add_if_not_exist=False)
                        ti = tgt_dict.encode_line(t, add_if_not_exist=False)
                        # Alignment lines look like "0-0 1-2 ...", i.e.
                        # src_index-tgt_index pairs.
                        ai = list(map(lambda x: tuple(x.split("-")), a.split()))
                        for sai, tai in ai:
                            srcidx = si[int(sai)]
                            tgtidx = ti[int(tai)]
                            # Skip pairs involving unknown words; special
                            # symbols must never appear in the data.
                            if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
                                assert srcidx != src_dict.pad()
                                assert srcidx != src_dict.eos()
                                assert tgtidx != tgt_dict.pad()
                                assert tgtidx != tgt_dict.eos()
                                if srcidx not in freq_map:
                                    freq_map[srcidx] = {}
                                if tgtidx not in freq_map[srcidx]:
                                    freq_map[srcidx][tgtidx] = 1
                                else:
                                    freq_map[srcidx][tgtidx] += 1
        align_dict = {}
        for srcidx in freq_map.keys():
            # Most frequent target token wins for each source token.
            align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
        with open(
            os.path.join(
                args.destdir,
                "alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
            ),
            "w",
            encoding="utf-8",
        ) as f:
            for k, v in align_dict.items():
                print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
    """Binarize the byte range [offset, end) of *filename* with *vocab*.

    Writes .bin/.idx files under the destination prefix and returns the
    statistics dict produced by Binarizer.binarize (nseq/ntok/replaced).
    """
    builder = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, lang, "bin"),
        impl=args.dataset_impl,
        vocab_size=len(vocab),
    )
    # Feed each encoded tensor straight into the on-disk builder.
    stats = Binarizer.binarize(
        filename,
        vocab,
        builder.add_item,
        append_eos=append_eos,
        offset=offset,
        end=end,
    )
    builder.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
    return stats
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
    """Binarize the byte range [offset, end) of an alignment file.

    Writes .bin/.idx files under the destination prefix and returns the
    statistics dict produced by Binarizer.binarize_alignments.
    """
    builder = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, None, "bin"),
        impl=args.dataset_impl,
        vocab_size=None,
    )
    # Parsed alignment tensors go straight into the on-disk builder.
    stats = Binarizer.binarize_alignments(
        filename,
        parse_alignment,
        builder.add_item,
        offset=offset,
        end=end,
    )
    builder.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
    return stats
def dataset_dest_prefix(args, output_prefix, lang):
    """Destination path prefix (no extension) for a binarized split.

    Layout: "<destdir>/<output_prefix>" plus ".<src>-<tgt>.<lang>" when
    *lang* is given, nothing for source-only data, or ".<src>-<tgt>"
    otherwise.
    """
    pair = f"{args.source_lang}-{args.target_lang}"
    if lang is not None:
        suffix = f".{pair}.{lang}"
    else:
        suffix = "" if args.only_source else f".{pair}"
    return f"{args.destdir}/{output_prefix}{suffix}"
def dataset_dest_file(args, output_prefix, lang, extension):
    """Full destination path: the dataset prefix plus *extension*."""
    prefix = dataset_dest_prefix(args, output_prefix, lang)
    return f"{prefix}.{extension}"
def cli_main():
    """Command-line entry point: parse preprocessing args and run main()."""
    parser = options.get_preprocessing_parser()
    args = parser.parse_args()
    main(args)
# Standard guard so importing this module has no side effects.
if __name__ == "__main__":
    cli_main()
| 35.943627
| 88
| 0.561132
|
import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
from fairseq.file_chunker_utils import find_offsets
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]},
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
if args.dict_only:
return
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
merge_result(
Binarizer.binarize(
input_file,
vocab,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result["nseq"]
input_file = input_prefix
offsets = find_offsets(input_file, num_workers)
(first_chunk, *more_chunks) = zip(offsets, offsets[1:])
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id, (start_offset, end_offset) in enumerate(
more_chunks, start=1
):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
start_offset,
end_offset,
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
)
merge_result(
Binarizer.binarize_alignments(
input_file,
utils.parse_alignment,
lambda t: ds.add_item(t),
offset=first_chunk[0],
end=first_chunk[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(
vocab, validpref, outprefix, lang, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(
filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl,
vocab_size=None,
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(
filename, parse_alignment, consumer, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = "{}/{}".format(args.destdir, output_prefix)
if lang is not None:
lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
elif args.only_source:
lang_part = ""
else:
lang_part = ".{}-{}".format(args.source_lang, args.target_lang)
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return "{}.{}".format(base, extension)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.