| prompt | completion | api |
|---|---|---|
| string (length 19 to 879k) | string (length 3 to 53.8k) | string (length 8 to 59) |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for schedules.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from rlax._src import schedules
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
class PolynomialTest(parameterized.TestCase):
def test_linear(self, compile_fn, place_fn):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = schedules.polynomial_schedule(10., 20., 1, 10)
# Optionally compile.
schedule_fn = compile_fn(schedule_fn)
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Optionally convert to device array.
step_count = place_fn(count)
# Compute next value.
generated_vals.append(schedule_fn(step_count))
# Test output.
expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals))
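# With power=1 the schedule above presumably reduces to linear interpolation,
# value(t) = init + (end - init) * min(t, transition_steps) / transition_steps,
# so for init=10, end=20, transition_steps=10 it produces 10, 11, ..., 19 and then
# a constant 20, matching the `expected_vals` asserted above.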
# -*- coding: utf-8 -*-
import numpy as np
import time
# Rotating hyperplane dataset
def create_hyperplane_dataset(n_samples, n_dim=2, plane_angle=0.45):
w = np.dot(np.array([[np.cos(plane_angle), -np.sin(plane_angle)], [np.sin(plane_angle), np.cos(plane_angle)
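# The generator above is cut off mid-expression. A minimal sketch of the usual
# "rotating hyperplane" construction it appears to implement (2-D only): rotate a
# base normal by `plane_angle`, draw uniform points, and label them by the side of
# the hyperplane. The base normal and the sampling range are assumptions.
def create_hyperplane_dataset_sketch(n_samples, plane_angle=0.45, seed=0):
    rng = np.random.RandomState(seed)
    rotation = np.array([[np.cos(plane_angle), -np.sin(plane_angle)],
                         [np.sin(plane_angle), np.cos(plane_angle)]])
    w = rotation @ np.ones(2)                      # rotated hyperplane normal
    X = rng.uniform(-1.0, 1.0, size=(n_samples, 2))
    y = (X @ w > 0).astype(int)                    # label = side of the hyperplane
    return X, y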
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
import numpy as np
from numpy.testing.nosetester import import_nose
from scipy._lib._version import NumpyVersion
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
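# Example usage (illustrative only; np.log warns on negative input under numpy's
# default error settings):
#   _assert_warns(RuntimeWarning, np.log, -1.0)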
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
Notes
-----
.. versionadded:: 1.8.0
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
if sys.version_info.major >= 3:
funcname = nose.tools.assert_raises_regex
else:
# Only present in Python 2.7, missing from unittest in 2.6
funcname = nose.tools.assert_raises_regexp
return funcname(exception_class, expected_regexp, callable_obj,
*args, **kwargs)
if NumpyVersion(np.__version__) >= '1.10.0':
from numpy import broadcast_to
else:
# Definition of `broadcast_to` from numpy 1.10.0.
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
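# The backported body above is truncated. A self-contained sketch of the numpy 1.10
# implementation it copies (a read-only broadcast view built with np.nditer) is
# reproduced below for reference.
def _broadcast_to_sketch(array, shape, subok, readonly):
    shape = tuple(shape) if np.iterable(shape) else (shape,)
    array = np.array(array, copy=False, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-negative')
    broadcast = np.nditer(
        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
        op_flags=['readonly'], itershape=shape, order='C').itviews[0]
    result = _maybe_view_as_subclass(array, broadcast)
    if not readonly and array.flags.writeable:
        result.flags.writeable = True
    return result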
from linlearn import BinaryClassifier, MultiClassifier
from linlearn.robust_means import Holland_catoni_estimator, gmom, alg2
import numpy as np
import gzip
import logging
import pickle
from datetime import datetime
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from scipy.special import logsumexp, softmax
import os
import itertools
from tqdm import tqdm
import joblib
import time
def ensure_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
ensure_directory('exp_archives/')
file_handler = logging.FileHandler(filename='exp_archives/classif_exp.log')
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=handlers
)
save_results = False
save_fig= True
dataset="MNIST"
logging.info(64*"=")
logging.info("Running new experiment session ON GPU with dataset : %s" % dataset)
logging.info(64*"=")
m_SVRG = 50
step_size = 0.01
max_iter = 10
fit_intercept = True
n_samples = 1000
n_repeats = 2
logging.info("Parameters are : n_repeats = %d , n_samples = %d , max_ter = %d , fit_intercept=%r , m_SVRG = %d" % (n_repeats, n_samples or 0, max_iter, fit_intercept, m_SVRG))
if not save_results:
logging.info("WARNING : results will NOT be saved at the end of this session")
def _images(path):
"""Return images loaded locally."""
with gzip.open(path) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
return pixels.reshape(-1, 784).astype('float64') / 255
def _labels(path):
"""Return labels loaded locally."""
with gzip.open(path) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
def _onehot(integer_labels):
"""Return matrix whose rows are onehot encodings of integers."""
n_rows = len(integer_labels)
n_cols = integer_labels.max() + 1
onehot = np.zeros((n_rows, n_cols), dtype='uint8')
onehot[np.arange(n_rows), integer_labels] = 1
return onehot
return _onehot(integer_labels)
mnist_train_images_file = "mnist_data/train-images-idx3-ubyte.gz"
mnist_train_labels_file = "mnist_data/train-labels-idx1-ubyte.gz"
mnist_test_images_file = "mnist_data/t10k-images-idx3-ubyte.gz"
mnist_test_labels_file = "mnist_data/t10k-labels-idx1-ubyte.gz"
logging.info("loading data ...")
X_train = _images(mnist_train_images_file)[:n_samples]
y_train = _labels(mnist_train_labels_file)[:n_samples]
X_test = _images(mnist_test_images_file)
y_test = _labels(mnist_test_labels_file)
def l1_apply_single(x, t):
if x > t:
return x - t
elif x < -t:
return x + t
else:
return 0.0
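# Vectorized equivalent of the scalar soft-thresholding above (the proximal operator
# of the l1 norm); added only as an illustration, the name is not from the original
# script.
def l1_apply(x, t):
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)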
def sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
if fit_intercept:
w0 = w[0] if lnlearn else w[0,:]
w1 = w[1] if lnlearn else w[1:,:]
else:
w0 = 0
w1 = w
scores = X @ w1 + w0
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
obj = (-scores[np.arange(X.shape[0]), np.argmax(y, axis=1)] + logsumexp(scores, axis=1))
return obj
def objective(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
return sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=lnlearn).mean()
def gradient(X, y, w, fit_intercept=fit_intercept):
scores = X @ w[1:,:] + w[0,:] if fit_intercept else X @ w
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
sftmax = softmax(scores, axis=1) - y#np.hstack((y, np.zeros((X.shape[0], 1))))
if fit_intercept:
return np.vstack((sftmax[:,:-1].sum(axis=0), X.T @ sftmax[:,:-1]))/X.shape[0]
else:
return (X.T @ sftmax[:,:-1])/X.shape[0] # np.vstack((np.ones((X.shape[0], 1)) @ sftmax[:,:-1], X.T @ sftmax[:,:-1]
def sample_gradients(X, y, w, fit_intercept=fit_intercept):
scores = X @ w[1:,:] + w[0,:] if fit_intercept else X @ w
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
sftmax = softmax(scores, axis=1) - y#np.hstack((y, np.zeros((X.shape[0], 1))))
if fit_intercept:
return np.concatenate(
(sftmax[:,np.newaxis,:-1], np.einsum("ij, ik->ijk", X, sftmax[:,:-1])), axis=1)
else:
return np.einsum("ij, ik->ijk", X, sftmax[:,:-1])
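# A quick sanity check (not part of the original script): compare the analytic
# gradient above against central finite differences of `objective` on a tiny random
# problem. The problem sizes and the returned max-absolute-difference are assumptions.
def check_gradient(n=20, d=5, k=3, eps=1e-6, seed=0):
    rng = np.random.RandomState(seed)
    Xc = rng.randn(n, d)
    yc = np.eye(k)[rng.randint(k, size=n)]            # one-hot labels with k classes
    wc = rng.randn(d + int(fit_intercept), k - 1)     # weights use k-1 columns, as above
    g_analytic = gradient(Xc, yc, wc, fit_intercept=fit_intercept)
    g_numeric = np.zeros_like(wc)
    for idx in np.ndindex(*wc.shape):
        w_plus, w_minus = wc.copy(), wc.copy()
        w_plus[idx] += eps
        w_minus[idx] -= eps
        g_numeric[idx] = (objective(Xc, yc, w_plus, fit_intercept=fit_intercept)
                          - objective(Xc, yc, w_minus, fit_intercept=fit_intercept)) / (2 * eps)
    return np.max(np.abs(g_analytic - g_numeric))     # should be of order eps or smaller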
# def train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept)
# def test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept)
#
# def linlearn_train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=True)
# def linlearn_test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=True)
# linlearn_tracked_funs = [linlearn_train_loss, linlearn_test_loss]
linlearn_algorithms = ["mom_cgd", "catoni_cgd", "tmean_cgd"]
def train_loss(w, algo_name=""):
return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
def test_loss(w, algo_name=""):
return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
tracked_funs = [train_loss, test_loss]
class Record(object):
def __init__(self, shape, capacity):
self.record = np.zeros(capacity) if shape == 1 else np.zeros(tuple([capacity] + list(shape)))
self.cursor = 0
def update(self, value):
self.record[self.cursor] = value
self.cursor += 1
def __len__(self):
return self.record.shape[0]
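# Illustrative usage (not part of the original script): pre-allocate room for
# `max_iter` scalar values and push one value per iteration.
#   loss_record = Record(1, max_iter)
#   loss_record.update(0.123)  # stored at position 0; the cursor then advances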
def tmean_cgd(X_train, y_train, batch_size=500):
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="tmean", fit_intercept=fit_intercept,
thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
gradient_counts = [(i // n_batches)*X_train.shape[0] + (i % n_batches)*batch_size for i in range(n_iter)]
return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
# def catoni_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches)*X_train.shape[0] + (i % n_batches)*batch_size for i in range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def catoni_cgd(X_train, y_train, l1_penalty=1):
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni", fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
step_size=step_size, loss="multilogistic")
param_record = Record((X_train.shape[1]+int(fit_intercept), y_train.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
if fit_intercept:
param_tracker = lambda w : param_record.update(np.vstack(w))
else:
param_tracker = lambda w : param_record.update(w)
mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _:time_record.update(time.time())])
return param_record, time_record
# def mom_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches) * X_train.shape[0] + (i % n_batches) * batch_size for i in
# range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def mom_cgd(X_train, y_train, l1_penalty=1):
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom", fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
step_size=step_size, loss="multilogistic")
param_record = Record((X_train.shape[1]+int(fit_intercept), y_train.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
if fit_intercept:
param_tracker = lambda w : param_record.update(np.vstack(w))
else:
param_tracker = lambda w : param_record.update(w)
mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _:time_record.update(time.time())])
return param_record, time_record
# def SVRG(X, y, grad, m, w0=None, T=max_iter, fit_intercept=fit_intercept, tracked_funs=tracked_funs):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
# w_tilde = w0
# wt = w0
# step = step_size*(X.shape[0]/m + 2)/1000
# tracks = [[obj(w0)] for obj in tracked_funs] + [[0]]
# for i in tqdm(range((T*500)//(X.shape[0] + 2*m) + 1), desc="SVRG"):
# mu = grad(X, y, w_tilde, fit_intercept=fit_intercept)
# additional_gradients = X.shape[0]
# for j in range(m):
# ind = np.random.randint(X.shape[0])
# X_ind, y_ind = X[ind:ind+1,:], y[ind:ind+1,:]
# wt -= step*(grad(X_ind, y_ind, wt, fit_intercept=fit_intercept) - grad(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
# additional_gradients += 2
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + additional_gradients)
# additional_gradients = 0
# w_tilde = wt
# return tracks
def SVRG(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
if w0 is None:
w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
w_tilde = w0
wt = w0
step = step_size/(X.shape[0])
m = X.shape[0]
param_record = Record((X.shape[1]+int(fit_intercept), y.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
for i in tqdm(range(T), desc="SVRG"):
mu = gradient(X, y, w_tilde, fit_intercept=fit_intercept)
for j in range(m):
ind = np.random.randint(X.shape[0])
X_ind, y_ind = X[ind:ind+1,:], y[ind:ind+1]
wt -= step*(gradient(X_ind, y_ind, wt, fit_intercept=fit_intercept) - gradient(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
w_tilde = wt
param_record.update(wt)
time_record.update(time.time())
return param_record, time_record
def SGD(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
if w0 is None:
w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
wt = w0
step = step_size/(X.shape[0])
param_record = Record((X.shape[1]+int(fit_intercept), y.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
for i in tqdm(range(T), desc="SGD"):
index = np.random.randint(X.shape[0])
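# The SGD loop above is truncated. Below is a sketch of how it presumably continues,
# mirroring the SVRG implementation: one pass of single-sample gradient steps per
# recorded iteration. The exact sampling scheme and update order are assumptions.
def SGD_sketch(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
    if w0 is None:
        w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1] - 1))
    wt = w0
    step = step_size / (X.shape[0])
    param_record = Record((X.shape[1] + int(fit_intercept), y.shape[1] - 1), max_iter)
    time_record = Record(1, max_iter)
    for i in tqdm(range(T), desc="SGD"):
        for j in range(X.shape[0]):
            ind = np.random.randint(X.shape[0])
            X_ind, y_ind = X[ind:ind + 1, :], y[ind:ind + 1]
            wt -= step * gradient(X_ind, y_ind, wt, fit_intercept=fit_intercept)
        param_record.update(wt)
        time_record.update(time.time())
    return param_record, time_record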
import numpy as np
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
try:
import matplotlib.pyplot as plt
can_plot = True
except ImportError:
can_plot = False
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return np.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
np.random.seed(100)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample", "not can_plot")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample", "not can_plot")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
np.random.seed(1000)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = np.array([2.0])
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = np.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
assert np.allclose(res.mfit, mean_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result+ 2.0*res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
np.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
assert np.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.allclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_all_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.npar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = np.sqrt(2.0*test_sexp)
test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])
assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
npar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * npar
test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert np.isclose(test_aic, self.optres.aic)
assert np.isclose(test_bic, self.optres.bic)
assert np.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
def test_compute_covariance_with_hess_inverse(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.allclose(optres.cov, hess_inv)
assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))
def test_print_summary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_summary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
# store all the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
test_mean = 2.0
test_std = 0.2
assert np.isclose(test_mean, s.mean[0], rtol=0.1)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.linspace(1, 10.0, nfreq)
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
@pytest.mark.skipif("not can_plot")
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
assert np.absolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert np.allclose(model, pe_model)
def generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
pe = PSDParEst(ps)
max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)
assert np.isclose(max_x, ps.freq[mp_ind])
assert max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = np.ones_like(ps.freq)
max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)
assert np.isclose(max_y[0], 2*max_power)
assert np.isclose(max_x[0], ps.freq[mp_ind])
assert max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
max_post=False, seed=seed)
assert maxpow_sim.shape[0] == nsim
assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
"""Test correlation and distance correlation estimators."""
import numpy as np
from frites.estimator import CorrEstimator, DcorrEstimator
array_equal = np.testing.assert_array_equal
class TestCorrEstimator(object):
def test_corr_definition(self):
"""Test definition of correlation estimator."""
CorrEstimator()
def test_corr_estimate(self):
"""Test getting the core function."""
x, y = np.random.rand(10, 1, 100), np.random.rand(10, 1, 100)
cat = np.array([0] * 50 + [1] * 50)
est = CorrEstimator()
for func in [0, 1]:
if func == 0: # estimator.get_function()
fcn = est.get_function()
elif func == 1: # estimator.estimate
fcn = est.estimate
# no categories
array_equal(fcn(x[0, 0, :], y[0, 0, :]).shape, (1, 1))
array_equal(fcn(x[0, :, :], y[0, 0, :]).shape, (1, 1))
array_equal(fcn(x, y).shape, (1, 10))
# with categories
array_equal(fcn(x[0, 0, :], y[0, 0, :],
categories=cat).shape, (2, 1))
array_equal(fcn(x[0, :, :], y[0, 0, :],
categories=cat).shape, (2, 1))
array_equal(fcn(x, y, categories=cat).shape, (2, 10))
def test_corr_functional(self):
"""Functional test of the correlation."""
fcn = CorrEstimator().get_function()
# no categories
x, y = np.random.rand(2, 1, 100), np.random.rand(100)
x[1, ...] += y.reshape(1, -1)
corr = fcn(x, y).ravel()
assert corr[0] < corr[1]
# with categories
x, y = np.random.rand(100), np.random.rand(100)
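# The categorical branch is truncated here; presumably it mirrors the case above,
# injecting a dependency into one category only, e.g. (sketch, assertions assumed):
#   x[:50] += y[:50]
#   cat = np.array([0] * 50 + [1] * 50)
#   corr = fcn(x, y, categories=cat).ravel()
#   assert corr[0] > corr[1]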
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import re
import numpy as np
import tensorflow as tf
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from sklearn.model_selection import train_test_split
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam
from google.cloud.storage import blob, bucket, client
import trainer.dataset
import trainer.model
import trainer.ml_helpers
import trainer.top_words
def generate_experiment_fn(**experiment_args):
"""Create an experiment function.
Args:
experiment_args: keyword arguments to be passed through to experiment
See `tf.contrib.learn.Experiment` for full args.
Returns:
A function:
(tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment
This function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
input functions.
"""
def _experiment_fn(config, hparams):
index_to_component = {}
if hparams.train_file:
with open(hparams.train_file) as f:
if hparams.trainer_type == 'spam':
training_data = trainer.ml_helpers.spam_from_file(f)
else:
training_data = trainer.ml_helpers.component_from_file(f)
else:
training_data = trainer.dataset.fetch_training_data(hparams.gcs_bucket,
hparams.gcs_prefix, hparams.trainer_type)
tf.logging.info('Training data received. Len: %d' % len(training_data))
if hparams.trainer_type == 'spam':
X, y = trainer.ml_helpers.transform_spam_csv_to_features(
training_data)
else:
top_list = trainer.top_words.make_top_words_list(hparams.job_dir)
X, y, index_to_component = trainer.ml_helpers \
.transform_component_csv_to_features(training_data, top_list)
tf.logging.info('Features generated')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x=trainer.model.feature_list_to_dict(X_train, hparams.trainer_type),
y=np.array(y_train),
num_epochs=hparams.num_epochs,
batch_size=hparams.train_batch_size,
shuffle=True
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x=trainer.model.feature_list_to_dict(X_test, hparams.trainer_type),
y=np.array(y_test),
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ctypes
import numpy
import sys
from ctypes import CFUNCTYPE, POINTER, c_double, c_int, c_int32, c_void_p, c_size_t
from numpy.ctypeslib import ndpointer
from .common import SubSolver
from ..model import walk_shape
from ..reparametrization import Reparametrization
class LPSolver(SubSolver):
def get_repametrization(self):
raise NotImplementedError
class TRWS(LPSolver):
DEFAULT_PARAMETERS = {
'max_iterations': 2000,
'threads': 1,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self._init_library()
self.model = model
self._energy = self._energy_create(model.number_of_variables, model.shape,
sum(1 for x in model.factors if x.number_of_variables > 1))
edge_counter = 0
for i, factor in enumerate(model.factors):
if factor.number_of_variables == 1:
self._energy_add_unary(self._energy, factor.variables[0], factor.data)
elif factor.number_of_variables == 2:
self._energy_add_pairwise(self._energy, edge_counter, *factor.variables, factor.data)
edge_counter += 1
else:
raise RuntimeError('Unsupported factor arity.')
self._energy_finalize(self._energy)
self._solver = self._solver_create(self._energy)
def __del__(self):
if self._energy:
self._energy_destroy(self._energy)
self._energy = None
if self._solver:
self._solver_destroy(self._solver)
self._solver = None
def _init_library(self):
self._lib = ctypes.cdll.LoadLibrary('libcombilp_trws_stub.so')
self._energy_create = self._lib.combilp_trws_stub_energy_create
self._energy_create.argtypes = [c_int32, ndpointer(dtype=c_int32), c_int32]
self._energy_create.restype = c_void_p
self._energy_add_unary = self._lib.combilp_trws_stub_energy_add_unary
self._energy_add_unary.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
self._energy_add_pairwise = self._lib.combilp_trws_stub_energy_add_pairwise
self._energy_add_pairwise.argtypes = [c_void_p, c_int32, c_int32, c_int32, ndpointer(dtype=c_double)]
self._energy_finalize = self._lib.combilp_trws_stub_energy_finalize
self._energy_finalize.argtypes = [c_void_p]
self._energy_destroy = self._lib.combilp_trws_stub_energy_destroy
self._energy_destroy.argtypes = [c_void_p]
self._solver_create = self._lib.combilp_trws_stub_solver_create
self._solver_create.argtypes = [c_void_p]
self._solver_create.restype = c_void_p
self._solve = self._lib.combilp_trws_stub_solve
self._solve.argtypes = [c_void_p, c_int, c_int]
self._solver_destroy = self._lib.combilp_trws_stub_destroy_solver
self._solver_destroy.argtypes = [c_void_p]
self._get_backward_messages = self._lib.combilp_trws_stub_get_backward_messages
self._get_backward_messages.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
def solve(self):
self._solve(self._solver,
self.parameters['max_iterations'],
self.parameters['threads'])
def get_repametrization(self):
repa = Reparametrization(self.model)
edge_counter = 0
for i, factor in enumerate(self.model.factors):
if factor.number_of_variables == 2:
self._get_backward_messages(self._solver, edge_counter,
repa.get_factor(i, 0))
edge_counter += 1
# recompute forward messages
values = repa.get_factor_value(i)
repa_values = repa.get_factor(i, 1)
for label in range(factor.shape[1]):
minimum = values[:,label].min()
repa_values[label] = minimum
return repa
class SRMP(LPSolver):
DEFAULT_PARAMETERS = {
'max_iterations': 2000,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self._init_library()
self._solver = self._create(self.model.number_of_variables, self.model.shape)
for factor in self.model.factors:
assert(factor.data.flags.c_contiguous)
self._add_factor(self._solver, factor.number_of_variables,
factor.variables, factor.data)
def __del__(self):
if self._solver:
self._destroy(self._solver)
self._solver = None
def _init_library(self):
self._lib = ctypes.cdll.LoadLibrary('libcombilp_srmp_stub.so')
self._message_func_type = CFUNCTYPE(None, c_size_t, POINTER(c_int32), c_int32, POINTER(c_double), POINTER(c_double))
self._message_func_type.from_param = self._message_func_type
self._create = self._lib.combilp_srmp_stub_create
self._create.argtypes = [c_int32, ndpointer(dtype=c_int32)]
import argparse
import os
import pickle as pkl
import numpy as np
import scipy.sparse as smat
from pecos.core.base import clib
from pecos.utils import smat_util
from pecos.utils.cluster_util import ClusterChain
from pecos.xmc import MLModel
from pecos.xmc.xlinear import XLinearModel
def parse_arguments():
parser = argparse.ArgumentParser(
prog="Evaluate how well the model disentangles label semantics."
)
parser.add_argument(
"-x",
"--inst-path",
type=str,
required=True,
metavar="PATH",
help="path to npz file of feature matrix",
)
parser.add_argument(
"--y-origin",
type=str,
required=True,
metavar="PATH",
help="path to the npz file of the original label matrix",
)
parser.add_argument(
"--y-binned",
type=str,
required=True,
metavar="PATH",
help="path to the binned label matrix",
)
parser.add_argument(
"-m",
"--model-folder",
type=lambda p: os.path.abspath(p),
required=True,
metavar="DIR",
help="path to the model folder",
)
parser.add_argument(
"--binned-mapper", type=str, required=True, help="path to the mapper file",
)
parser.add_argument(
"--pseudo-label-mapper",
type=str,
default=None,
help="path to pseudo label mapper. If None, this variable is ignored.",
)
parser.add_argument(
"--unused-labels",
type=str,
default=None,
help="path to unused label set. If None, this variable is ignored.",
)
parser.add_argument(
"-b",
"--beam-size",
type=int,
required=True,
help="Beam size to calculate the matching matrix.",
)
args = parser.parse_args()
return args
def get_matching_matrix(xlinear_model, Xt, beam_size=10):
"""Compute the matching matrix.
The matching matrix indicates which cluster(s) are selected for each data point in X.
The final result is a sparse matrix of shape N x C, where N is the number of data
points and C is the number of clusters.
Args:
xlinear_model: the pretrained model.
Xt: the feature matrix.
beam_size: beam size for inference.
Returns:
The matching matrix in CSR format.
"""
matching_result = []
batch_size = 8192 * 16
kwargs = {
"beam_size": beam_size,
"only_topk": 30,
"post_processor": "l3-hinge",
}
model_chain = xlinear_model.model.model_chain
for i in range((Xt.shape[0] - 1) // batch_size + 1):
beg, end = i * batch_size, (i + 1) * batch_size
end = min(end, Xt.shape[0])
X_selected = Xt[beg:end]
csr_codes = None
for level in range(len(model_chain) - 1):
cur_model = model_chain[level]
level_pred = cur_model.predict(
X_selected,
csr_codes=csr_codes,
only_topk=beam_size,
post_processor=kwargs["post_processor"],
)
csr_codes = level_pred
matching_result.append(csr_codes)
matching_result = smat.vstack(matching_result, format="csr")
return matching_result
def positive_instances(Xt, Yt, underlying_label_ids):
"""Find the instances having some particular label ids.
For all labels in `underlying_label_ids`, return the list of instances containing
that label as ground-truth.
Args:
Xt: The feature matrix of shape N x d, where N is the number of instances and d is
the feature dimension.
Yt: The label matrix of shape N x L, where L is the size of the label space.
underlying_label_ids: The set of target labels.
Returns:
A list of positive instance ids and their feature vectors.
"""
row_ids_list = []
Xt_subsets = []
for label_id in underlying_label_ids:
row_ids = Yt.indices[Yt.indptr[label_id] : Yt.indptr[label_id + 1]]
Xt_subsets.append(Xt[row_ids])
row_ids_list.append(row_ids)
return row_ids_list, Xt_subsets
def label_id_to_cluster_id(label_id, C, unused_labels):
"""Map the label id to the cluster id according to clustering matrix.
Args:
label_id: the label id.
C: the cluster matrix of shape L x C.
unused_labels: used to adjust the label id.
Returns:
the cluster id.
"""
# count how many unused labels are smaller than label_id
offset = sum([l < label_id for l in unused_labels])
row_id = label_id - offset
assert C.indptr[row_id] + 1 == C.indptr[row_id + 1]
cluster_id = C.indices[C.indptr[row_id]]
return cluster_id
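# Worked example (hypothetical numbers): with unused_labels = {1, 3} and label_id = 4,
# two unused labels precede it, so the row inspected in C is 4 - 2 = 2; the single
# nonzero column of that row is the returned cluster id.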
def match(
xlinear_model, beam_size, instance_ids_list, X_subsets, cid1, cid2,
):
"""Given two clusters, distribute all instances to two groups.
Separate all input features `X_subsets` into two subsets `x_cid1` and `x_cid2`,
according to the prediction results from `xlinear_model`. If the score of an
instance for `cid1` is higher than for `cid2`, then the instance is assigned to group 1.
Args:
xlinear_model: the model.
beam_size: beam size for inference.
instance_ids_list: the instance id of `X_subsets`.
X_subsets: the feature matrix.
cid1, cid2: the cluster ids of two clusters.
Returns:
the instance ids of two subsets.
"""
x_cid1 = []
x_cid2 = []
for instance_ids, X_subset in zip(instance_ids_list, X_subsets):
matching_matrix = get_matching_matrix(
xlinear_model, X_subset, beam_size,
).toarray()
mask = matching_matrix[:, cid1] > matching_matrix[:, cid2]
x_cid1.extend(instance_ids[mask])
x_cid2.extend(instance_ids[~mask])
return x_cid1, x_cid2
def random_baseline(S1, S2):
"""A random baseline that assigns all instances randomly to two groups.
Args:
S1, S2: the ground truth assignment according to their semantic meanings.
Returns:
VI scores of this random baseline.
"""
S = np.concatenate((S1, S2), axis=0)
experiment = []
for _ in range(100):
np.random.shuffle(S)
selector = np.random.randn(len(S)) > 0
K1 = S[selector]
K2 = S[~selector]
vi_sample = VI(S1, S2, K1, K2)
experiment.append(vi_sample)
return np.mean(experiment)
def VI(S1, S2, K1, K2):
"""Computes the Variation of Information(VI) between two clusters.
See: https://en.wikipedia.org/wiki/Variation_of_information for more information.
Args:
S1, S2: the set of ground truth clusters.
K1, K2: the predicted clusters.
Returns:
the VI score.
"""
assert len(S1) + len(S2) == len(K1) + len(K2)
n = len(S1) + len(S2)
eps = 1.0e-8
p1 = len(S1) / n + eps
p2 = len(S2) / n + eps
q1 = len(K1) / n + eps
q2 = len(K2) / n + eps
r11 = len(np.intersect1d(S1, K1)) / n + eps
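# The function is truncated here. Following the standard VI definition
# (VI = H(S) + H(K) - 2 I(S; K), computed from the joint frequencies r_ij),
# the remainder presumably looks like this sketch:
#   r12 = len(np.intersect1d(S1, K2)) / n + eps
#   r21 = len(np.intersect1d(S2, K1)) / n + eps
#   r22 = len(np.intersect1d(S2, K2)) / n + eps
#   vi = -(r11 * (np.log(r11 / p1) + np.log(r11 / q1))
#          + r12 * (np.log(r12 / p1) + np.log(r12 / q2))
#          + r21 * (np.log(r21 / p2) + np.log(r21 / q1))
#          + r22 * (np.log(r22 / p2) + np.log(r22 / q2)))
#   return vi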
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
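# Illustrative usage sketch (added for clarity; not part of the generated table below).
# It builds the trivial space group P 1 by hand and asks for the reflections equivalent
# to (1, 2, 3); the concrete Miller indices are arbitrary example values.
def _example_p1_equivalents():
    rot = N.array([1,0,0,0,1,0,0,0,1])
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    p1 = SpaceGroup(1, 'P 1', [(rot, trans_num, trans_den)])
    # For P 1 the only symmetry-equivalent reflection is hkl itself, with phase factor 1.
    return p1.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))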
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import numpy as np
import arrayfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
"""
Class created for all parameters for tomography solver
"""
def __init__(self):
self.method = "FISTA"
self.stepsize = 1e-2
self.max_iter = 20
self.error = []
self.reg_term = 0.0 #L2 norm
#FISTA
self.fista_global_update = False
self.restart = False
#total variation regularization
self.total_variation = False
self.reg_tv = 1.0 #lambda
self.max_iter_tv = 15
self.order_tv = 1
self.total_variation_gpu = False
#lasso
self.lasso = False
self.reg_lasso = 1.0
#positivity constraint
self.positivity_real = (False, "larger")
self.positivity_imag = (False, "larger")
self.pure_real = False
self.pure_imag = False
#aberration correction
self.pupil_update = False
self.pupil_global_update = False
self.pupil_step_size = 1.0
self.pupil_update_method = "gradient"
#batch gradient update
self.batch_size = 1
#random order update
self.random_order = False
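def _example_configs():
    """Illustrative sketch only (added; not from the original source): a typical
    FISTA + total-variation setup. The numeric values are arbitrary assumptions."""
    configs = AlgorithmConfigs()
    configs.method = "FISTA"
    configs.max_iter = 100
    configs.stepsize = 5e-3
    configs.total_variation = True
    configs.reg_tv = 0.1
    configs.positivity_real = (True, "larger")
    return configs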
class PhaseObject3D:
"""
Class created for 3D objects.
Depending on the scattering model, one of the following quantities will be used:
- Refractive index (RI)
- Transmittance function (Trans)
- PhaseContrast
- Scattering potential (V)
shape: shape of object to be reconstructed in (x,y,z), tuple
voxel_size: size of each voxel in (x,y,z), tuple
    RI_obj: refractive index of object (Optional)
RI: background refractive index (Optional)
slice_separation: For multislice algorithms, how far apart are slices separated, array (Optional)
"""
def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, slice_separation = None):
assert len(shape) == 3, "shape should be 3 dimensional!"
self.shape = shape
self.RI_obj = RI * np.ones(shape, dtype = np_complex_datatype) if RI_obj is None else RI_obj.astype(np_complex_datatype)
self.RI = RI
self.pixel_size = voxel_size[0]
self.pixel_size_z = voxel_size[2]
if slice_separation is not None:
#for discontinuous slices
assert len(slice_separation) == shape[2]-1, "number of separations should match with number of layers!"
self.slice_separation = np.asarray(slice_separation).astype(np_float_datatype)
else:
#for continuous slices
self.slice_separation = self.pixel_size_z * np.ones((shape[2]-1,), dtype = np_float_datatype)
def convertRItoTrans(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.trans_obj = np.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
def convertRItoPhaseContrast(self):
self.contrast_obj = self.RI_obj - self.RI
def convertRItoV(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
def convertVtoRI(self, wavelength):
k0 = 2.0 * np.pi / wavelength
B = -1.0 * (self.RI**2 - self.V_obj.real/k0**2)
C = -1.0 * (-1.0 * self.V_obj.imag/k0**2/2.0)**2
RI_obj_real = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
RI_obj_imag = -0.5 * self.V_obj.imag/k0**2/RI_obj_real
self.RI_obj = RI_obj_real + 1.0j * RI_obj_imag
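def _example_phase_object():
    """Illustrative sketch only (added; not from the original source): a small object in a
    water-like background, converted to the phase-contrast representation used by
    MultiPhaseContrast. Shape, voxel size and refractive indices are arbitrary assumptions."""
    shape = (64, 64, 32)
    voxel_size = (0.1, 0.1, 0.1)
    RI_obj = 1.33 * np.ones(shape, dtype=np_complex_datatype)
    RI_obj[24:40, 24:40, 12:20] += 0.02          # a small box of higher refractive index
    obj = PhaseObject3D(shape, voxel_size, RI_obj=RI_obj, RI=1.33)
    obj.convertRItoPhaseContrast()               # fills obj.contrast_obj = RI_obj - RI
    return obj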
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
    rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
self.phase_obj_3d = phase_obj_3d
self.wavelength = kwargs["wavelength"]
        #Rotation angles and objects
self.rot_angles = rotation_angle_list
self.number_rot = len(self.rot_angles)
self.rotation_pad = kwargs.get("rotation_pad", True)
#Illumination angles
assert len(fx_illu_list) == len(fy_illu_list)
self.fx_illu_list = fx_illu_list
self.fy_illu_list = fy_illu_list
self.number_illum = len(self.fx_illu_list)
        #Aberration object
self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
self.wavelength, kwargs["na"], pad = False)
#Defocus distances and object
self.prop_distances = propagation_distance_list
self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
self.number_defocus = len(self.prop_distances)
#Scattering models and algorithms
self._opticsmodel = {"MultiTrans": MultiTransmittance,
"MultiPhaseContrast": MultiPhaseContrast,
}
self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
"FISTA": self._solveFirstOrderGradient
}
self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
        model: the scattering model; it can be one of the following:
"MultiTrans", "MultiPhaseContrast"(Used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_inplace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
def forwardPredict(self, field = False):
"""
        Uses the current object in phase_obj_3d to predict the amplitude of the exit wave.
        Before calling, make sure the correct object representation has been set
        (e.g. via setScatteringMethod).
"""
obj_gpu = af.to_array(self._x)
with contexttimer.Timer() as timer:
forward_scattered_predict= []
if self._scattering_obj.back_scatter:
back_scattered_predict = []
for rot_idx in range(self.number_rot):
forward_scattered_predict.append([])
if self._scattering_obj.back_scatter:
back_scattered_predict.append([])
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
for illu_idx in range(self.number_illum):
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
if field:
forward_scattered_predict[rot_idx].append(np.array(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
                            back_scattered_predict[rot_idx].append(np.array(fields["back_scattered_field"]))
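                # --- The source file is truncated here; everything below is an assumed sketch only. ---
                # After all illuminations for this angle, rotate the object back so the next
                # projection starts from the unrotated volume (assumption: the negative angle
                # undoes the in-place rotation performed above).
                if self.rot_angles[rot_idx] != 0:
                    self._rot_obj.rotate(obj_gpu, -self.rot_angles[rot_idx])
        print("Forward prediction finished in %.3f s." % timer.elapsed)
        if self._scattering_obj.back_scatter:
            return forward_scattered_predict, back_scattered_predict
        return forward_scattered_predict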
# coding: utf-8
# ### Compute results for task 1 on the humour dataset.
#
# Please see the readme for instructions on how to produce the GPPL predictions that are required for running this script.
#
# Then, set the variable resfile to point to the output of the previous step.
#
import string
import pandas as pd
import os, logging, csv
from nltk.tokenize import word_tokenize
from scipy.stats.mstats import spearmanr, pearsonr
import numpy as np
# Where to find the predictions and gold standard
resfile = './results/experiment_humour_2019-02-26_20-44-52/results-2019-02-26_20-44-52.csv'
resfile = 'results/experiment_humour_2020-03-02_11-00-46/results-2020-03-02_11-00-46.csv'
# Load the data
data = pd.read_csv(resfile, usecols=[0,1,2])
ids = data['id'].values
bws = data['bws'].values
gppl = data['predicted'].values
# ### Ties in the BWS Scores contribute to the discrepancies between BWS and GPPL
#
# GPPL scores are all unique, but BWS contains many ties.
# Selecting only one of the tied items increases the Spearman correlation.
#
# Find the ties in BWS. Compute correlations between those tied items for the GPPL scores vs. original BWS scores and GPPL vs. scaled BWS scores.
# Do the ties contribute a lot of the differences in the overall ranking?
# Another way to test if the ties contribute differences to the ranking:
# Select only one random item from each tie and exclude the rest, then recompute.
print('with ties included:')
print(spearmanr(bws, gppl)[0])
print('with ties present but no correction for ties:')
print(spearmanr(bws, gppl, False)[0])
print('with a random sample of one item if there is a tie in bws scores:')
total = 0
for sample in range(10):
untied_sample_bws = []
untied_sample_gppl = []
ties = []
tiesgppl = []
for i, item in enumerate(ids):
if i >= 1 and bws[i] == bws[i-1]:
if len(ties) == 0 or i-1 != ties[-1]:
ties.append(i-1) # the previous one should be added to the list if we have just recognised it as a tie
ties.append(i)
#randomly choose whether to keep the previous item or this one
if np.random.rand() < 0.5:
pass
else:
untied_sample_bws.pop()
untied_sample_gppl.pop()
untied_sample_bws.append(bws[i])
untied_sample_gppl.append(gppl[i])
else:
untied_sample_bws.append(bws[i])
untied_sample_gppl.append(gppl[i])
if i >= 1 and gppl[i] == gppl[i-1]:
if len(tiesgppl) == 0 or i-1 != tiesgppl[-1]:
tiesgppl.append(i-1) # the previous one should be added to the list if we have just recognised it as a tie
tiesgppl.append(i)
rho = spearmanr(untied_sample_bws, untied_sample_gppl)[0]
total += rho
print(rho)
print('Number of BWS tied items = %i' % len(ties))
print('Number of GPPL tied items = %i' % len(tiesgppl))
sample_size = len(untied_sample_bws)
print('Mean for samples without ties = %f' % (total / 10))
print('Correlations for random samples of the same size (%i), allowing ties: ' % sample_size)
total = 0
for sample in range(10):
# take a random sample, without caring about ties
randidxs = np.random.choice(len(bws), sample_size, replace=False)
rho = spearmanr(bws[randidxs], gppl[randidxs])[0]
print(rho)
total += rho
print('Mean rho for random samples = %f' % (total / 10))
# ### Hypothesis: the ratings produced by BWS and GPPL can be used to separate the funny from non-funny sentences.
# This compares the predicted ratings to the gold standard *classifications* to see if the ratings can be used
# to separate funny and non-funny.
# load the discrete labels
def get_cats(fname):
with open(os.path.join('./data/pl-humor-full', fname), 'r') as f:
for line in f:
line = line.strip()
for c in string.punctuation + ' ' + '\xa0':
line = line.replace(c, '')
# line = line.replace(' ', '').strip()
# line = line.replace('"', '') # this is probably borked by tokenization?
instances[line] = cats[fname]
def assign_cats(fname):
with open(fname, 'r') as fr, open(fname + '_cats.csv', 'w') as fw:
reader = csv.DictReader(fr)
writer = csv.DictWriter(fw, fieldnames=['id', 'bws', 'predicted', 'category', 'sentence'])
writer.writeheader()
for row in reader:
sentence = row['sentence'].strip()
for c in string.punctuation + ' ':
sentence = sentence.replace(c, '')
# sentence = row['sentence'].replace(' ','').strip()
# sentence = sentence.replace('`', '\'') # this is probably borked by tokenization?
# sentence = sentence.replace('"', '') # this is probably borked by tokenization?
row['category'] = instances[sentence]
writer.writerow(row)
cats = dict()
cats['jokes_heterographic_puns.txt'] = 'hetpun'
cats['jokes_homographic_puns.txt'] = 'hompun'
cats['jokes_nonpuns.txt'] = 'nonpun'
cats['nonjokes.txt'] = 'non'
instances = dict()
for fname in cats.keys():
get_cats(fname)
assign_cats(resfile)
catfile = os.path.expanduser(resfile + '_cats.csv')
#'./results/experiment_humour_2019-02-28_16-39-36/cats/results-2019-02-28_20-45-25.csv')
cats = pd.read_csv(catfile, index_col=0, usecols=[0,3])
cat_list = np.array([cats.loc[instance].values[0] if instance in cats.index else 'unknown' for instance in ids])
gfunny = (cat_list == 'hompun') | (cat_list == 'hetpun')
gunfunny = (cat_list == 'nonpun') | (cat_list == 'non')
print('Number of funny = %i, non-funny = %i' % (np.sum(gfunny),
np.sum(gunfunny) ) )
# check classification accuracy -- how well does our ranking separate the two classes
from sklearn.metrics import roc_auc_score
gold = np.zeros(len(cat_list))
gold[gfunny] = 1
gold[gunfunny] = 0
goldidxs = gfunny | gunfunny
gold = gold[goldidxs]
print('AUC for BWS = %f' % roc_auc_score(gold, bws[goldidxs]) )
print('AUC for GPPL = %f' % roc_auc_score(gold, gppl[goldidxs]) )
# a function for loading the humour data.
def load_crowd_data_TM(path):
"""
Read csv and create preference pairs of tokenized sentences.
:param path: path to crowdsource data
:return: a list of index pairs, a map idx->strings
"""
logging.info('Loading crowd data...')
pairs = []
idx_instance_list = []
with open(path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
next(reader) # skip header row
for line_no, line in enumerate(reader):
answer = line[1]
A = word_tokenize(line[2])
B = word_tokenize(line[3])
            # add instances to the list (if not already in it)
if A not in idx_instance_list:
idx_instance_list.append(A)
if B not in idx_instance_list:
idx_instance_list.append(B)
# add pairs to list (in decreasing preference order)
if answer == 'A':
pairs.append((idx_instance_list.index(A), idx_instance_list.index(B)))
if answer == 'B':
pairs.append((idx_instance_list.index(B), idx_instance_list.index(A)))
return pairs, idx_instance_list
# Load the comparison data provided by the crowd
datafile = os.path.expanduser('./data/pl-humor-full/results.tsv')
pairs, idxs = load_crowd_data_TM(datafile)
pairs = np.array(pairs)
np.savetxt(os.path.expanduser('./data/pl-humor-full/pairs.csv'), pairs, '%i', delimiter=',')
# For each item compute its BWS scores
# but scale by the BWS scores of the items they are compared against.
# This should indicate whether two items with same BWS score should
# actually be ranked differently according to what they were compared against.
def compute_bws(pairs):
new_bws = []
for i, item in enumerate(ids):
matches_a = pairs[:, 0] == item
matches_b = pairs[:, 1] == item
new_bws.append((np.sum(matches_a) - np.sum(matches_b))
/ float(np.sum(matches_a) + np.sum(matches_b)))
return new_bws
# ### Agreement and consistency of annotators
# Table 3: For the humour dataset, compute the correlation between the gold standard and the BWS scores with subsets of data.
# Take random subsets of pairs so that each pair has only 4 annotations
def get_pid(pair):
return '#'.join([str(i) for i in sorted(pair)])
def compute_mean_correlation(nannos):
nreps = 10
mean_rho = 0
for rep in range(nreps):
pair_ids = list([get_pid(pair) for pair in pairs])
        upair_ids = np.unique(pair_ids)
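        # --- The source is truncated above; an assumed sketch of the remainder follows. ---
        # Keep at most `nannos` randomly chosen annotations per unique pair, recompute the
        # BWS scores from that subsample, and correlate them with the full-data BWS scores.
        pair_ids = np.array(pair_ids)
        keep = np.zeros(len(pair_ids), dtype=bool)
        for upid in upair_ids:
            idxs = np.where(pair_ids == upid)[0]
            chosen = np.random.choice(idxs, min(nannos, len(idxs)), replace=False)
            keep[chosen] = True
        sub_bws = compute_bws(pairs[keep])
        mean_rho += spearmanr(bws, sub_bws)[0]
    return mean_rho / nreps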
# -*- coding: utf-8 -*-
from . import plot_settings as pls
from . import plots as pl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import logging
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from scipy.stats.kde import gaussian_kde
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
def find_best_para(para_trace, bins):
''' find the best parameter and its 1-sigma/2-sigma for (non) Gaussian distribution '''
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
hist = np.histogram(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of the top 7
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para = np.r_[best_para, para_err_left, para_err_right]
return para
def find_best_para_plt(para_trace, bins):
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
plt.figure()
hist = plt.hist(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of the top 7
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para = np.r_[best_para, para_err_left, para_err_right]
return para
def find_best_para2(para_trace, bins):
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
hist = np.histogram(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of top 7
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left1 = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right1 = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para_err_left2 = best_para - para_trace[int(len(left) * (1-0.9544))]
para_err_right2 = para_trace[int(len(right) * 0.9544) + len(left)] - best_para
para = np.r_[best_para, para_err_left2,para_err_left1, para_err_right1,para_err_right2]
return para
def _quantile(x, q, weights=None):
"""
Compute sample quantiles with support for weighted samples.
This is a copy of quantile in corner (https://github.com/dfm/corner.py). Copyright (c) 2013-2015 <NAME>.
Note
----
When ``weights`` is ``None``, this method simply calls numpy's percentile
function with the values of ``q`` multiplied by 100.
Parameters
----------
x : array_like[nsamples,]
The samples.
q : array_like[nquantiles,]
The list of quantiles to compute. These should all be in the range
``[0, 1]``.
weights : Optional[array_like[nsamples,]]
        An optional weight corresponding to each sample.
Returns
-------
quantiles : array_like[nquantiles,]
The sample quantiles computed at ``q``.
Raises
------
ValueError
For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch
between ``x`` and ``weights``.
"""
    x = np.atleast_1d(x)
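    # Hedged usage note (illustrative; assumes the rest of the corner.py
    # implementation): with ``weights=None`` the result matches numpy, e.g.
    # >>> _quantile(np.array([1.0, 2.0, 3.0, 4.0]), [0.5])
    # array([2.5])   # same as np.percentile([1, 2, 3, 4], 50)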
from __future__ import division
import pytest
import numpy as np
import cudf as pd
import fast_carpenter.masked_tree as m_tree
@pytest.fixture
def tree_no_mask(infile, full_event_range):
return m_tree.MaskedUprootTree(infile, event_ranger=full_event_range)
@pytest.fixture
def tree_w_mask_bool(infile, event_range):
mask = np.ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
return m_tree.MaskedUprootTree(infile, event_ranger=event_range, mask=mask)
@pytest.fixture
def tree_w_mask_int(infile, event_range):
mask = np.ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
    mask = np.where(mask)
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.metrics.tests.test_ranking import make_prediction
from sklearn.utils.validation import check_consistent_length
from mcc_f1 import mcc_f1_curve
def test_mcc_f1_curve():
# Test MCC and F1 values for all points of the curve
y_true, _, probas_pred = make_prediction(binary=True)
mcc, f1, thres = mcc_f1_curve(y_true, probas_pred)
check_consistent_length(mcc, f1, thres)
expected_mcc, expected_f1 = _mcc_f1_calc(y_true, probas_pred, thres)
assert_array_almost_equal(f1, expected_f1)
assert_array_almost_equal(mcc, expected_mcc)
def _mcc_f1_calc(y_true, probas_pred, thresholds):
# Alternative calculation of (unit-normalized) MCC and F1 scores
pp = probas_pred
ts = thresholds
tps = np.array([np.logical_and(pp >= t, y_true == 1).sum() for t in ts])
fps = np.array([np.logical_and(pp >= t, y_true == 0).sum() for t in ts])
tns = np.array([np.logical_and(pp < t, y_true == 0).sum() for t in ts])
fns = np.array([np.logical_and(pp < t, y_true == 1).sum() for t in ts])
with np.errstate(divide='ignore', invalid='ignore'):
f1s = 2*tps / (2*tps + fps + fns)
d = np.sqrt((tps+fps)*(tps+fns)*(tns+fps)*(tns+fns))
        d = np.array([1 if di == 0 else di for di in d])
import re
import os
import numpy as np
import pandas as pd
import scipy.stats as sps
pd.options.display.max_rows = 4000
pd.options.display.max_columns = 4000
def write_txt(str, path):
text_file = open(path, "w")
text_file.write(str)
text_file.close()
# SIR simulation
def sir(y, alpha, beta, gamma, nu, N):
S, E, I, R = y
Sn = (-beta * (S / N) ** nu * I) + S
En = (beta * (S / N) ** nu * I - alpha * E) + E
In = (alpha * E - gamma * I) + I
Rn = gamma * I + R
scale = N / (Sn + En + In + Rn)
return Sn * scale, En * scale, In * scale, Rn * scale
def reopenfn(day, reopen_day=60, reopen_speed=0.1, reopen_cap = .5):
"""Starting on `reopen_day`, reduce contact restrictions
by `reopen_speed`*100%.
"""
if day < reopen_day:
return 1.0
else:
val = (1 - reopen_speed) ** (day - reopen_day)
return val if val >= reopen_cap else reopen_cap
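# Worked example (illustrative, using the default arguments):
# >>> reopenfn(59)   # before reopen_day
# 1.0
# >>> reopenfn(65)   # 0.9 ** 5 ~ 0.59, still above the cap
# 0.59049...
# >>> reopenfn(70)   # 0.9 ** 10 ~ 0.35 falls below reopen_cap, so the cap is returned
# 0.5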
def reopen_wrapper(dfi, day, speed, cap):
p_df = dfi.reset_index()
p_df.columns = ['param', 'val']
ro = dict(param = ['reopen_day', 'reopen_speed', 'reopen_cap'],
val = [day, speed, cap])
p_df = pd.concat([p_df, pd.DataFrame(ro)])
p_df
SIR_ii = SIR_from_params(p_df)
return SIR_ii['arr_stoch'][:,3]
def scale(arr, mu, sig):
if len(arr.shape)==1:
arr = np.expand_dims(arr, 0)
arr = np.apply_along_axis(lambda x: x-mu, 1, arr)
arr = np.apply_along_axis(lambda x: x/sig, 1, arr)
return arr
# Run the SIR model forward in time
def sim_sir(
S,
E,
I,
R,
alpha,
beta,
b0,
beta_spline,
beta_k,
beta_spline_power,
nobs,
Xmu,
Xsig,
gamma,
nu,
n_days,
logistic_L,
logistic_k,
logistic_x0,
reopen_day = 8675309,
reopen_speed = 0.0,
reopen_cap = 1.0,
):
N = S + E + I + R
s, e, i, r = [S], [E], [I], [R]
if len(beta_spline) > 0:
knots = np.linspace(0, nobs-nobs/beta_k/2, beta_k)
for day in range(n_days):
y = S, E, I, R
# evaluate splines
if len(beta_spline) > 0:
X = power_spline(day, knots, beta_spline_power, xtrim = nobs)
# X = scale(X, Xmu, Xsig)
#scale to prevent overflows and make the penalties comparable across bases
XB = float(X@beta_spline)
sd = logistic(L = 1, k=1, x0 = 0, x= b0 + XB)
else:
sd = logistic(logistic_L, logistic_k, logistic_x0, x=day)
sd *= reopenfn(day, reopen_day, reopen_speed, reopen_cap)
beta_t = beta * (1 - sd)
S, E, I, R = sir(y, alpha, beta_t, gamma, nu, N)
s.append(S)
e.append(E)
i.append(I)
r.append(R)
s, e, i, r = np.array(s), np.array(e), np.array(i), np.array(r)
return s, e, i, r
# # compute X scale factor. first need to compute who X matrix across all days
# nobs = 100
# n_days = 100
# beta_spline_power = 2
# beta_spline = np.random.uniform(size = len(knots))
# X = np.stack([power_spline(day, knots, beta_spline_power, xtrim = nobs) for day in range(n_days)])
# # need to be careful with this: apply the scaling to the new X's when predicting
def power_spline(x, knots, n, xtrim):
if x > xtrim: #trim the ends of the spline to prevent nonsense extrapolation
x = xtrim + 1
spl = x - np.array(knots)
spl[spl<0] = 0
spl = spl/(xtrim**n)#scaling -- xtrim is the max number of days, so the highest value that the spline could have
return spl**n
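# Worked example (illustrative): evaluating the truncated power basis at day 5
# with hypothetical knots [0, 10], power n=2 and xtrim=100:
# >>> power_spline(5, [0, 10], 2, 100)
# -> approximately [2.5e-07, 0.0]   # (5/100**2)**2 for the active knot, 0 past the other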
'''
Plan:
beta_t = L/(1 + np.exp(XB))
'''
def logistic(L, k, x0, x):
return L / (1 + np.exp(-k * (x - x0)))
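# Worked value (illustrative): at the midpoint x == x0 the curve is at half its
# ceiling, e.g. logistic(L=1.0, k=1.0, x0=0.0, x=0.0) == 0.5, and it approaches
# L as x moves far to the right of x0.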
def qdraw(qvec, p_df):
"""
Function takes a vector of quantiles and returns marginals based on the parameters in the parameter data frame
It returns a bunch of parameters for inputting into SIR
It'll also return their probability under the prior
"""
assert len(qvec) == p_df.shape[0]
outdicts = []
for i in range(len(qvec)):
if p_df.distribution.iloc[i] == "constant":
out = dict(param=p_df.param.iloc[i], val=p_df.base.iloc[i], prob=1)
else:
            # Construct this differently for different distributions
if p_df.distribution.iloc[i] == "gamma":
p = (qvec[i], p_df.p1.iloc[i], 0, p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "beta":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "uniform":
p = (qvec[i], p_df.p1.iloc[i], p_df.p1.iloc[i] + p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "norm":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
out = dict(
param=p_df.param.iloc[i],
val=getattr(sps, p_df.distribution.iloc[i]).ppf(*p),
)
# does scipy not have a function to get the density from the quantile?
p_pdf = (out["val"],) + p[1:]
out.update({"prob": getattr(sps, p_df.distribution.iloc[i]).pdf(*p_pdf)})
outdicts.append(out)
return pd.DataFrame(outdicts)
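# Hedged usage sketch -- the parameter table below is hypothetical, not the
# project's actual prior specification:
# >>> toy_p_df = pd.DataFrame({"param": ["beta", "nu"],
# ...                          "distribution": ["gamma", "constant"],
# ...                          "base": [np.nan, 1.0],
# ...                          "p1": [2.0, np.nan],
# ...                          "p2": [0.5, np.nan]})
# >>> qdraw(np.array([0.5, 0.5]), toy_p_df)
# Returns one row per parameter with the drawn value ("val") and its prior
# density ("prob"); the constant row passes through base=1.0 with prob=1.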
def jumper(start, jump_sd):
probit = sps.norm.ppf(start)
probit += np.random.normal(size=len(probit), scale=jump_sd)
newq = sps.norm.cdf(probit)
return newq
def compute_census(projection_admits_series, mean_los):
"""Compute Census based on exponential LOS distribution."""
census = [0]
for a in projection_admits_series.values:
c = float(a) + (1 - 1 / float(mean_los)) * census[-1]
census.append(c)
return np.array(census[1:])
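# Worked example (illustrative): with 10 admissions per day and mean_los=5,
# each day keeps 1 - 1/5 = 0.8 of yesterday's census plus today's admissions:
# >>> compute_census(pd.Series([10, 10, 10]), mean_los=5)
# array([10. , 18. , 24.4])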
def SIR_from_params(p_df):
"""
This function takes the output from the qdraw function
"""
n_hosp = int(p_df.val.loc[p_df.param == "n_hosp"])
incubation_days = float(p_df.val.loc[p_df.param == "incubation_days"])
hosp_prop = float(p_df.val.loc[p_df.param == "hosp_prop"])
ICU_prop = float(p_df.val.loc[p_df.param == "ICU_prop"])
vent_prop = float(p_df.val.loc[p_df.param == "vent_prop"])
hosp_LOS = float(p_df.val.loc[p_df.param == "hosp_LOS"])
ICU_LOS = float(p_df.val.loc[p_df.param == "ICU_LOS"])
vent_LOS = float(p_df.val.loc[p_df.param == "vent_LOS"])
recovery_days = float(p_df.val.loc[p_df.param == "recovery_days"])
mkt_share = float(p_df.val.loc[p_df.param == "mkt_share"])
region_pop = float(p_df.val.loc[p_df.param == "region_pop"])
logistic_k = float(p_df.val.loc[p_df.param == "logistic_k"])
logistic_L = float(p_df.val.loc[p_df.param == "logistic_L"])
logistic_x0 = float(p_df.val.loc[p_df.param == "logistic_x0"])
nu = float(p_df.val.loc[p_df.param == "nu"])
beta = float(
p_df.val.loc[p_df.param == "beta"]
) # get beta directly rather than via doubling time
# assemble the coefficient vector for the splines
beta_spline = np.array(p_df.val.loc[p_df.param.str.contains('beta_spline_coef')]) #this evaluates to an empty array if it's not in the params
if len(beta_spline) > 0:
b0 = float(p_df.val.loc[p_df.param == "b0"])
beta_spline_power = np.array(p_df.val.loc[p_df.param == "beta_spline_power"])
nobs = float(p_df.val.loc[p_df.param == "nobs"])
beta_k = int(p_df.loc[p_df.param == "beta_spline_dimension", 'val'])
Xmu = p_df.loc[p_df.param == "Xmu", 'val'].iloc[0]
Xsig = p_df.loc[p_df.param == "Xsig", 'val'].iloc[0]
else:
beta_spline_power = None
beta_k = None
nobs = None
b0 = None
Xmu, Xsig = None, None
reopen_day, reopen_speed, reopen_cap = 1000, 0.0, 1.0
if "reopen_day" in p_df.param.values:
reopen_day = int(p_df.val.loc[p_df.param == "reopen_day"])
if "reopen_speed" in p_df.param.values:
reopen_speed = float(p_df.val.loc[p_df.param == "reopen_speed"])
if "reopen_cap" in p_df.param.values:
reopen_cap = float(p_df.val.loc[p_df.param == "reopen_cap"])
alpha = 1 / incubation_days
gamma = 1 / recovery_days
total_infections = n_hosp / mkt_share / hosp_prop
n_days = 200
# Offset by the incubation period to start the sim
# that many days before the first hospitalization
# Estimate the number Exposed from the number hospitalized
# on the first day of non-zero covid hospitalizations.
from scipy.stats import expon
# Since incubation_days is exponential in SEIR, we start
# the time `offset` days before the first hospitalization
# We determine offset by allowing enough time for the majority
# of the initial exposures to become infected.
offset = expon.ppf(
0.99, 1 / incubation_days
    )  # Enough time for 99% of exposed to become infected
offset = int(offset)
s, e, i, r = sim_sir(
S=region_pop - total_infections,
E=total_infections,
I=0.0, # n_infec / detection_prob,
R=0.0,
alpha=alpha,
beta=beta,
b0=b0,
beta_spline = beta_spline,
beta_k = beta_k,
beta_spline_power = beta_spline_power,
Xmu = Xmu,
Xsig = Xsig,
nobs = nobs,
gamma=gamma,
nu=nu,
n_days=n_days + offset,
logistic_L=logistic_L,
logistic_k=logistic_k,
logistic_x0=logistic_x0 + offset,
reopen_day=reopen_day,
reopen_speed=reopen_speed,
reopen_cap=reopen_cap
)
arrs = {}
for sim_type in ["mean", "stochastic"]:
if sim_type == "mean":
ds = np.diff(i) + np.diff(r) # new infections is delta i plus delta r
ds = np.array([0] + list(ds))
ds = ds[offset:]
hosp_raw = hosp_prop
ICU_raw = hosp_raw * ICU_prop # coef param
vent_raw = ICU_raw * vent_prop # coef param
hosp = ds * hosp_raw * mkt_share
icu = ds * ICU_raw * mkt_share
vent = ds * vent_raw * mkt_share
elif sim_type == "stochastic":
# Sampling Stochastic Observation
            ds = np.diff(i) + np.diff(r)
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
from datetime import date
from math import e
def calc_rate(data1, data2):
if(data2 == 0):
return data1
else:
if(data1 < data2):
return (data2 / data1) * -1
else:
return data1 / data2
def calc_mort_rate(data1, data2):
if(data2 == 0):
return 0
else:
return data1 / data2
def compute_data(parsed_data):
days = np.array([])
new_cases = np.array([])
cases_growth_factor = np.array([])
new_deaths = np.array([])
deaths_growth_factor = np.array([])
new_tests = np.array([])
tests_growth_factor = np.array([])
new_recovered = np.array([])
recovered_growth_factor = np.array([])
new_hospitalized = np.array([])
hospitalized_growth_factor = np.array([])
mortality_rate = np.array([])
active_cases = np.array([])
for i, entry in enumerate(parsed_data[0]):
if(i == 0):
new_cases = np.append(new_cases, parsed_data[1][i] - 0)
cases_growth_factor = np.append(cases_growth_factor, 0)
new_deaths = np.append(new_deaths, parsed_data[2][i] - 0)
deaths_growth_factor = np.append(deaths_growth_factor, 0)
new_tests = np.append(new_tests, parsed_data[3][i] - 0)
tests_growth_factor = np.append(tests_growth_factor, 0)
new_recovered = np.append(new_recovered, parsed_data[4][i] - 0)
recovered_growth_factor = np.append(recovered_growth_factor, 0)
new_hospitalized = np.append(new_hospitalized, parsed_data[5][i] - 0)
hospitalized_growth_factor = np.append(hospitalized_growth_factor, 0)
mortality_rate = np.append(mortality_rate, calc_mort_rate(parsed_data[2][i], parsed_data[1][i]))
active_cases = np.append(active_cases, (parsed_data[1][i] - parsed_data[4][i] - parsed_data[2][i]))
days = np.append(days, i)
continue
new_cases = np.append(new_cases, parsed_data[1][i] - parsed_data[1][i-1])
cases_growth_factor = np.append(cases_growth_factor, calc_rate(parsed_data[1][i], parsed_data[1][i-1]))
new_deaths = np.append(new_deaths, parsed_data[2][i] - parsed_data[2][i-1])
deaths_growth_factor = np.append(deaths_growth_factor, calc_rate(parsed_data[2][i], parsed_data[2][i-1]))
new_tests = np.append(new_tests, parsed_data[3][i] - parsed_data[3][i-1])
tests_growth_factor = np.append(tests_growth_factor, calc_rate(parsed_data[3][i], parsed_data[3][i-1]))
new_recovered = np.append(new_recovered, parsed_data[4][i] - parsed_data[4][i-1])
recovered_growth_factor = np.append(recovered_growth_factor, calc_rate(parsed_data[4][i], parsed_data[4][i-1]))
new_hospitalized = np.append(new_hospitalized, parsed_data[5][i] - parsed_data[5][i-1])
hospitalized_growth_factor = np.append(hospitalized_growth_factor, calc_rate(parsed_data[5][i], parsed_data[5][i-1]))
mortality_rate = np.append(mortality_rate, calc_mort_rate(parsed_data[2][i], parsed_data[1][i]))
active_cases = np.append(active_cases, (parsed_data[1][i] - parsed_data[4][i] - parsed_data[2][i]))
days = np.append(days, i)
parsed_data.append(days)
parsed_data.append(new_cases)
parsed_data.append(cases_growth_factor)
parsed_data.append(new_deaths)
parsed_data.append(deaths_growth_factor)
parsed_data.append(new_recovered)
parsed_data.append(recovered_growth_factor)
parsed_data.append(new_hospitalized)
parsed_data.append(hospitalized_growth_factor)
parsed_data.append(new_tests)
parsed_data.append(tests_growth_factor)
parsed_data.append(mortality_rate)
parsed_data.append(active_cases)
return parsed_data
def logistic_fn(population):
day_counter = 1
days = np.array([])
logistic = np.array([])
current_cases = 1
while (day_counter < 60):
days = np.append(days, day_counter)
log_fn = population / (1 + ((population / current_cases) - 1) * e ** (-0.38 * day_counter))
print(log_fn)
logistic = np.append(logistic, log_fn)
day_counter += 1
return (days, logistic)
def difference(parsed_data, day1, day2):
print("Data difference between:", parsed_data[0][day1], 'and', parsed_data[0][day2])
print("\u0394Days:\t", parsed_data[6][day2] - parsed_data[6][day1])
print("\u0394Cases:\t", parsed_data[1][day2] - parsed_data[1][day1])
print("\u0394Deaths: ", parsed_data[2][day2] - parsed_data[2][day1])
print("\u0394Recov.: ", parsed_data[4][day2] - parsed_data[4][day1])
print("\u0394Hospi.: ", parsed_data[5][day2] - parsed_data[5][day1])
print("\u0394Tests:\t", parsed_data[3][day2] - parsed_data[3][day1])
def projection(next_days, days_passed, parsed_data):
total_cases = float(parsed_data[1][len(parsed_data[1])-1])
total_deaths = float(parsed_data[2][len(parsed_data[2])-1])
total_tests = float(parsed_data[3][len(parsed_data[4])-1])
total_recovered = float(parsed_data[4][len(parsed_data[4])-1])
total_hospitalized = float(parsed_data[5][len(parsed_data[5])-1])
total_active = float(parsed_data[18][len(parsed_data[18])-1])
counter = 0
avg_cases_gf = 0.0
avg_deaths_gf = 0.0
avg_tests_gf = 0.0
avg_recovered_gf = 0.0
avg_hospitalized_gf = 0.0
avg_active_gf = 0.0
while(counter < days_passed):
avg_cases_gf += parsed_data[8][len(parsed_data[8]) - 1 - counter]
avg_deaths_gf += parsed_data[10][len(parsed_data[10]) - 1 - counter]
avg_tests_gf += parsed_data[16][len(parsed_data[16]) - 1 - counter]
avg_recovered_gf += parsed_data[12][len(parsed_data[12]) - 1 - counter]
avg_hospitalized_gf += parsed_data[14][len(parsed_data[14]) - 1 - counter]
avg_active_gf += parsed_data[18][len(parsed_data[18]) - 1 - counter]
counter += 1
avg_cases_gf /= days_passed
avg_deaths_gf /= days_passed
avg_tests_gf /= days_passed
avg_recovered_gf /= days_passed
avg_hospitalized_gf /= days_passed
avg_active_gf /= days_passed
print('Avg Cases Growth Factor (past', days_passed ,'days):', round(avg_cases_gf, 5))
print('Avg Deaths Growth Factor (past', days_passed ,'days):', round(avg_deaths_gf, 5))
print('Avg Tests Growth Factor (past', days_passed ,'days):', round(avg_tests_gf, 5))
print('Avg Recovered Growth Factor (past', days_passed ,'days):', round(avg_recovered_gf, 5))
print('Avg Hospitalized Growth Factor (past', days_passed ,'days):', round(avg_hospitalized_gf, 5))
print('Avg Active Cases Growth Factor (past', days_passed ,'days):', round(avg_active_gf, 5))
counter = 0
while(counter < next_days):
total_cases = total_cases * avg_cases_gf
total_deaths = total_deaths * avg_deaths_gf
total_tests = total_tests * avg_tests_gf
total_recovered = total_recovered * avg_recovered_gf
total_hospitalized = total_hospitalized * avg_hospitalized_gf
total_active = total_active * avg_active_gf
counter += 1
print("Projections for the next", next_days, "days:")
print("Cases:", round(total_cases))
print("Active:", round(total_active))
print("Deaths:", round(total_deaths))
print("Tests:", round(total_tests))
print("Recovered:", round(total_recovered))
print("Hospitalized:", round(total_hospitalized))
def linear_regression(x, y):
x_nums = [i for i in range(0, len(x))] #create list of integers given that original x are string values
n = len(x_nums) #number of elements in x axis (same as y axis)
add_x = sum(x_nums) #add all x axis elements
add_y = sum(y) #add all y axis elements
add_x_sqr = sum([i**2 for i in x_nums]) #add all y axis elements squared
add_xy = sum([x_nums[i] * y[i] for i in range(0, n)]) #add the product of each corresponding pair from x_nums and y
slope = (n * add_xy - add_x * add_y) / (n * add_x_sqr - add_x**2) #compute slope of linear regression
y_intercept = (add_y * add_x_sqr - add_x * add_xy) / (n * add_x_sqr - add_x**2) #compute the y intercept of the linear regression
lin_reg_x = [i for i in range(0, len(x_nums))] #create list of elements from 0 to length of x_nums
lin_reg_y = [slope * i + y_intercept for i in lin_reg_x] #replace x value in equation to find the y in linear regression
return [slope, y_intercept, lin_reg_y] #return slope, y_intercept, and linear regression list for y
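# Worked example (illustrative): a perfectly linear series recovers slope 2.0 and
# intercept 1.0; x only supplies the number of points, so date strings work too:
# >>> slope, intercept, fitted = linear_regression(['d0', 'd1', 'd2', 'd3'], [1, 3, 5, 7])
# >>> (slope, intercept)
# (2.0, 1.0)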
def plot_graph(x, y, color, x_label, y_label, chart_title, file_name='', save=False, log_view=False, trend=False):
plt.figure(figsize=(14,10))
plt.ticklabel_format(style='plain')
plt.title(chart_title, fontdict={'fontsize' : 25})
if(log_view):
plt.yscale('log')
if(trend):
lin_reg_result = linear_regression(x, y)
lin_reg_equation = str(lin_reg_result[0])[:10] + 'X '
if(lin_reg_result[1] >= 0):
lin_reg_equation += '+'
lin_reg_equation += str(lin_reg_result[1])[:10]
plt.plot(x, lin_reg_result[2], color + '--', label = lin_reg_equation)
plt.legend(loc='upper left')
plt.plot(x, y, 'ko', x, y, color)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.grid()
if(save):
warnings.filterwarnings('ignore')
plt.savefig('../export/graphs/' + file_name)
else:
plt.show()
def plot_graph_all(parsed_data, chart_title, from_day, to_day, file_name='', save=False):
plt.figure(figsize=(14,10))
plt.ticklabel_format(style='plain')
plt.title(chart_title, fontdict={'fontsize' : 25})
plt.plot(parsed_data[4][from_day:to_day], parsed_data[1][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[1][from_day:to_day], 'b', label = "Cases")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[2][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[2][from_day:to_day], 'r', label = "Deaths")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[4][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[4][from_day:to_day], 'g', label = "Recovered")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[18][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[18][from_day:to_day], 'k', label = "Active Cases")
plt.legend(loc="upper left")
plt.xlabel("Days")
plt.grid()
if(save):
warnings.filterwarnings('ignore')
        plt.savefig('../export/graphs/' + file_name)
else:
plt.show()
def print_cases(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[2]), end = '')
print('%13s'%(header[3]), end = '')
print('%13s'%(header[4]), end = '')
print('%13s'%(header[18]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[1][i]), '%12s'%(data[7][i]), '%12s'%(str(data[8][i])[:8]), '%12s'%(data[18][i]))
def print_deaths(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[5]), end = '')
print('%13s'%(header[6]), end = '')
print('%13s'%(header[7]), end = '')
print('%13s'%(header[5]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[2][i]), '%12s'%(data[9][i]), '%12s'%(data[10][i]), '%12s'%(data[17][i]))
def print_tests(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[14]), end = '')
print('%13s'%(header[15]), end = '')
print('%13s'%(header[16]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[3][i]), '%12s'%(data[15][i]), '%12s'%(data[16][i]))
def print_recovered(header, data):
    np.set_printoptions(precision=3)
################################################################################
# Copyright (c) 2009-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Coordinate conversions not found in PyEphem."""
from __future__ import print_function, division, absolute_import
import numpy as np
# --------------------------------------------------------------------------------------------------
# --- Geodetic coordinate transformations
# --------------------------------------------------------------------------------------------------
def lla_to_ecef(lat_rad, long_rad, alt_m):
"""Convert WGS84 spherical coordinates to ECEF cartesian coordinates.
This converts a position on the Earth specified in geodetic latitude,
longitude and altitude to earth-centered, earth-fixed (ECEF) cartesian
coordinates. This code assumes the WGS84 earth model, described in
[NIMA2004]_.
Parameters
----------
lat_rad : float or array
Latitude (customary geodetic, not geocentric), in radians
long_rad : float or array
Longitude, in radians
alt_m : float or array
Altitude, in metres above WGS84 ellipsoid
Returns
-------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
References
----------
.. [NIMA2004] National Imagery and Mapping Agency, "Department of Defense
World Geodetic System 1984," NIMA TR8350.2, Page 4-4, last updated
June, 2004.
"""
# WGS84 Defining Parameters
a = 6378137.0 # semi-major axis of Earth in m
f = 1.0 / 298.257223563 # flattening of Earth
# WGS84 derived geometric constants
e2 = 2 * f - f ** 2 # first eccentricity squared
# intermediate calculation
# (normal, or prime vertical radius of curvature)
R = a / np.sqrt(1.0 - e2 * np.sin(lat_rad) ** 2)
x_m = (R + alt_m) * np.cos(lat_rad) * np.cos(long_rad)
y_m = (R + alt_m) * np.cos(lat_rad) * np.sin(long_rad)
z_m = ((1.0 - e2) * R + alt_m) * np.sin(lat_rad)
return x_m, y_m, z_m
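# Quick sanity check (illustrative): the equator/prime-meridian point at zero
# altitude maps onto the WGS84 semi-major axis:
# >>> lla_to_ecef(0.0, 0.0, 0.0)
# (6378137.0, 0.0, 0.0)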
def ecef_to_lla(x_m, y_m, z_m):
"""Convert ECEF cartesian coordinates to WGS84 spherical coordinates.
This converts an earth-centered, earth-fixed (ECEF) cartesian position to a
position on the Earth specified in geodetic latitude, longitude and altitude.
This code assumes the WGS84 earth model.
Parameters
----------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
Returns
-------
lat_rad : float or array
Latitude (customary geodetic, not geocentric), in radians
long_rad : float or array
Longitude, in radians
alt_m : float or array
Altitude, in metres above WGS84 ellipsoid
Notes
-----
Based on the most accurate algorithm according to Zhu [zhu]_, which is
summarised by Kaplan [kaplan]_ and described in the Wikipedia entry [geo]_.
.. [zhu] <NAME>, "Conversion of Earth-centered Earth-fixed coordinates to
geodetic coordinates," Aerospace and Electronic Systems, IEEE Transactions
on, vol. 30, pp. 957-961, 1994.
.. [kaplan] Kaplan, "Understanding GPS: principles and applications," 1 ed.,
Norwood, MA 02062, USA: Artech House, Inc, 1996.
.. [geo] Wikipedia entry, "Geodetic system", 2009.
"""
# WGS84 Defining Parameters
a = 6378137.0 # semi-major axis of Earth in m
f = 1.0 / 298.257223563 # flattening of Earth
# WGS84 derived geometric constants
b = a * (1.0 - f) # semi-minor axis in m
e2 = 2 * f - f ** 2 # first eccentricity squared
ep2 = f * (2.0 - f) / (1.0 - f) ** 2 # second eccentricity squared
# Define squared terms for convenience
a2, b2 = a ** 2, b ** 2
x2, y2, z2 = x_m ** 2, y_m ** 2, z_m ** 2
r = np.sqrt(x2 + y2)
E2 = a2 - b2
F = 54.0 * b2 * z2
G = r ** 2 + (1 - e2) * z2 - e2 * E2
C = (e2 ** 2 * F * r ** 2) / (G ** 3)
S = (1.0 + C + np.sqrt(C ** 2 + 2 * C)) ** (1. / 3.)
P = F / (3.0 * (S + 1.0 / S + 1.0) ** 2 * G ** 2)
    Q = np.sqrt(1.0 + 2.0 * e2 ** 2 * P)
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy import stats
from tensorflow.keras import layers
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from itertools import product
from .layers import *
from .utils import get_interaction_list
class GAMINet(tf.keras.Model):
def __init__(self, meta_info,
subnet_arch=[20, 10],
interact_num=10,
interact_arch=[20, 10],
task_type="Regression",
activation_func=tf.tanh,
main_grid_size=41,
interact_grid_size=41,
lr_bp=0.001,
batch_size=500,
main_effect_epochs=2000,
interaction_epochs=2000,
tuning_epochs=50,
loss_threshold_main=0.01,
loss_threshold_inter=0.01,
val_ratio=0.2,
early_stop_thres=100,
random_state=0,
threshold =0.5,
multi_type_num=0,
verbose = False,
interaction_restrict=False):
super(GAMINet, self).__init__()
# Parameter initiation
self.meta_info = meta_info
self.input_num = len(meta_info) - 1
self.task_type = task_type
self.subnet_arch = subnet_arch
self.main_grid_size = main_grid_size
self.interact_grid_size = interact_grid_size
self.activation_func = activation_func
self.interact_arch = interact_arch
self.max_interact_num = int(round(self.input_num * (self.input_num - 1) / 2))
self.interact_num = min(interact_num, self.max_interact_num)
self.interact_num_added = 0
self.interaction_list = []
self.loss_threshold_main = loss_threshold_main
self.loss_threshold_inter = loss_threshold_inter
self.lr_bp = lr_bp
self.batch_size = batch_size
self.tuning_epochs = tuning_epochs
self.main_effect_epochs = main_effect_epochs
self.interaction_epochs = interaction_epochs
self.verbose = verbose
self.early_stop_thres = early_stop_thres
self.random_state = random_state
self.threshold = threshold
self.interaction_restrict = interaction_restrict
self.multi_type_num = multi_type_num
np.random.seed(random_state)
tf.random.set_seed(random_state)
self.categ_variable_num = 0
self.numerical_input_num = 0
self.categ_variable_list = []
self.categ_index_list = []
self.numerical_index_list = []
self.numerical_variable_list = []
self.variables_names = []
self.feature_type_list = []
self.interaction_status = False
self.user_feature_list = []
self.item_feature_list = []
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["source"] == "user":
self.user_feature_list.append(indice)
elif feature_info["source"] == "item":
self.item_feature_list.append(indice)
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["type"] == "target":
continue
elif feature_info["type"] == "categorical":
self.categ_variable_num += 1
self.categ_index_list.append(indice)
self.feature_type_list.append("categorical")
self.categ_variable_list.append(feature_name)
elif feature_info["type"] == "id":
continue
else:
self.numerical_input_num +=1
self.numerical_index_list.append(indice)
self.feature_type_list.append("continuous")
self.numerical_variable_list.append(feature_name)
self.variables_names.append(feature_name)
print(self.variables_names)
self.interact_num = len([item for item in product(self.user_feature_list, self.item_feature_list)])
# build
self.maineffect_blocks = MainEffectBlock(meta_info=self.meta_info,
numerical_index_list=list(self.numerical_index_list),
categ_index_list=self.categ_index_list,
subnet_arch=self.subnet_arch,
activation_func=self.activation_func,
grid_size=self.main_grid_size)
self.interact_blocks = InteractionBlock(interact_num=self.interact_num,
meta_info=self.meta_info,
interact_arch=self.interact_arch,
activation_func=self.activation_func,
grid_size=self.interact_grid_size)
self.output_layer = OutputLayer(input_num=self.input_num,
interact_num=self.interact_num,
task_type=self.task_type,
multi_type_num = self.multi_type_num)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr_bp)
if self.task_type == "Regression":
#self.loss_fn = tf.keras.losses.MeanSquaredError()
self.loss_fn = tf.keras.losses.MeanAbsoluteError()
elif self.task_type == "Classification":
self.loss_fn = tf.keras.losses.BinaryCrossentropy()
elif self.task_type == "MultiClassification":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
elif self.task_type == "Ordinal_Regression":
self.loss_fn = tf.keras.losses.CategoricalCrossentropy()
else:
print(self.task_type)
raise ValueError("The task type is not supported")
def call(self, inputs, main_effect_training=False, interaction_training=False):
self.maineffect_outputs = self.maineffect_blocks(inputs, training=main_effect_training)
if self.interaction_status:
self.interact_outputs = self.interact_blocks(inputs, training=interaction_training)
else:
self.interact_outputs = tf.zeros([inputs.shape[0], self.interact_num])
concat_list = [self.maineffect_outputs]
if self.interact_num > 0:
concat_list.append(self.interact_outputs)
if self.task_type == "Regression":
output = self.output_layer(tf.concat(concat_list, 1))
elif self.task_type == "Classification":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "Ordinal_Regression":
output = tf.nn.sigmoid(self.output_layer(tf.concat(concat_list, 1)))
elif self.task_type == "MultiClassification":
output = tf.nn.softmax(self.output_layer(tf.concat(concat_list, 1)))
else:
raise ValueError("The task type is not supported")
return output
@tf.function
def predict_graph(self, x, main_effect_training=False, interaction_training=False):
return self.__call__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
def predict_initial(self, x, main_effect_training=False, interaction_training=False):
try:
self.task_type = 'Regression'
return self.__call__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training)
finally:
self.task_type = 'Classification'
def predict(self, x):
if self.task_type == "Ordinal_Regression":
ind = self.scan(self.predict_graph(x).numpy(),self.threshold)
return tf.keras.backend.eval(ind)
if self.task_type == "MultiClassification":
ind = tf.argmax(self.predict_graph(x).numpy(),axis=1)
return tf.keras.backend.eval(ind)
return self.predict_graph(x).numpy()
@tf.function
def evaluate_graph_init(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__call__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
@tf.function
def evaluate_graph_inter(self, x, y, main_effect_training=False, interaction_training=False):
return self.loss_fn(y, self.__call__(tf.cast(x, tf.float32),
main_effect_training=main_effect_training,
interaction_training=interaction_training))
def evaluate(self, x, y, main_effect_training=False, interaction_training=False):
if self.interaction_status:
return self.evaluate_graph_inter(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).numpy()
else:
return self.evaluate_graph_init(x, y,
main_effect_training=main_effect_training,
interaction_training=interaction_training).numpy()
@tf.function
def train_main_effect(self, inputs, labels, main_effect_training=True, interaction_training=False):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.append(self.output_layer.main_effect_weights)
train_weights.append(self.output_layer.ordinal_bias)
else:
train_weights = self.maineffect_blocks.weights
train_weights.append(self.output_layer.main_effect_weights)
train_weights.append(self.output_layer.main_effect_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.append(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_interaction(self, inputs, labels, main_effect_training=False, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.interact_blocks.weights
train_weights.append(self.output_layer.interaction_weights)
train_weights.append(self.output_layer.interaction_output_bias)
else:
train_weights = self.interact_blocks.weights
train_weights.append(self.output_layer.interaction_weights)
train_weights.append(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights)):
if train_weights[i].name in trainable_weights_names:
train_weights_list.append(train_weights[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
@tf.function
def train_all(self, inputs, labels, main_effect_training=True, interaction_training=True):
with tf.GradientTape() as tape:
pred = self.__call__(inputs, main_effect_training=main_effect_training,
interaction_training=interaction_training)
total_loss = self.loss_fn(labels, pred)
if self.task_type == "Ordinal_Regression":
train_weights = self.maineffect_blocks.weights
train_weights.append(self.output_layer.main_effect_weights)
train_weights.append(self.output_layer.ordinal_bias)
else:
train_weights_main = self.maineffect_blocks.weights
train_weights_main.append(self.output_layer.main_effect_weights)
train_weights_main.append(self.output_layer.main_effect_output_bias)
train_weights_inter = self.interact_blocks.weights
train_weights_inter.append(self.output_layer.interaction_weights)
train_weights_inter.append(self.output_layer.interaction_output_bias)
train_weights_list = []
trainable_weights_names = [self.trainable_weights[j].name for j in range(len(self.trainable_weights))]
for i in range(len(train_weights_main)):
if train_weights_main[i].name in trainable_weights_names:
train_weights_list.append(train_weights_main[i])
for i in range(len(train_weights_inter)):
if train_weights_inter[i].name in trainable_weights_names:
train_weights_list.append(train_weights_inter[i])
grads = tape.gradient(total_loss, train_weights_list)
self.optimizer.apply_gradients(zip(grads, train_weights_list))
def get_main_effect_rank(self,j, tr_x):
sorted_index = np.array([])
componment_scales = [0 for i in range(self.input_num)]
beta = []
for i in range(self.input_num):
beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))
#main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
#beta = (self.output_layer.main_effect_weights[:,j].numpy() * np.array([main_effect_norm]))
if np.sum(np.abs(beta)) > 10**(-10):
componment_scales = (np.abs(beta) / np.sum(np.abs(beta))).reshape([-1])
sorted_index = np.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_interaction_rank(self,j, tr_x):
sorted_index = np.array([])
componment_scales = [0 for i in range(self.interact_num_added)]
gamma = []
if self.interact_num_added > 0:
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
#interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
#gamma = (self.output_layer.interaction_weights[:,j].numpy()[:self.interact_num_added]
# * np.array([interaction_norm]).reshape([-1, 1]))[0]
if np.sum(np.abs(gamma)) > 10**(-10):
componment_scales = (np.abs(gamma) / np.sum(np.abs(gamma))).reshape([-1])
sorted_index = np.argsort(componment_scales)[::-1]
return sorted_index, componment_scales
def get_all_active_rank(self,class_,tr_x):
#main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
#beta = (self.output_layer.main_effect_weights[:,class_].numpy() * np.array([main_effect_norm])
# * self.output_layer.main_effect_switcher[:,class_].numpy()).reshape([-1, 1])
beta = []
gamma = []
for i in range(self.input_num):
beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
beta = np.array(beta * self.output_layer.main_effect_switcher[:,class_].numpy()).reshape(-1,1)
gamma = np.array(gamma * self.output_layer.interaction_switcher[:,class_].numpy()).reshape(-1,1)
#interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
#gamma = (self.output_layer.interaction_weights[:,class_].numpy()[:self.interact_num_added]
# * np.array([interaction_norm])
# * self.output_layer.interaction_switcher[:,class_].numpy()[:self.interact_num_added]).reshape([-1, 1])
#gamma = np.vstack([gamma, np.zeros((self.interact_num - self.interact_num_added, 1)).reshape([-1, 1]) ])
componment_coefs = np.vstack([beta, gamma])
if np.sum(np.abs(componment_coefs)) > 10**(-10):
componment_scales = (np.abs(componment_coefs) / np.sum(np.abs(componment_coefs))).reshape([-1])
else:
componment_scales = [0 for i in range(self.input_num + self.interact_num_added)]
return componment_scales
def get_component(self, tr_x):
#main_effect_norm = [self.maineffect_blocks.subnets[i].moving_norm.numpy()[0] for i in range(self.input_num)]
#beta = (self.output_layer.main_effect_weights[:,0].numpy() * np.array([main_effect_norm])
# * self.output_layer.main_effect_switcher[:,0].numpy()).reshape([-1, 1])
#interaction_norm = [self.interact_blocks.interacts[i].moving_norm.numpy()[0] for i in range(self.interact_num_added)]
#gamma = (self.output_layer.interaction_weights[:,0].numpy()[:self.interact_num_added]
# * np.array([interaction_norm])
# * self.output_layer.interaction_switcher[:,0].numpy()[:self.interact_num_added]).reshape([-1, 1])
#gamma = np.vstack([gamma, np.zeros((self.interact_num - self.interact_num_added, 1)).reshape([-1, 1]) ])
beta = []
gamma = []
for i in range(self.input_num):
beta.append(np.std(self.maineffect_blocks.subnets[i](tr_x[:,i].reshape(-1,1),training=False),ddof=1))
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
inputs = tf.concat([tr_x[:,idx1].reshape(-1,1),tr_x[:,idx2].reshape(-1,1)],1)
gamma.append(np.std(self.interact_blocks.interacts[interact_id](inputs,training=False),ddof=1))
beta = np.array(beta * self.output_layer.main_effect_switcher[:,0].numpy()).reshape(-1,1)
gamma = np.array(gamma * self.output_layer.interaction_switcher[:,0].numpy()).reshape(-1,1)
return beta, gamma
def estimate_density(self, x):
n_samples = x.shape[0]
self.data_dict_density = {}
for indice in range(self.input_num):
feature_name = list(self.variables_names)[indice]
if indice in self.numerical_index_list:
sx = self.meta_info[feature_name]["scaler"]
density, bins = np.histogram(sx.inverse_transform(x[:,[indice]]), bins=10, density=True)
self.data_dict_density.update({feature_name:{"density":{"names":bins,"scores":density}}})
elif indice in self.categ_index_list:
unique, counts = np.unique(x[:, indice], return_counts=True)
density = np.zeros((len(self.meta_info[feature_name]["values"])))
density[unique.astype(int)] = counts / n_samples
self.data_dict_density.update({feature_name:{"density":{"names":np.arange(len(self.meta_info[feature_name]["values"])),
"scores":density}}})
def coding(self,y):
re = np.zeros((y.shape[0],4))
for i in range(y.shape[0]):
if y[i]== 1:
re[i] = np.array([0,0,0,0])
elif y[i] ==2:
re[i] = np.array([1,0,0,0])
elif y[i] ==3:
re[i] = np.array([1,1,0,0])
elif y[i] ==4:
re[i] = np.array([1,1,1,0])
elif y[i] ==5:
re[i] = np.array([1,1,1,1])
return re
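    # Illustrative encoding (comments only): an ordinal label of 3 maps to the
    # cumulative vector [1, 1, 0, 0], i.e. "at least 2" and "at least 3" hold.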
def scan(self, x, threshold):
res = np.zeros((x.shape[0],1))
for i in range(x.shape[0]):
res[i] = 5
for j in range(x.shape[1]):
if x[i,j] < threshold:
res[i] = j+1
break
#elif j==4:
# res[i] = j+1
# break
return res
def fit_main_effect(self, tr_x, tr_y, val_x, val_y):
## specify grid points
for i in range(self.input_num):
if i in self.categ_index_list:
length = len(self.meta_info[self.variables_names[i]]["values"])
input_grid = np.arange(len(self.meta_info[self.variables_names[i]]["values"]))
else:
length = self.main_grid_size
input_grid = np.linspace(0, 1, length)
pdf_grid = np.ones([length]) / length
self.maineffect_blocks.subnets[i].set_pdf(np.array(input_grid, dtype=np.float32).reshape([-1, 1]),
np.array(pdf_grid, dtype=np.float32).reshape([1, -1]))
last_improvement = 0
best_validation = np.inf
train_size = tr_x.shape[0]
for epoch in range(self.main_effect_epochs):
if self.task_type != "Ordinal_Regression":
shuffle_index = np.arange(tr_x.shape[0])
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_training.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_training.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects training epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_training[-1], self.err_val_main_effect_training[-1]))
if self.err_val_main_effect_training[-1] < best_validation:
best_validation = self.err_val_main_effect_training[-1]
last_improvement = epoch
if epoch - last_improvement > self.early_stop_thres:
if self.verbose:
print("Early stop at epoch %d, with validation loss: %0.5f" % (epoch + 1, self.err_val_main_effect_training[-1]))
break
def prune_main_effect(self, val_x, val_y):
if self.multi_type_num == 0:
self.main_effect_val_loss = []
sorted_index, componment_scales = self.get_main_effect_rank(0,self.tr_x)
self.output_layer.main_effect_switcher.assign(tf.constant(np.zeros((self.input_num, 1)), dtype=tf.float32))
self.main_effect_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.input_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[selected_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.append(val_loss)
best_loss = np.min(self.main_effect_val_loss)
if np.sum((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = np.where((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = np.argmin(self.main_effect_val_loss)
self.active_main_effect_index = sorted_index[:best_idx]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[self.active_main_effect_index] = 1
self.output_layer.main_effect_switcher.assign(tf.constant(main_effect_switcher, dtype=tf.float32))
else:
self.active_main_effect_index = []
for i in range(self.multi_type_num):
tmp1 = self.output_layer.main_effect_switcher.numpy()
tmp1[:,i] = np.zeros(self.input_num).ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp1, dtype=tf.float32))
sorted_index, componment_scales = self.get_main_effect_rank(i)
self.main_effect_val_loss = []
self.main_effect_val_loss.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False) )
for idx in range(self.input_num):
selected_index = sorted_index[:(idx + 1)]
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[selected_index] = 1
tmp = self.output_layer.main_effect_switcher.numpy()
tmp[:,i] = main_effect_switcher.ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp, dtype=tf.float32))
val_loss = self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False)
self.main_effect_val_loss.append(val_loss)
best_loss = np.min(self.main_effect_val_loss)
if np.sum((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main) > 0:
best_idx = np.where((self.main_effect_val_loss / best_loss - 1) < self.loss_threshold_main)[0][0]
else:
best_idx = np.argmin(self.main_effect_val_loss)
self.active_main_effect_index.append(sorted_index[:best_idx])
main_effect_switcher = np.zeros((self.input_num, 1))
main_effect_switcher[self.active_main_effect_index[-1].astype(int)] = 1
tmp2 = self.output_layer.main_effect_switcher.numpy()
tmp2[:,i] = main_effect_switcher.ravel()
self.output_layer.main_effect_switcher.assign(tf.constant(tmp2, dtype=tf.float32))
def fine_tune_main_effect(self, tr_x, tr_y, val_x, val_y):
train_size = tr_x.shape[0]
for epoch in range(self.tuning_epochs):
shuffle_index = np.arange(tr_x.shape[0])
np.random.shuffle(shuffle_index)
tr_x = tr_x[shuffle_index]
tr_y = tr_y[shuffle_index]
for iterations in range(train_size // self.batch_size):
offset = (iterations * self.batch_size) % train_size
batch_xx = tr_x[offset:(offset + self.batch_size), :]
batch_yy = tr_y[offset:(offset + self.batch_size)]
self.train_main_effect(tf.cast(batch_xx, tf.float32), batch_yy)
self.err_train_main_effect_tuning.append(self.evaluate(tr_x, tr_y, main_effect_training=False, interaction_training=False))
self.err_val_main_effect_tuning.append(self.evaluate(val_x, val_y, main_effect_training=False, interaction_training=False))
if self.verbose & (epoch % 1 == 0):
print("Main effects tuning epoch: %d, train loss: %0.5f, val loss: %0.5f" %
(epoch + 1, self.err_train_main_effect_tuning[-1], self.err_val_main_effect_tuning[-1]))
def add_interaction(self, tr_x, tr_y, val_x, val_y):
tr_pred = self.__call__(tf.cast(tr_x, tf.float32), main_effect_training=False, interaction_training=False).numpy().astype(np.float64)
val_pred = self.__call__(tf.cast(val_x, tf.float32), main_effect_training=False, interaction_training=False).numpy().astype(np.float64)
if self.multi_type_num == 0:
interaction_list_all = get_interaction_list(tr_x, val_x, tr_y.ravel(), val_y.ravel(),
tr_pred.ravel(), val_pred.ravel(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=self.active_main_effect_index,
user_feature_list=self.user_feature_list,
item_feature_list=self.item_feature_list,
interaction_restrict=self.interaction_restrict)
self.interaction_list = interaction_list_all[:self.interact_num]
self.interact_num_added = len(self.interaction_list)
interaction_switcher = np.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_added] = 1
self.output_layer.interaction_switcher.assign(tf.constant(interaction_switcher, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
else:
active_index_inter = []
for fe_num in range(self.input_num):
count_int = 0
for num in range(self.multi_type_num):
if (self.active_main_effect_index[num]==fe_num).sum()==1:
count_int = count_int +1
if count_int > self.multi_type_num/5:
active_index_inter.append(fe_num)
interaction_list_all = get_interaction_list(tr_x, val_x, tr_y.ravel(), val_y.ravel(),
tr_pred.ravel(), val_pred.ravel(),
self.variables_names,
self.feature_type_list,
task_type=self.task_type,
active_main_effect_index=active_index_inter)
self.interaction_list = interaction_list_all[:self.interact_num]
self.interact_num_added = len(self.interaction_list)
interaction_switcher = np.zeros((self.interact_num, 1))
interaction_switcher[:self.interact_num_added] = 1
for i in range(self.multi_type_num):
tmp = self.output_layer.interaction_switcher.numpy()
tmp[:,i] = interaction_switcher.ravel()
self.output_layer.interaction_switcher.assign(tf.constant(tmp, dtype=tf.float32))
self.interact_blocks.set_interaction_list(self.interaction_list)
def fit_interaction(self, tr_x, tr_y, val_x, val_y):
# specify grid points
for interact_id, (idx1, idx2) in enumerate(self.interaction_list):
feature_name1 = self.variables_names[idx1]
feature_name2 = self.variables_names[idx2]
if feature_name1 in self.categ_variable_list:
length1 = len(self.meta_info[feature_name1]["values"])
length1_grid = np.arange(length1)
else:
length1 = self.interact_grid_size
length1_grid = np.linspace(0, 1, length1)
if feature_name2 in self.categ_variable_list:
length2 = len(self.meta_info[feature_name2]["values"])
                length2_grid = np.arange(length2)
"""
See explanation below in the __name__ guard.
"""
from cartpole import Controller, CartPole, simulate, G
from nominal_control import ControlLQR
import numpy as np
from qpsolvers import solve_qp
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.animation as animation
class ASIF(Controller):
"""Active Set Invariance Filter
Implementation of the popular CBF-QP
ASIF takes in the nominal control signal(u_nom) and filters it to
generate u_filtered. Under the hood it is an optimization problem
with following objective function:
u_f = argmin || u_f - u_nom ||^2
s.t. h_dot(x, u_f) >= -gamma*h(x)
___________________ ________
x | | u_nom | | u_filtered
-----> | nominal control | ------> | ASIF | ------------->
|___________________| |________|
"""
def __init__(self, nominal_control, cp: CartPole, barrier_cart_vel,
gamma, asif_enabled=True, use_nonlinear_dynamics=True):
"""
For our case of cartpole, limitations on cart velocity is enforced
by this ASIF.
"""
self.nominal_control = nominal_control
self.cp = cp
self.barrier_cart_vel = barrier_cart_vel
self.gamma = gamma
self.asif_enabled = asif_enabled
self.use_nonlinear_dynamics = use_nonlinear_dynamics
if self.use_nonlinear_dynamics:
self._h_dot = self._h_dot_nonlinear
else:
self._h_dot = self._h_dot_linear
self._log = {
'cbf_nominal': [],
'cbf_filtered': [],
'qp_g_nominal': [],
'qp_g_filtered': [],
'qp_h': [],
'u_nom': [],
'u_filtered': [],
}
def control_law(self, state):
u_nominal = self.nominal_control(state)
u_filtered = self._asif(u_nominal, state)
if self.asif_enabled is False:
u_filtered = u_nominal
# if np.isclose(u_filtered, u_nominal)[0] == False:
# print(f"ASIF active! {u_nominal=}, {u_filtered=}")
return u_filtered
def _asif(self, u_nominal, state):
m_1 = self.cp.m_1
m_2 = self.cp.m_2
l = self.cp.l
# objective function, same for all CBF-QP
p = np.array([1.])
q = np.array([-u_nominal])
if self.use_nonlinear_dynamics:
# the terms come from self.h_dot_nonlinear, organized for standart
# qp solver format
delta = m_2*np.sin(state[2])**2 + m_1
if state[1] >= 0:
g = np.array([1/delta])
h = -1 * np.array([m_2*l*(state[3]**2)*np.sin(state[2])/delta \
+ m_2*G*np.sin(state[2])*np.cos(state[2])/delta]) \
+ self.gamma*(self._h(state))
else:
g = np.array([-1/delta])
h = np.array([m_2*l*(state[3]**2)*np.sin(state[2])/delta \
+ m_2*G*np.sin(state[2])*np.cos(state[2])/delta]) \
+ self.gamma*(self._h(state))
else:
# the terms come from self.h_dot_linear, organized for standart
# qp solver format
if state[1] >= 0:
g = np.array([1/m_1])
h = np.array([m_2*G/m_1*state[2] + self.gamma*self._h(state)])
else:
g = np.array([-1/m_1])
h = np.array([-m_2*G/m_1*state[2] + self.gamma*self._h(state)])
u_filtered = solve_qp(p, q, g, h,
# lb=np.array([-80.]),
# ub=np.array([80.]),
solver="cvxopt")
self._log['cbf_filtered'].append(self.cbf_cstr(state, u_filtered))
self._log['cbf_nominal'].append(self.cbf_cstr(state, u_nominal))
self._log['qp_g_filtered'].append(g@u_filtered)
self._log['qp_g_nominal'].append(g@u_nominal)
self._log['qp_h'].append(h)
self._log['u_nom'].append(u_nominal)
self._log['u_filtered'].append(u_filtered)
return u_filtered
def _h(self, state):
if state[1] >= 0:
return self.barrier_cart_vel - state[1]
else:
return self.barrier_cart_vel + state[1]
def _h_dot_nonlinear(self, state, u):
""" Equations from cartpole._gen_dynamics._dynamics"""
m_1 = self.cp.m_1
m_2 = self.cp.m_2
l = self.cp.l
delta = m_2*np.sin(state[2])**2 + m_1
if state[1] >= 0:
return -1 * (m_2*l*(state[3]**2)*np.sin(state[2])/delta \
+ m_2*G*np.sin(state[2])*np.cos(state[2])/delta) - u/delta
else:
return (m_2*l*(state[3]**2)*np.sin(state[2])/delta \
                + m_2*G*np.sin(state[2])*np.cos(state[2])/delta) + u/delta
import os, sys, random, time, copy
from skimage import io, transform
import numpy as np
import scipy.io as sio
from scipy import misc
import matplotlib.pyplot as plt
import PIL.Image
import skimage.transform
import blosc, struct
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.bin'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
class Joint_xLabel_train_dataLoader(Dataset):
def __init__(self, real_root_dir, syn_root_dir, size=[240, 320], rgb=True, downsampleDepthFactor=1, paired_data=False):
self.real_root_dir = real_root_dir
self.syn_root_dir = syn_root_dir
self.size = size
self.rgb = rgb
self.current_set_len = 0
self.real_path2files = []
self.syn_path2files = []
self.downsampleDepthFactor = downsampleDepthFactor
self.NYU_MIN_DEPTH_CLIP = 0.0
self.NYU_MAX_DEPTH_CLIP = 10.0
self.paired_data = paired_data # whether 1 to 1 matching
self.augment = None # whether to augment each batch data
self.x_labels = False # whether to collect extra labels in synthetic data, such as segmentation or instance boundaries
self.set_name = 'train' # Joint_xLabel_train_dataLoader is only used in training phase
real_curfilenamelist = os.listdir(os.path.join(self.real_root_dir, self.set_name, 'rgb'))
for fname in sorted(real_curfilenamelist):
if is_image_file(fname):
path = os.path.join(self.real_root_dir, self.set_name, 'rgb', fname)
self.real_path2files.append(path)
self.real_set_len = len(self.real_path2files)
syn_curfilenamelist = os.listdir(os.path.join(self.syn_root_dir, self.set_name, 'rgb'))
for fname in sorted(syn_curfilenamelist):
if is_image_file(fname):
path = os.path.join(self.syn_root_dir, self.set_name, 'rgb', fname)
self.syn_path2files.append(path)
self.syn_set_len = len(self.syn_path2files)
self.TF2tensor = transforms.ToTensor()
self.TF2PIL = transforms.ToPILImage()
self.TFNormalize = transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))
self.funcResizeTensor = nn.Upsample(size=self.size, mode='nearest', align_corners=None)
self.funcResizeDepth = nn.Upsample(size=[int(self.size[0]*self.downsampleDepthFactor),
int(self.size[1]*self.downsampleDepthFactor)],
mode='nearest', align_corners=None)
def __len__(self):
# looping over real dataset
return self.real_set_len
def __getitem__(self, idx):
real_filename = self.real_path2files[idx % self.real_set_len]
rand_idx = random.randint(0, self.syn_set_len - 1)
if self.paired_data:
assert self.real_set_len == self.syn_set_len
syn_filename = self.syn_path2files[idx]
else:
syn_filename = self.syn_path2files[rand_idx]
if np.random.random(1) > 0.5:
self.augment = True
else:
self.augment = False
real_img, real_depth = self.fetch_img_depth(real_filename)
syn_img, syn_depth = self.fetch_img_depth(syn_filename)
return_dict = {'real': [real_img, real_depth], 'syn': [syn_img, syn_depth]}
if self.x_labels:
# not really used in this project
extra_label_list = self.fetch_syn_extra_labels(syn_filename)
return_dict = {'real': [real_img, real_depth], 'syn': [syn_img, syn_depth], 'syn_extra_labels': extra_label_list}
return return_dict
def fetch_img_depth(self, filename):
image = PIL.Image.open(filename)
image = np.array(image, dtype=np.float32) / 255.
if self.set_name == 'train':
depthname = filename.replace('rgb','depth_inpainted').replace('png','bin')
else:
# use real depth for validation and testing
depthname = filename.replace('rgb','depth').replace('png','bin')
depth = read_array_compressed(depthname)
if self.set_name=='train' and self.augment:
image = np.fliplr(image).copy()
depth = np.fliplr(depth).copy()
# rescale depth samples in training phase
if self.set_name == 'train':
depth = np.clip(depth, self.NYU_MIN_DEPTH_CLIP, self.NYU_MAX_DEPTH_CLIP) # [0, 10]
depth = ((depth/self.NYU_MAX_DEPTH_CLIP) - 0.5) * 2.0 # [-1, 1]
image = self.TF2tensor(image)
image = self.TFNormalize(image)
image = image.unsqueeze(0)
depth = np.expand_dims(depth, 2)
depth = self.TF2tensor(depth)
depth = depth.unsqueeze(0)
if "nyu" in filename:
image = processNYU_tensor(image)
depth = processNYU_tensor(depth)
image = self.funcResizeTensor(image)
depth = self.funcResizeTensor(depth)
if self.downsampleDepthFactor != 1:
depth = self.funcResizeDepth(depth)
if self.rgb:
image = image.squeeze(0)
else:
image = image.mean(1)
image = image.squeeze(0).unsqueeze(0)
depth = depth.squeeze(0)
return image, depth
def fetch_syn_extra_labels(self, filename):
# currently only fetch segmentation labels and instance boundaries
seg_name = filename.replace('rgb','semantic_seg')
ib_name = filename.replace('rgb','instance_boundary')
seg_np = np.array(PIL.Image.open(seg_name), dtype=np.float32)
ib_np = np.array(PIL.Image.open(ib_name), dtype=np.float32)
if self.set_name=='train' and self.augment:
seg_np = np.fliplr(seg_np).copy()
ib_np = np.fliplr(ib_np).copy()
seg_np = np.expand_dims(seg_np, 2)
seg_tensor = self.TF2tensor(seg_np)
        ib_np = np.expand_dims(ib_np, 2)
        ib_tensor = self.TF2tensor(ib_np)
        return [seg_tensor, ib_tensor]
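# Illustrative usage sketch; the directory paths are hypothetical placeholders
# and each root is expected to contain <root>/train/rgb/ plus the matching
# depth folders read above.
def _demo_joint_loader():
    dataset = Joint_xLabel_train_dataLoader(real_root_dir='/data/nyu_v2',
                                            syn_root_dir='/data/pbrs',
                                            size=[240, 320], rgb=True,
                                            paired_data=False)
    loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
    batch = next(iter(loader))
    real_img, real_depth = batch['real']   # batched tensors
    syn_img, syn_depth = batch['syn']
    return real_img.shape, syn_img.shape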
import numpy as np
from scipy.optimize import root_scalar
class sieplasmajet(object):
def __init__(self, theta_E_g, eta, phi, psi0_plasma_num, theta_0_num, B, C, delta_rs, deltab_10, deltab_20):
self.theta_E_g = theta_E_g
self.eta = eta
self.phi = phi
self.psi0_plasma_num = psi0_plasma_num
self.theta_0_num = theta_0_num
self.B = B
self.C = C
self.delta_rs = delta_rs
self.deltab_10 = deltab_10
self.deltab_20 = deltab_20
def f(r):
tmp_f = r - theta_E_g + C/r * (r/B/theta_0_num)**C * psi0_plasma_num * np.exp(-(r/B/theta_0_num)**C)
return tmp_f
zero = root_scalar(f, bracket=[theta_E_g*.1, theta_E_g*1.9], method='bisect')
self.theta_E = zero.root
self.r = zero.root
r = self.r
tmp_psi = theta_E_g*r*np.sqrt(1.-eta*np.cos(2.*phi)) + \
psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
self.psi = tmp_psi
tmp_dpsi = theta_E_g*r*(np.sqrt( 1. - eta*np.cos(2*phi)) - 1)
self.dpsi = tmp_dpsi
tmp_psi0 = theta_E_g*r + psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
self.psi0 = tmp_psi0
tmp_psi_plasma = psi0_plasma_num*np.exp(-(r/B/theta_0_num)**C)
self.psi_plasma = tmp_psi_plasma
tmp_ddpsi_dr = theta_E_g*(np.sqrt( 1. - eta*np.cos(2*phi)) - 1)
self.ddpsi_dr = tmp_ddpsi_dr
tmp_ddpsi_dphi = theta_E_g*r*eta*np.sin(2.*phi)/np.sqrt(1.-eta*np.cos(2.*phi))
self.ddpsi_dphi = tmp_ddpsi_dphi
tmp_d2dpsi_dphi2 = theta_E_g*r*eta*( 2*np.cos(2.*phi)/np.sqrt(1.-eta*np.cos(2.*phi)) - (1.-eta*np.cos(2.*phi))**(-3/2)*eta*np.sin(2*phi)**2)
self.d2dpsi_dphi2 = tmp_d2dpsi_dphi2
tmp_d2psi0 = self.psi_plasma * ( - C*(C-1)/r**2*(r/B/theta_0_num)**C + (C/r*(r/B/theta_0_num)**C)**2 )
self.d2psi0_dr2 = tmp_d2psi0
Delta = delta_rs**2 - ( 1/r*self.ddpsi_dphi - deltab_10*np.sin(phi) + deltab_20*np.cos(phi) )**2
delta_r_1 = 1/(1 - self.d2psi0_dr2 )*(self.ddpsi_dr + deltab_10*np.cos(phi) + deltab_20*np.sin(phi) + np.sqrt(Delta))
        delta_r_2 = 1/(1 - self.d2psi0_dr2 )*(self.ddpsi_dr + deltab_10*np.cos(phi) + deltab_20*np.sin(phi) - np.sqrt(Delta))
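# Illustrative usage sketch; every number below is a hypothetical placeholder
# chosen only to show the constructor signature and the attributes it fills in.
def _demo_sieplasmajet():
    lens = sieplasmajet(theta_E_g=1.0, eta=0.05, phi=0.3,
                        psi0_plasma_num=0.05, theta_0_num=0.2,
                        B=2.0, C=3.0, delta_rs=0.1,
                        deltab_10=0.001, deltab_20=0.001)
    return lens.theta_E, lens.psi, lens.ddpsi_dphi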
import numpy as np
import lsst.pex.config as pexConfig
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
from .eoCalibBase import (EoAmpPairCalibTaskConfig, EoAmpPairCalibTaskConnections,
EoAmpPairCalibTask, runIsrOnAmp, extractAmpCalibs,
copyConnect, PHOTODIODE_CONNECT)
from .eoFlatPairData import EoFlatPairData
from .eoFlatPairUtils import DetectorResponse
__all__ = ["EoFlatPairTask", "EoFlatPairTaskConfig"]
class EoFlatPairTaskConnections(EoAmpPairCalibTaskConnections):
photodiodeData = copyConnect(PHOTODIODE_CONNECT)
outputData = cT.Output(
name="eoFlatPair",
doc="Electrial Optical Calibration Output",
storageClass="IsrCalib",
dimensions=("instrument", "detector"),
)
class EoFlatPairTaskConfig(EoAmpPairCalibTaskConfig,
pipelineConnections=EoFlatPairTaskConnections):
maxPDFracDev = pexConfig.Field("Maximum photodiode fractional deviation", float, default=0.05)
def setDefaults(self):
# pylint: disable=no-member
self.connections.outputData = "eoFlatPair"
self.isr.expectWcs = False
self.isr.doSaturation = False
self.isr.doSetBadRegions = False
self.isr.doAssembleCcd = False
self.isr.doBias = True
self.isr.doLinearize = False
self.isr.doDefect = False
self.isr.doNanMasking = False
self.isr.doWidenSaturationTrails = False
self.isr.doDark = True
self.isr.doFlat = False
self.isr.doFringe = False
self.isr.doInterpolate = False
self.isr.doWrite = False
self.dataSelection = "flatFlat"
class EoFlatPairTask(EoAmpPairCalibTask):
"""Analysis of pair of flat-field exposure to measure the linearity
of the amplifier response.
Output is stored as `lsst.eotask_gen3.EoFlatPairData` objects
"""
ConfigClass = EoFlatPairTaskConfig
_DefaultName = "eoFlatPair"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.statCtrl = afwMath.StatisticsControl()
def run(self, inputPairs, **kwargs): # pylint: disable=arguments-differ
""" Run method
Parameters
----------
inputPairs : `list` [`tuple` [`lsst.daf.Butler.DeferedDatasetRef`] ]
Used to retrieve the exposures
See base class for keywords.
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Output data in formatted tables
"""
camera = kwargs['camera']
nPair = len(inputPairs)
if nPair < 1:
raise RuntimeError("No valid input data")
det = inputPairs[0][0][0].get().getDetector()
amps = det.getAmplifiers()
ampNames = [amp.getName() for amp in amps]
outputData = self.makeOutputData(amps=ampNames, nAmps=len(amps), nPair=len(inputPairs),
camera=camera, detector=det)
photodiodePairs = kwargs.get('photodiodePairs', None)
if photodiodePairs is not None:
self.analyzePdData(photodiodePairs, outputData)
for iamp, amp in enumerate(amps):
ampCalibs = extractAmpCalibs(amp, **kwargs)
for iPair, inputPair in enumerate(inputPairs):
if len(inputPair) != 2:
self.log.warn("exposurePair %i has %i items" % (iPair, len(inputPair)))
continue
calibExp1 = runIsrOnAmp(self, inputPair[0][0].get(parameters={"amp": iamp}), **ampCalibs)
calibExp2 = runIsrOnAmp(self, inputPair[1][0].get(parameters={"amp": iamp}), **ampCalibs)
amp2 = calibExp1.getDetector().getAmplifiers()[0]
self.analyzeAmpPairData(calibExp1, calibExp2, outputData, amp2, iPair)
self.analyzeAmpRunData(outputData, iamp, amp2)
return pipeBase.Struct(outputData=outputData)
def makeOutputData(self, amps, nAmps, nPair, **kwargs): # pylint: disable=arguments-differ,no-self-use
"""Construct the output data object
Parameters
----------
amps : `Iterable` [`str`]
The amplifier names
        nAmps : `int`
Number of amplifiers
nPair : `int`
Number of exposure pairs
kwargs are passed to `lsst.eotask_gen3.EoCalib` base class constructor
Returns
-------
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
return EoFlatPairData(amps=amps, nAmp=nAmps, nPair=nPair, **kwargs)
def analyzePdData(self, photodiodeDataPairs, outputData):
""" Analyze the photodidode data and fill the output table
Parameters
----------
photodiodeDataPairs : `list` [`tuple` [`astropy.Table`] ]
The photodiode data, sorted into a list of pairs of tables
            Each table is one set of readings from one exposure
outputData : `lsst.eotask_gen3.EoFlatPairData`
Container for output data
"""
outTable = outputData.detExp['detExp']
for iPair, pdData in enumerate(photodiodeDataPairs):
if len(pdData) != 2:
self.log.warn("photodiodePair %i has %i items" % (iPair, len(pdData)))
continue
pd1 = self.getFlux(pdData[0].get())
pd2 = self.getFlux(pdData[1].get())
            if np.abs((pd1 - pd2)/((pd1 + pd2)/2.)) > self.config.maxPDFracDev:
                continue
# @Author: lshuns
# @Date: 2021-04-05, 21:44:40
# @Last modified by: lshuns
# @Last modified time: 2021-05-05, 8:44:30
### everything about Line/Point plot
__all__ = ["LinePlotFunc", "LinePlotFunc_subplots", "ErrorPlotFunc", "ErrorPlotFunc_subplots"]
import math
import logging
import numpy as np
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, LogLocator
from .CommonInternal import _vhlines
logging.basicConfig(format='%(name)s : %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def LinePlotFunc(outpath,
xvals, yvals,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, fillstyles=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_min_label=True, xtick_spe=None, ytick_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, invertX=False, ylog=False, invertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full'
plt.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_min_label:
if xlog:
ax.xaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_minor_locator(AutoMinorLocator())
if ytick_min_label:
if ylog:
ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if invertX:
plt.gca().invert_xaxis()
if invertY:
plt.gca().invert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def LinePlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, fillstyles_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_min_label=True, xtick_spe=None, ytick_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, invertX=False, ylog=False, invertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Line plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if fillstyles_list is not None:
fillstyles = fillstyles_list[i_plot]
else:
fillstyles = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if fillstyles is not None:
fillstyle = fillstyles[i]
else:
fillstyle = 'full'
ax.plot(xvl, yvl, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, fillstyle=fillstyle)
if (LABELs is not None) and (i_plot == 0):
ax.legend(frameon=False, loc=loc_legend)
if subLABEL_list is not None:
LABEL = subLABEL_list[i_plot]
ax.text(subLABEL_locX, subLABEL_locY, LABEL, transform=ax.transAxes)
if XRANGE is not None:
ax.set_xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
ax.set_ylim(YRANGE[0], YRANGE[1])
if xlog:
ax.set_xscale('log')
if ylog:
ax.set_yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths, ax=ax)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths, ax=ax)
if xtick_min_label:
if xlog:
ax.xaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_minor_locator(AutoMinorLocator())
if ytick_min_label:
if ylog:
ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if invertY:
plt.gca().invert_yaxis()
if invertX:
plt.gca().invert_xaxis()
i_plot +=1
fig.text(0.5, 0.04, XLABEL, ha='center')
fig.text(0.04, 0.5, YLABEL, va='center', rotation='vertical')
if TITLE is not None:
fig.text(0.5, 0.90, TITLE, ha='center')
if outpath == 'show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Line plot saved as", outpath)
def ErrorPlotFunc(outpath,
xvals, yvals, yerrs,
COLORs, LABELs=None, LINEs=None, LINEWs=None, POINTs=None, POINTSs=None, ERRORSIZEs=None,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_min_label=True, xtick_spe=None, ytick_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, invertX=False, ylog=False, invertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple parameters
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
fig, ax = plt.subplots()
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
yerr = np.array(yerr)
yerr = np.vstack([yerr[0], yerr[1]])
CR = COLORs[i]
if LABELs is not None:
LAB = LABELs[i]
else:
LAB = None
if LINEs is not None:
LN = LINEs[i]
else:
LN = '--'
if LINEWs is not None:
LW = LINEWs[i]
else:
LW = 1
if POINTs is not None:
PI = POINTs[i]
else:
PI = 'o'
if POINTSs is not None:
MS = POINTSs[i]
else:
MS = 2
if ERRORSIZEs is not None:
ERRORSIZE = ERRORSIZEs[i]
else:
ERRORSIZE = 2
ax.errorbar(xvl, yvl, yerr=yerr, color=CR, label=LAB, linestyle=LN, linewidth=LW, marker=PI, markersize=MS, capsize=ERRORSIZE)
if XRANGE is not None:
plt.xlim(XRANGE[0], XRANGE[1])
if YRANGE is not None:
plt.ylim(YRANGE[0], YRANGE[1])
if xlog:
plt.xscale('log')
if ylog:
plt.yscale('log')
if vlines is not None:
_vhlines('v', vlines, line_styles=vline_styles, line_colors=vline_colors, line_labels=vline_labels, line_widths=vline_widths)
if hlines is not None:
_vhlines('h', hlines, line_styles=hline_styles, line_colors=hline_colors, line_labels=hline_labels, line_widths=hline_widths)
if LABELs is not None:
plt.legend(frameon=False, loc=loc_legend)
if xtick_min_label:
if xlog:
ax.xaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.xaxis.set_minor_locator(AutoMinorLocator())
if ytick_min_label:
if ylog:
ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=None, numticks=10))
else:
ax.yaxis.set_minor_locator(AutoMinorLocator())
if xtick_spe is not None:
plt.xticks(xtick_spe[0], xtick_spe[1])
if ytick_spe is not None:
plt.yticks(ytick_spe[0], ytick_spe[1])
if invertX:
plt.gca().invert_xaxis()
if invertY:
plt.gca().invert_yaxis()
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
if TITLE is not None:
plt.title(TITLE)
if outpath=='show':
plt.show()
plt.close()
else:
plt.savefig(outpath, dpi=300)
plt.close()
plt.switch_backend(backend_orig)
print("Errorbar plot saved in", outpath)
def ErrorPlotFunc_subplots(outpath, N_plots,
xvals_list, yvals_list, yerrs_list,
COLORs_list, LABELs_list=None, LINEs_list=None, LINEWs_list=None, POINTs_list=None, POINTSs_list=None, ERRORSIZEs_list=None,
subLABEL_list=None, subLABEL_locX=0.1, subLABEL_locY=0.8,
XRANGE=None, YRANGE=None,
XLABEL=None, YLABEL=None, TITLE=None,
xtick_min_label=True, xtick_spe=None, ytick_min_label=True, ytick_spe=None,
vlines=None, vline_styles=None, vline_colors=None, vline_labels=None, vline_widths=None,
hlines=None, hline_styles=None, hline_colors=None, hline_labels=None, hline_widths=None,
xlog=False, invertX=False, ylog=False, invertY=False, loc_legend='best',
font_size=12, usetex=False):
"""
Errorbar plot for multiple subplots
"""
# font size
plt.rc('font', size=font_size)
# tex
plt.rcParams["text.usetex"] = usetex
if outpath != 'show':
backend_orig = plt.get_backend()
plt.switch_backend("agg")
N_rows = math.ceil(N_plots**0.5)
N_cols = math.ceil(N_plots/N_rows)
fig, axs = plt.subplots(N_rows, N_cols, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
i_plot = 0
for i_row in range(N_rows):
for i_col in range(N_cols):
if i_plot >= N_plots:
if N_rows == 1:
axs[i_col].axis('off')
elif N_cols == 1:
axs[i_row].axis('off')
else:
axs[i_row, i_col].axis('off')
else:
if (N_rows==1) and (N_cols == 1):
ax = axs
elif N_rows == 1:
ax = axs[i_col]
elif N_cols == 1:
ax = axs[i_row]
else:
ax = axs[i_row, i_col]
xvals = xvals_list[i_plot]
yvals = yvals_list[i_plot]
yerrs = yerrs_list[i_plot]
COLORs = COLORs_list[i_plot]
if LABELs_list is not None:
LABELs = LABELs_list[i_plot]
else:
LABELs = None
if LINEs_list is not None:
LINEs = LINEs_list[i_plot]
else:
LINEs = None
if LINEWs_list is not None:
LINEWs = LINEWs_list[i_plot]
else:
LINEWs = None
if POINTs_list is not None:
POINTs = POINTs_list[i_plot]
else:
POINTs = None
if POINTSs_list is not None:
POINTSs = POINTSs_list[i_plot]
else:
POINTSs = None
if ERRORSIZEs_list is not None:
ERRORSIZEs = ERRORSIZEs_list[i_plot]
else:
ERRORSIZEs = None
for i, xvl in enumerate(xvals):
yvl = yvals[i]
yerr = yerrs[i]
if yerr is not None:
                        yerr = np.array(yerr)
from PyUnityVibes.UnityFigure import UnityFigure
import time, math
import numpy as np
# Function of the derivative of X
def xdot(x, u):
return np.array([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])], [u[0, 0]], [u[1, 0]]])
# Function witch return the command to follow to assure the trajectory
def control(x, w, dw):
A = np.array([[-x[3, 0]*math.sin(x[2, 0]), math.cos(x[2, 0])], [x[3, 0]*math.cos(x[2, 0]), math.sin(x[2, 0])]])
y = np.array([[x[0, 0]], [x[1, 0]]])
dy = np.array([[x[3, 0]*math.cos(x[2, 0])], [x[3, 0]*math.sin(x[2, 0])]])
v = w - y + 2*(dw - dy)
return np.linalg.inv(A) @ v
# Function for the command with supervisor - alpha the time step between the follower and followed
def followSupervisor(alpha):
w = np.array([[Lx * math.sin(0.1 * (t-alpha))], [Ly * math.cos(0.1 * (t-alpha))]])
dw = np.array([[Lx * 0.1 * math.cos(0.1 * (t-alpha))], [-Ly * 0.1 * math.sin(0.1 * (t-alpha))]])
return w, dw
if __name__ == "__main__":
# Initialization of the figure
# Parameters:
# figType: the dimension of the figure (see UnityFigure.FIGURE_*)
# scene: the scene to be loaded (see UnityFigure.SCENE_*)
figure = UnityFigure(UnityFigure.FIGURE_3D, UnityFigure.SCENE_EMPTY)
time.sleep(1)
# Initialization variables
dt = 0.16
    xa = np.array([[10], [0], [1], [1]])
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
"""
import os
from copy import deepcopy
from nibabel import load, funcs, Nifti1Image
import numpy as np
from ..interfaces.base import (
BaseInterface,
traits,
InputMultiPath,
OutputMultiPath,
TraitedSpec,
File,
BaseInterfaceInputSpec,
isdefined,
)
from ..utils.filemanip import ensure_list, save_json, split_filename
from ..utils.misc import find_indices, normalize_mc_params
from .. import logging, config
iflogger = logging.getLogger("nipype.interface")
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
params : np.array (upto 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == "NIPY":
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
params = normalize_mc_params(params, source)
# process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)], [-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params) :]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ("AFNI", "FSFAST"):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
affines = [_get_affine_matrix(mc[i, :], source) for i in range(mc.shape[0])]
return _calc_norm_affine(affines, use_differences, brain_pts)
def _calc_norm_affine(affines, use_differences, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
affines : list of [4 x 4] affine matrices
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((len(affines), n_pts))
if brain_pts is not None:
displacement = np.zeros((len(affines), int(n_pts / 3)))
for i, affine in enumerate(affines):
newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = np.sqrt(
np.sum(
np.power(
np.reshape(newpos[i, :], (3, all_pts.shape[1]))
- all_pts[0:3, :],
2,
),
axis=0,
)
)
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(len(affines))
if use_differences:
newpos = np.concatenate(
(np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0
)
for i in range(newpos.shape[0]):
normdata[i] = np.max(
np.sqrt(
np.sum(
np.reshape(
np.power(np.abs(newpos[i, :]), 2), (3, all_pts.shape[1])
),
axis=0,
)
)
)
else:
from scipy.signal import detrend
newpos = np.abs(detrend(newpos, axis=0, type="constant"))
normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
return normdata, displacement
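# Illustrative sketch of the composite-norm helper on two made-up motion
# parameter rows; the "FSL" convention and the values are only examples.
def _demo_calc_norm():
    mc = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [0.01, 0.0, 0.0, 1.0, 0.5, 0.0]])
    norm, displacement = _calc_norm(mc, use_differences=True, source="FSL")
    return norm   # one composite value per time point; displacement is None here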
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
realigned_files = InputMultiPath(
File(exists=True),
desc=("Names of realigned functional data " "files"),
mandatory=True,
)
realignment_parameters = InputMultiPath(
File(exists=True),
mandatory=True,
desc=(
"Names of realignment "
"parameters corresponding to "
"the functional data files"
),
)
parameter_source = traits.Enum(
"SPM",
"FSL",
"AFNI",
"NiPy",
"FSFAST",
desc="Source of movement parameters",
mandatory=True,
)
use_differences = traits.ListBool(
[True, False],
minlen=2,
maxlen=2,
usedefault=True,
desc=(
"Use differences between successive"
" motion (first element) and "
"intensity parameter (second "
"element) estimates in order to "
"determine outliers. "
"(default is [True, False])"
),
)
use_norm = traits.Bool(
True,
usedefault=True,
requires=["norm_threshold"],
desc=(
"Uses a composite of the motion parameters in "
"order to determine outliers."
),
)
norm_threshold = traits.Float(
xor=["rotation_threshold", "translation_threshold"],
mandatory=True,
desc=(
"Threshold to use to detect motion-rela"
"ted outliers when composite motion is "
"being used"
),
)
rotation_threshold = traits.Float(
mandatory=True,
xor=["norm_threshold"],
desc=("Threshold (in radians) to use to " "detect rotation-related outliers"),
)
translation_threshold = traits.Float(
mandatory=True,
xor=["norm_threshold"],
desc=("Threshold (in mm) to use to " "detect translation-related " "outliers"),
)
zintensity_threshold = traits.Float(
mandatory=True,
desc=(
"Intensity Z-threshold use to "
"detection images that deviate "
"from the mean"
),
)
mask_type = traits.Enum(
"spm_global",
"file",
"thresh",
mandatory=True,
desc=(
"Type of mask that should be used to mask the"
" functional data. *spm_global* uses an "
"spm_global like calculation to determine the"
" brain mask. *file* specifies a brain mask "
"file (should be an image file consisting of "
"0s and 1s). *thresh* specifies a threshold "
"to use. By default all voxels are used,"
"unless one of these mask types are defined"
),
)
mask_file = File(exists=True, desc="Mask file to be used if mask_type is 'file'.")
mask_threshold = traits.Float(
desc=("Mask threshold to be used if mask_type" " is 'thresh'.")
)
intersect_mask = traits.Bool(
True,
usedefault=True,
desc=("Intersect the masks when computed from " "spm_global."),
)
save_plot = traits.Bool(
True, desc="save plots containing outliers", usedefault=True
)
plot_type = traits.Enum(
"png",
"svg",
"eps",
"pdf",
desc="file type of the outlier plot",
usedefault=True,
)
bound_by_brainmask = traits.Bool(
False,
desc=(
"use the brain mask to "
"determine bounding box"
"for composite norm (works"
"for SPM and Nipy - currently"
"inaccurate for FSL, AFNI"
),
usedefault=True,
)
global_threshold = traits.Float(
8.0,
desc=("use this threshold when mask " "type equal's spm_global"),
usedefault=True,
)
class ArtifactDetectOutputSpec(TraitedSpec):
outlier_files = OutputMultiPath(
File(exists=True),
desc=(
"One file for each functional run "
"containing a list of 0-based indices"
" corresponding to outlier volumes"
),
)
intensity_files = OutputMultiPath(
File(exists=True),
desc=(
"One file for each functional run "
"containing the global intensity "
"values determined from the "
"brainmask"
),
)
norm_files = OutputMultiPath(
File, desc=("One file for each functional run " "containing the composite norm")
)
statistic_files = OutputMultiPath(
File(exists=True),
desc=(
"One file for each functional run "
"containing information about the "
"different types of artifacts and "
"if design info is provided then "
"details of stimulus correlated "
"motion and a listing or artifacts "
"by event type."
),
)
plot_files = OutputMultiPath(
File,
desc=(
"One image file for each functional run " "containing the detected outliers"
),
)
mask_files = OutputMultiPath(
File,
desc=(
"One image file for each functional run "
"containing the mask used for global "
"signal calculation"
),
)
displacement_files = OutputMultiPath(
File,
desc=(
"One image file for each "
"functional run containing the "
"voxel displacement timeseries"
),
)
class ArtifactDetect(BaseInterface):
"""Detects outliers in a functional imaging series
Uses intensity and motion parameters to infer outliers. If `use_norm` is
True, it computes the movement of the center of each face a cuboid centered
around the head and returns the maximal movement across the centers. If you
wish to use individual thresholds instead, import `Undefined` from
`nipype.interfaces.base` and set `....inputs.use_norm = Undefined`
Examples
--------
>>> ad = ArtifactDetect()
>>> ad.inputs.realigned_files = 'functional.nii'
>>> ad.inputs.realignment_parameters = 'functional.par'
>>> ad.inputs.parameter_source = 'FSL'
>>> ad.inputs.norm_threshold = 1
>>> ad.inputs.use_differences = [True, False]
>>> ad.inputs.zintensity_threshold = 3
>>> ad.run() # doctest: +SKIP
"""
input_spec = ArtifactDetectInputSpec
output_spec = ArtifactDetectOutputSpec
def __init__(self, **inputs):
super(ArtifactDetect, self).__init__(**inputs)
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
if isinstance(motionfile, (str, bytes)):
infile = motionfile
elif isinstance(motionfile, list):
infile = motionfile[0]
else:
raise Exception("Unknown type of file")
_, filename, ext = split_filename(infile)
artifactfile = os.path.join(
output_dir, "".join(("art.", filename, "_outliers.txt"))
)
intensityfile = os.path.join(
output_dir, "".join(("global_intensity.", filename, ".txt"))
)
statsfile = os.path.join(output_dir, "".join(("stats.", filename, ".txt")))
normfile = os.path.join(output_dir, "".join(("norm.", filename, ".txt")))
plotfile = os.path.join(
output_dir, "".join(("plot.", filename, ".", self.inputs.plot_type))
)
displacementfile = os.path.join(output_dir, "".join(("disp.", filename, ext)))
maskfile = os.path.join(output_dir, "".join(("mask.", filename, ext)))
return (
artifactfile,
intensityfile,
statsfile,
normfile,
plotfile,
displacementfile,
maskfile,
)
def _list_outputs(self):
outputs = self._outputs().get()
outputs["outlier_files"] = []
outputs["intensity_files"] = []
outputs["statistic_files"] = []
outputs["mask_files"] = []
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs["norm_files"] = []
if self.inputs.bound_by_brainmask:
outputs["displacement_files"] = []
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs["plot_files"] = []
for i, f in enumerate(ensure_list(self.inputs.realigned_files)):
(
outlierfile,
intensityfile,
statsfile,
normfile,
plotfile,
displacementfile,
maskfile,
) = self._get_output_filenames(f, os.getcwd())
outputs["outlier_files"].insert(i, outlierfile)
outputs["intensity_files"].insert(i, intensityfile)
outputs["statistic_files"].insert(i, statsfile)
outputs["mask_files"].insert(i, maskfile)
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs["norm_files"].insert(i, normfile)
if self.inputs.bound_by_brainmask:
outputs["displacement_files"].insert(i, displacementfile)
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs["plot_files"].insert(i, plotfile)
return outputs
def _plot_outliers_with_wave(self, wave, outliers, name):
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
plt.plot(wave)
plt.ylim([wave.min(), wave.max()])
plt.xlim([0, len(wave) - 1])
if len(outliers):
plt.plot(
np.tile(outliers[:, None], (1, 2)).T,
np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
"r",
)
plt.xlabel("Scans - 0-based")
plt.ylabel(name)
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
"""
Core routine for detecting outliers
"""
from scipy import signal
if not cwd:
cwd = os.getcwd()
# read in functional image
if isinstance(imgfile, (str, bytes)):
nim = load(imgfile)
elif isinstance(imgfile, list):
if len(imgfile) == 1:
nim = load(imgfile[0])
else:
images = [load(f) for f in imgfile]
nim = funcs.concat_images(images)
# compute global intensity signal
(x, y, z, timepoints) = nim.shape
data = nim.get_fdata(dtype=np.float32)
affine = nim.affine
g = np.zeros((timepoints, 1))
masktype = self.inputs.mask_type
if masktype == "spm_global": # spm_global like calculation
iflogger.debug("art: using spm global")
intersect_mask = self.inputs.intersect_mask
if intersect_mask:
mask = np.ones((x, y, z), dtype=bool)
for t0 in range(timepoints):
vol = data[:, :, :, t0]
# Use an SPM like approach
mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold)
mask = mask * mask_tmp
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = np.nanmean(vol[mask])
if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
intersect_mask = False
g = np.zeros((timepoints, 1))
if not intersect_mask:
iflogger.info("not intersect_mask is True")
mask = np.zeros((x, y, z, timepoints))
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask_tmp = vol > (np.nanmean(vol) / self.inputs.global_threshold)
mask[:, :, :, t0] = mask_tmp
g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp)
elif masktype == "file": # uses a mask image to determine intensity
maskimg = load(self.inputs.mask_file)
mask = maskimg.get_fdata(dtype=np.float32)
affine = maskimg.affine
mask = mask > 0.5
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = np.nanmean(vol[mask])
elif masktype == "thresh": # uses a fixed signal threshold
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask = vol > self.inputs.mask_threshold
g[t0] = np.nanmean(vol[mask])
else:
mask = np.ones((x, y, z))
g = np.nanmean(data[mask > 0, :], 1)
# compute normalized intensity values
gz = signal.detrend(g, axis=0) # detrend the signal
if self.inputs.use_differences[1]:
gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal
iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
# read in motion parameters
mc_in = np.loadtxt(motionfile)
mc = deepcopy(mc_in)
(
artifactfile,
intensityfile,
statsfile,
normfile,
plotfile,
displacementfile,
maskfile,
) = self._get_output_filenames(imgfile, cwd)
mask_img = Nifti1Image(mask.astype(np.uint8), affine)
mask_img.to_filename(maskfile)
if self.inputs.use_norm:
brain_pts = None
if self.inputs.bound_by_brainmask:
voxel_coords = np.nonzero(mask)
coords = np.vstack(
(voxel_coords[0], np.vstack((voxel_coords[1], voxel_coords[2])))
).T
brain_pts = np.dot(
affine, np.hstack((coords, np.ones((coords.shape[0], 1)))).T
)
# calculate the norm of the motion parameters
normval, displacement = _calc_norm(
mc,
self.inputs.use_differences[0],
self.inputs.parameter_source,
brain_pts=brain_pts,
)
tidx = find_indices(normval > self.inputs.norm_threshold)
ridx = find_indices(normval < 0)
if displacement is not None:
                dmap = np.zeros((x, y, z, timepoints), dtype=np.float64)
import numpy as np
def getClosestFactors(n):
i = int(n ** 0.5)
while (n % i != 0):
i -= 1
return (i, int(n/i))
def getBoundary(x, r, n):
"""returns in the form [lower, upper)"""
lower = x - r
upper = x + r + 1
if lower < 0:
lower = 0
if upper > n:
upper = n
return (lower, upper)
def getRandomSample(array, n):
"""returns in the form (x, y, array[x, y])"""
if n > array.size:
raise ValueError("Sample size must be smaller than number of elements in array")
else:
idx = np.random.choice(array.shape[0], size=n, replace=False)
idy = np.random.choice(array.shape[1], size=n, replace=False)
sample = array[idx, idy]
return list(zip(idx, idy, sample))
def getNeighbours(array, randomSample, radius):
"""Get the neighbours of randomSample[:, 2] within a radius.
Border cases include -1 for missing neighbours."""
maxNeighbours = (2*radius + 1)**2 - 1
sampleSize = len(randomSample)
neighbours = np.full((sampleSize, maxNeighbours), -1)
height, width = array.shape
idx = list(zip(*randomSample))[0]
idy = list(zip(*randomSample))[1]
xspans = np.array([getBoundary(x, radius, height) for x in idx], dtype=np.uint32)
yspans = np.array([getBoundary(y, radius, width) for y in idy], dtype=np.uint32)
for i in range(sampleSize):
subgrid = np.ix_(range(*xspans[i]), range(*yspans[i]))
x_rel = idx[i] - xspans[i, 0]
y_rel = idy[i] - yspans[i, 0]
#get rid of patient zero in subarray
surrounding = np.delete(array[subgrid], x_rel*subgrid[1].shape[1] + y_rel)
neighbours[i, :surrounding.shape[0]] = surrounding
return neighbours
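# Illustrative sketch of the sampling and neighbourhood helpers on a toy grid;
# the grid contents and sample size are made up.
def _demo_neighbours():
    grid = np.arange(25).reshape(5, 5)
    sample = getRandomSample(grid, 3)            # [(x, y, value), ...]
    neighbours = getNeighbours(grid, sample, 1)  # radius 1; unused slots are -1
    return sample, neighbours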
def updateGrid(array, community):
"""shuffle array based on Mersenne Twister algorithm in np.random"""
#shuffle grid along both axes
np.apply_along_axis(np.random.shuffle, 1, array)
np.random.shuffle(array)
#update locations of individuals
getLoc = lambda x : (x // array.shape[0], x % array.shape[1])
r = array.ravel()
for i in range(array.size):
community.people[r[i]].updateLoc(getLoc(i))
return array
def equalGridCrossing(grid1, grid2, n):
"""Shuffle n randomly selected individuals between grid1 and grid2.
Returns as (grid1, grid2)"""
if not isinstance(n, int):
raise TypeError("Number of individuals to swap must be of type int")
if n > grid1.size or n > grid2.size:
raise ValueError("number of individuals must be less than size of grid")
id1x = np.random.choice(grid1.shape[0], size=n, replace=False)
id1y = np.random.choice(grid1.shape[1], size=n, replace=False)
id2x = np.random.choice(grid2.shape[0], size=n, replace=False)
id2y = np.random.choice(grid2.shape[1], size=n, replace=False)
grid1[id1x, id1y], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x, id1y]
return (grid1, grid2)
def unequalGridCrossing(grid1, grid2, outGrid1, outGrid2):
"""Shuffle in a way that one grid loses abs(outGrid1 - outGrid2) individuals.
If outGrid1 is equal to outGrid2 call equalGridCrossing."""
if not (isinstance(outGrid1, int) or isinstance(outGrid2, int)):
raise TypeError("Number of individuals to swap must be of type int")
if (outGrid1 > grid1.size or outGrid2 > grid2.size):
raise ValueError("Cannot relocate more than grid population")
id1x = np.random.choice(grid1.shape[0], size=outGrid1, replace=False)
id1y = np.random.choice(grid1.shape[1], size=outGrid1, replace=False)
id2x = np.random.choice(grid2.shape[0], size=outGrid2, replace=False)
id2y = np.random.choice(grid2.shape[1], size=outGrid2, replace=False)
excess = abs(outGrid1 - outGrid2)
if outGrid1 > outGrid2:
#swap individuals that can be relocated in place
grid1[id1x[:-excess], id1y[:-excess]], grid2[id2x, id2y] = grid2[id2x, id2y], grid1[id1x[:-excess], id1y[:-excess]]
#swap excess
nrow = np.full(grid2.shape[1], -1)
nrow[:excess] = grid1[id1x[outGrid2:], id1y[outGrid2:]]
#mark lost individuals in grid1 as -1
grid1[id1x[outGrid2:], id1y[outGrid2:]] = -1
#stack the new row created
grid2 = np.vstack((grid2, nrow))
elif outGrid2 > outGrid1:
grid2[id2x[:-excess], id2y[:-excess]], grid1[id1x, id1y] = grid1[id1x, id1y], grid2[id2x[:-excess], id2y[:-excess]]
        nrow = np.full(grid1.shape[1], -1)
        nrow[:excess] = grid2[id2x[outGrid1:], id2y[outGrid1:]]
        #mark lost individuals in grid2 as -1
        grid2[id2x[outGrid1:], id2y[outGrid1:]] = -1
        #stack the new row created
        grid1 = np.vstack((grid1, nrow))
    else:
        #both grids lose the same number: fall back to the equal swap
        return equalGridCrossing(grid1, grid2, outGrid1)
    return (grid1, grid2)
import concurrent.futures
import enum
import itertools
import json
import logging
from pathlib import Path
import cv2
import hydra
import numpy as np
import scipy.interpolate
import tifffile
from omegaconf import OmegaConf, DictConfig
from tqdm import tqdm
CONFIG_FILE = 'config.yaml'
class DistortMode(enum.Enum):
LINEAR = 'linear'
NEAREST = 'nearest'
def distort_image(img: np.ndarray, cam_intr: np.ndarray, dist_coeff: np.ndarray,
mode: DistortMode = DistortMode.LINEAR, crop_output: bool = True,
crop_type: str = "corner") -> np.ndarray:
"""Apply fisheye distortion to an image
Args:
img (numpy.ndarray): BGR image. Shape: (H, W, 3)
cam_intr (numpy.ndarray): The camera intrinsics matrix, in pixels: [[fx, 0, cx], [0, fx, cy], [0, 0, 1]]
Shape: (3, 3)
dist_coeff (numpy.ndarray): The fisheye distortion coefficients, for OpenCV fisheye module.
            Shape: (4,)
mode (DistortMode): For distortion, whether to use nearest neighbour or linear interpolation.
RGB images = linear, Mask/Surface Normals/Depth = nearest
crop_output (bool): Whether to crop the output distorted image into a rectangle. The 4 corners of the input
image will be mapped to 4 corners of the distorted image for cropping.
crop_type (str): How to crop.
"corner": We crop to the corner points of the original image, maintaining FOV at the top edge of image.
"middle": We take the widest points along the middle of the image (height and width). There will be black
pixels on the corners. To counter this, original image has to be higher FOV than the desired output.
Returns:
numpy.ndarray: The distorted image, same resolution as input image. Unmapped pixels will be black in color.
"""
assert cam_intr.shape == (3, 3)
assert dist_coeff.shape == (4,)
imshape = img.shape
if len(imshape) == 3:
h, w, chan = imshape
elif len(imshape) == 2:
h, w = imshape
chan = 1
else:
raise RuntimeError(f'Image has unsupported shape: {imshape}. Valid shapes: (H, W), (H, W, N)')
imdtype = img.dtype
# Get array of pixel co-ords
xs = np.arange(w)
ys = np.arange(h)
    xv, yv = np.meshgrid(xs, ys)
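# Illustrative usage sketch of the call signature described in the docstring;
# the intrinsics and distortion coefficients are made-up placeholders.
def _demo_distort():
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    cam_intr = np.array([[320.0, 0.0, 320.0],
                         [0.0, 320.0, 240.0],
                         [0.0, 0.0, 1.0]])
    dist_coeff = np.array([0.17, 0.02, -0.01, 0.001])
    return distort_image(img, cam_intr, dist_coeff,
                         mode=DistortMode.NEAREST, crop_output=False)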
import numpy as np
from epimargin.models import SIR
from epimargin.policy import PrioritizedAssignment
from studies.age_structure.commons import *
mp = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = np.array([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = np.array([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = np.array([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
IFRs = np.array([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]),
prioritization = [6, 5, 4, 3, 2, 1, 0],
label = "test-mortality"
)
cr = PrioritizedAssignment(
daily_doses = 100,
effectiveness = 1,
S_bins = np.array([
[10, 20, 30, 40, 50, 50, 60],
[10, 20, 30, 40, 50, 50, 45],
[10, 20, 30, 40, 50, 50, 0]
]),
I_bins = np.array([
[0, 0, 0, 5, 6, 7, 10],
[0, 0, 0, 5, 6, 7, 45],
[0, 0, 0, 5, 6, 7, 70]
]),
age_ratios = np.array([0.2, 0.2, 0.25, 0.1, 0.1, 0.1, 0.05]),
    IFRs = np.array([0.01, 0.01, 0.01, 0.02, 0.02, 0.03, 0.04]),
from copy import deepcopy
from numpy import sin, cos, pi, tan, arctan, array, arctan2, square, arcsin, savetxt
from math import pi, inf, sqrt, radians
def fk(q):
# Geometry
a1 = 0.235
a2 = 0.355
a4 = 0.20098
a5 = 0.345
d1 = 0.505
d5 = 0.00837
d6 = 0.6928
# DH table
dh = array([[q[0], d1, -a1, pi / 2],
[(q[1] + pi / 2), 0, a2, pi / 2],
[0, q[2], 0, 0],
[q[3], 0, -a4, pi / 2],
[q[4], -d5, -a5, -pi / 2],
[0, d6, 0, 0]])
# Transformation matrices
t1 = t_dh(dh[0, :])
t2 = t_dh(dh[1, :])
t3 = t_dh(dh[2, :])
t4 = t_dh(dh[3, :])
t5 = t_dh(dh[4, :])
t6 = t_dh(dh[5, :])
t = t1 @ t2 @ t3 @ t4 @ t5 @ t6
return t
def t_dh(u):
a = rot_z(u[0])
b = trans_z(u[1])
c = trans_x(u[2])
d = rot_x(u[3])
t = a @ b @ c @ d
return t
def rot_z(theta):
u = array([[cos(theta), -sin(theta), 0, 0],
[sin(theta), cos(theta), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
return u
def rot_x(alpha):
u = array([[1, 0, 0, 0],
[0, cos(alpha), -sin(alpha), 0],
[0, sin(alpha), cos(alpha), 0],
[0, 0, 0, 1]])
return u
def trans_x(a_n):
u = array([[1, 0, 0, a_n],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
return u
def trans_z(d_n):
u = array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, d_n],
[0, 0, 0, 1]])
return u
def fk_wrist(q):
# Geometry
a4 = 0.20098
a5 = 0.345
d5 = 0.00837
d6 = 0.6928
# DH table
dh = array([[q[0], 0, -a4, pi/2],
[q[1], -d5, -a5, -pi/2],
[0, d6, 0, 0]])
# Transformation matrices
t1 = t_dh(dh[0, :])
t2 = t_dh(dh[1, :])
t3 = t_dh(dh[2, :])
t = t1 @ t2 @ t3
return t
def fk_boom(q):
# Geometry
a1 = 0.235
a2 = 0.355
d1 = 0.505
# DH table
dh = array([[q[0], d1, -a1, pi / 2],
[(q[1] + pi / 2), 0, a2, pi / 2],
[0, q[2], 0, 0]])
# Transformation matrices
t1 = t_dh(dh[0, :])
t2 = t_dh(dh[1, :])
t3 = t_dh(dh[2, :])
t = t1 @ t2 @ t3
return t
def ik_wrist(qb, rx, ry):
t2 = tan(ry / 2)
t3 = t2 ** 2
t5 = tan(qb[0] / 2)
t6 = t5 ** 2
t7 = t3 * t6
t11 = tan(qb[1] / 2 + pi / 4)
t14 = tan(rx / 2)
t15 = t14 * t5
t16 = t11 ** 2
t19 = t14 ** 2
t20 = t19 * t6
t21 = t19 * t3
t24 = t2 * t11
t25 = 2 * t24
t28 = 4 * t15 * t11
t31 = t19 * t2
t33 = 2 * t31 * t11
t34 = t2 * t6
t36 = 2 * t34 * t11
t37 = t6 * t11
t39 = 2 * t31 * t37
t40 = t14 * t3
t41 = t5 * t11
t43 = 4 * t40 * t41
t44 = t19 * t16 + t20 * t16 + t3 * t16 + t7 * t16 + t21 * t6 + t21 - t25 + t28 - t33 + t36 + t39 - t43 + t6 + 1
t45 = t6 * t16
t48 = t21 * t16 + t21 * t45 + t16 + t19 + t20 + t25 - t28 + t3 + t33 - t36 - t39 + t43 + t45 + t7
t51 = sqrt(t44 / t48)
t55 = t45 * t51
t65 = t16 * t51
t67 = t19 * t11 + t20 * t11 - t21 * t11 + t7 * t11 - 2 * t15 * t16 + t31 * t16 - t34 * t16 + t20 * t51 + 2 * t24 * t51 + t31 * t6 - 2 * t40 * t5 + t7 * t51 + 2 * t15 - t31 + t34 + t55 + t65
t79 = t11 * t51
t88 = t5 * t16
t92 = -2 * t31 * t37 * t51 + 4 * t40 * t41 * t51 + t3 * t11 - 4 * t15 * t79 + t2 * t16 + t19 * t51 - t21 * t37 + t21 * t55 + t21 * t65 + t3 * t51 - t31 * t45 + 2 * t31 * t79 - 2 * t34 * t79 + 2 * t40 * t88 - t11 - t2 - t37
t94 = t14 * t6
t96 = t2 * t5
t108 = t14 * t16 - t40 * t16 - t94 * t16 + 2 * t96 * t16 + 2 * t31 * t5 + 2 * t31 * t88 + t40 * t45 + t40 * t6 + t14 - t40 - t94 + 2 * t96
t111 = arctan((t67 + t92) / t108)
q_4 = 2 * t111
t1 = sin(rx)
t2 = cos(ry)
t6 = qb[1] / 2 + pi / 4
t7 = sin(t6)
t8 = cos(t6)
t10 = sin(qb[0])
t13 = 2 * t1 * t2 * t7 * t8 * t10
t14 = cos(rx)
t15 = t14 * t2
t16 = t8 ** 2
t18 = 2 * t15 * t16
t19 = sin(ry)
t21 = cos(qb[0])
t24 = 2 * t19 * t7 * t8 * t21
t29 = sqrt(-(t13 + t18 - t24 - t15 + 1) / (t13 + t18 - t24 - t15 - 1))
t30 = arctan(t29)
q_5 = -2 * t30
q = [q_4, q_5]
return q
def ik_boom(x, y, z):
q_1 = arctan2(y, x)
q_2 = 2*arctan2((200*z*cos(q_1)-101*cos(q_1)+(7369*cos(q_1)**2-40400*z*cos(q_1)**2+40000*z**2*cos(
q_1)**2+18800*x*cos(q_1)+40000*x**2)**(1/2)), (2*(100*x+59*cos(q_1))))-pi/2
    q_3 = -(71*cos(q_2)-200*z+101)/(200*sin(q_2))
    q = [q_1, q_2, q_3]
    return q
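# Illustrative consistency check between the boom forward and inverse
# kinematics; the joint values are made up.
def _demo_boom_roundtrip():
    q_boom = [0.2, 0.3, 1.5]            # [q1 (rad), q2 (rad), q3 (m)]
    t = fk_boom(q_boom)
    x, y, z = t[0, 3], t[1, 3], t[2, 3]
    return ik_boom(x, y, z), q_boom     # should agree up to the solution branch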
import numpy as np
import pandas as pd
import scipy.stats as stats
from sklearn import decomposition as decomp
from scRNA.abstract_clustering import AbstractClustering
from scRNA.utils import center_kernel, normalize_kernel, kta_align_binary, \
get_matching_gene_inds, get_transferred_data_matrix, get_transferability_score
class NmfClustering(AbstractClustering):
num_cluster = -1
dictionary = None
data_matrix = None
def __init__(self, data, gene_ids, num_cluster, labels):
super(NmfClustering, self).__init__(data, gene_ids=gene_ids)
self.num_cluster = num_cluster
def apply(self, k=-1, alpha=1.0, l1=0.75, max_iter=100, rel_err=1e-3):
if k == -1:
k = self.num_cluster
X = self.pre_processing()
nmf = decomp.NMF(alpha=alpha, init='nndsvdar', l1_ratio=l1, max_iter=max_iter,
n_components=k, random_state=0, shuffle=True, solver='cd',
tol=rel_err, verbose=0)
W = nmf.fit_transform(X)
H = nmf.components_
self.cluster_labels = np.argmax(nmf.components_, axis=0)
if np.any(np.isnan(H)):
raise Exception('H contains NaNs (alpha={0}, k={1}, l1={2}, data={3}x{4}'.format(
alpha, k, l1, X.shape[0], X.shape[1]))
if np.any(np.isnan(W)):
raise Exception('W contains NaNs (alpha={0}, k={1}, l1={2}, data={3}x{4}'.format(
alpha, k, l1, X.shape[0], X.shape[1]))
# self.print_reconstruction_error(X, W, H)
self.dictionary = W
self.data_matrix = H
def print_reconstruction_error(self, X, W, H):
print((' Elementwise absolute reconstruction error : ', np.sum(np.abs(X - W.dot(H))) / np.float(X.size)))
print((' Fro-norm reconstruction error : ', np.sqrt(np.sum((X - W.dot(H))*(X - W.dot(H)))) / np.float(X.size)))
class NmfClustering_initW(AbstractClustering):
num_cluster = -1
dictionary = None
data_matrix = None
def __init__(self, data, gene_ids, num_cluster, labels):
super(NmfClustering_initW, self).__init__(data, gene_ids=gene_ids)
self.num_cluster = num_cluster
self.labels=labels
def apply(self, k=-1, alpha=1.0, l1=0.75, max_iter=100, rel_err=1e-3):
if k == -1:
k = self.num_cluster
X = self.pre_processing()
fixed_W = pd.get_dummies(self.labels)
fixed_W_t = fixed_W.T # interpret W as H (transpose), you can only fix H, while optimizing W in the code. So we simply switch those matrices (invert their roles).
learned_H_t, fixed_W_t_same, n_iter = decomp.non_negative_factorization(X.astype(np.float), n_components=k, init='custom', random_state=0, update_H=False, H=fixed_W_t.astype(np.float), alpha=alpha, l1_ratio=l1, max_iter=max_iter, shuffle=True, solver='cd',tol=rel_err, verbose=0)
init_W = fixed_W_t_same.T
init_H = learned_H_t.T
nmf = decomp.NMF(alpha=alpha, init='custom',l1_ratio=l1, max_iter=max_iter, n_components=k, random_state=0, shuffle=True, solver='cd', tol=rel_err, verbose=0)
W = nmf.fit_transform(X.T, W=init_W, H = init_H)
H = nmf.components_
self.cluster_labels = np.argmax(W, axis=1)
if np.any(np.isnan(H)):
raise Exception('H contains NaNs (alpha={0}, k={1}, l1={2}, data={3}x{4}'.format(
alpha, k, l1, X.shape[0], X.shape[1]))
if np.any(np.isnan(W)):
raise Exception('W contains NaNs (alpha={0}, k={1}, l1={2}, data={3}x{4}'.format(
alpha, k, l1, X.shape[0], X.shape[1]))
# self.print_reconstruction_error(X, W, H)
self.dictionary = H.T
self.data_matrix = W.T
def print_reconstruction_error(self, X, W, H):
print((' Elementwise absolute reconstruction error : ', np.sum(np.abs(X - W.dot(H))) / np.float(X.size)))
        print((' Fro-norm reconstruction error : ', np.sqrt(np.sum((X - W.dot(H))*(X - W.dot(H)))) / np.float(X.size)))
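# Illustrative usage sketch on random data, assuming AbstractClustering's
# pre-processing accepts a dense genes-by-cells matrix; all values are made up.
def _demo_nmf_clustering():
    data = np.random.rand(50, 200)   # genes x cells, non-negative
    gene_ids = np.array(['g%d' % i for i in range(50)])
    nmf = NmfClustering(data, gene_ids, num_cluster=3, labels=None)
    nmf.apply(k=3, max_iter=50)
    return nmf.cluster_labels        # one cluster id per cell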
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
import nvtabular as nvt
from merlin.core.dispatch import make_df
from nvtabular import ColumnSelector, Schema, Workflow, ops
try:
import cudf
_CPU = [True, False]
except ImportError:
_CPU = [True]
@pytest.mark.parametrize("cpu", _CPU)
@pytest.mark.parametrize("keys", [["name"], "id", ["name", "id"]])
def test_groupby_op(keys, cpu):
# Initial timeseries dataset
size = 60
df1 = make_df(
{
"name": np.random.choice(["Dave", "Zelda"], size=size),
"id": np.random.choice([0, 1], size=size),
"ts": np.linspace(0.0, 10.0, num=size),
"x": np.arange(size),
"y": np.linspace(0.0, 10.0, num=size),
"shuffle": np.random.uniform(low=0.0, high=10.0, size=size),
}
)
df1 = df1.sort_values("shuffle").drop(columns="shuffle").reset_index(drop=True)
# Create a ddf, and be sure to shuffle by the groupby keys
ddf1 = dd.from_pandas(df1, npartitions=3).shuffle(keys)
dataset = nvt.Dataset(ddf1, cpu=cpu)
dataset.schema.column_schemas["x"] = dataset.schema.column_schemas["x"].with_tags("custom_tag")
# Define Groupby Workflow
groupby_features = ColumnSelector(["name", "id", "ts", "x", "y"]) >> ops.Groupby(
groupby_cols=keys,
sort_cols=["ts"],
aggs={
"x": ["list", "sum"],
"y": ["first", "last"],
"ts": ["min"],
},
name_sep="-",
)
processor = nvt.Workflow(groupby_features)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
assert "custom_tag" in processor.output_schema.column_schemas["x-list"].tags
if not cpu:
# Make sure we are capturing the list type in `output_dtypes`
assert (
processor.output_schema["x-list"].dtype
== cudf.core.dtypes.ListDtype("int64").element_type
)
assert processor.output_schema["x-list"].is_list is True
assert processor.output_schema["x-list"].is_ragged is True
# Check list-aggregation ordering
x = new_gdf["x-list"]
x = x.to_pandas() if hasattr(x, "to_pandas") else x
sums = []
for el in x.values:
_el = pd.Series(el)
sums.append(_el.sum())
assert _el.is_monotonic_increasing
# Check that list sums match sum aggregation
x = new_gdf["x-sum"]
x = x.to_pandas() if hasattr(x, "to_pandas") else x
assert list(x) == sums
# Check basic behavior or "y" column
assert (new_gdf["y-first"] < new_gdf["y-last"]).all()
@pytest.mark.parametrize("cpu", _CPU)
def test_groupby_string_agg(cpu):
# Initial sales dataset
size = 60
df1 = make_df(
{
"product_id": np.random.randint(10, size=size),
"day": np.random.randint(7, size=size),
"price": np.random.rand(size),
import copy
import functions.setting.setting_utils as su
from joblib import Parallel, delayed
import json
import logging
import multiprocessing
import numpy as np
import os
import time
def search_indices(dvf, c, class_balanced, margin, dim_im, torso):
"""
This function searches for voxels based on the ClassBalanced in the parallel mode: if Setting['ParallelSearching'] == True
:param dvf: input DVF
:param c: enumerate of the class (in for loop over all classes)
:param class_balanced: a vector indicates the classes, for instance [a,b] implies classes [0,a), [a,b)
:param margin: Margin of the image. so no voxel would be selected if the index is smaller than K or greater than (ImageSize - K)
:param dim_im: '2D' or '3D'. Please note that in 2D setting, we still have a 3D DVF with zero values for the third direction. Hence, we can't use np.all and we have to use np.any.
:param torso:
:return: I1 which is a numpy array of ravel_multi_index
<NAME> <EMAIL>
"""
mask = np.zeros(np.shape(dvf)[:-1], dtype=np.bool)
mask[margin:-margin, margin:-margin, margin:-margin] = True
if torso is not None:
mask = mask & torso
i1 = None
if c == 0:
# Future: you can add a mask here to prevent selecting pixels twice!
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf) < class_balanced[c]), axis=3)) & mask), np.shape(dvf)[:-1]).astype(np.int32)
# the output of np.where occupy huge part of memory! by converting it to a numpy array lots of memory can be saved!
elif (c > 0) & (c < len(class_balanced)):
if dim_im == 2:
# in 2D experiments, the DVFList is still in 3D and for the third direction is set to 0. Here we use np.any() instead of np.all()
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf) < class_balanced[c]), axis=3)) & (np.any((np.abs(dvf) >= class_balanced[c - 1]), axis=3)) &
mask), np.shape(dvf)[:-1]).astype(np.int32)
elif dim_im == 3:
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf) < class_balanced[c]), axis=3)) & (np.all((np.abs(dvf) >= class_balanced[c - 1]), axis=3)) &
mask), np.shape(dvf)[:-1]).astype(np.int32)
return i1
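# Illustrative use of the class_balanced convention handled above (a hypothetical
# sketch, not part of the original module): with class_balanced = [1, 4, 8] and a 3D
# DVF, class 0 keeps voxels whose per-component |DVF| is below 1 everywhere, class 1
# keeps voxels with all components in [1, 4), and class 2 those in [4, 8). The
# returned indices are raveled so they can be stored compactly and decoded later:
#
#   dvf = np.random.uniform(-8, 8, size=(30, 30, 30, 3))
#   torso = np.ones(dvf.shape[:-1], dtype=bool)
#   i_class1 = search_indices(dvf, c=1, class_balanced=[1, 4, 8], margin=5,
#                             dim_im=3, torso=torso)
#   zyx = np.unravel_index(i_class1, dvf.shape[:-1])  # back to voxel coordinates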
def search_indices_seq(dvf_label, c, class_balanced, margin, torso):
"""
This function searches for voxels based on the ClassBalanced in the parallel mode: if Setting['ParallelSearching'] == True
:param dvf_label: input DVF
:param c: enumerate of the class (in for loop over all classes)
:param class_balanced: a vector indicates the classes, for instance [a,b] implies classes [0,a), [a,b)
:param margin: Margin of the image. so no voxel would be selected if the index is smaller than K or greater than (ImageSize - K)
:param dim_im: '2D' or '3D'. Please note that in 2D setting, we still have a 3D DVF with zero values for the third direction. Hence, we can't use np.all and we have to use np.any.
:return: I1 which is a numpy array of ravel_multi_index
<NAME> <EMAIL>
"""
mask = np.zeros(np.shape(dvf_label), dtype=np.bool)
mask[margin:-margin, margin:-margin, margin:-margin] = True
if torso is not None:
mask = mask & torso
if isinstance(class_balanced[c], list):
if len(class_balanced[c]) == 2:
i1 = np.ravel_multi_index(np.where(np.logical_and(np.logical_or(
dvf_label == class_balanced[c][0], dvf_label == class_balanced[c][1]), mask)), np.shape(dvf_label)).astype(np.int32)
else:
raise ValueError('implemented for maximum of two values per class')
else:
i1 = np.ravel_multi_index(np.where(np.logical_and(dvf_label == class_balanced[c], mask)), np.shape(dvf_label)).astype(np.int32)
# the output of np.where occupy huge part of memory! by converting it to a numpy array lots of memory can be saved!
return i1
def shuffled_indices_from_chunk(setting, dvf_list=None, torso_list=None, im_info_list=None, stage=None, stage_sequence=None,
semi_epoch=None, chunk=None, samples_per_image=None, log_header='', full_image=None, seq_mode=False,
chunk_length_force_to_multiple_of=None):
if full_image:
ishuffled = np.arange(len(dvf_list))
else:
if seq_mode:
ishuffled = shuffled_indices_from_chunk_patch_seq(setting, dvf_list=dvf_list, torso_list=torso_list,
stage_sequence=stage_sequence, semi_epoch=semi_epoch, chunk=chunk,
samples_per_image=samples_per_image, log_header=log_header,
chunk_length_force_to_multiple_of=chunk_length_force_to_multiple_of)
else:
ishuffled = shuffled_indices_from_chunk_patch(setting, dvf_list=dvf_list, torso_list=torso_list, im_info_list=im_info_list,
stage=stage, semi_epoch=semi_epoch, chunk=chunk, samples_per_image=samples_per_image,
log_header=log_header)
return ishuffled
def shuffled_indices_from_chunk_patch(setting, dvf_list=None, torso_list=None, im_info_list=None, stage=None, semi_epoch=None,
chunk=None, samples_per_image=None, log_header=''):
for single_dict in setting['DataExpDict']:
iclass_folder = su.address_generator(setting, 'IClassFolder', data=single_dict['data'], deform_exp=single_dict['deform_exp'], stage=stage)
if not (os.path.isdir(iclass_folder)):
os.makedirs(iclass_folder)
margin = setting['Margin']
class_balanced = setting['ClassBalanced']
indices = {}
for c in range(len(class_balanced)):
indices['class'+str(c)] = []
start_time = time.time()
if setting['ParallelSearching']:
num_cores = multiprocessing.cpu_count() - 2
results = [None] * len(dvf_list) * len(class_balanced)
count_iclass_loaded = 0
for i_dvf, im_info in enumerate(im_info_list):
for c in range(len(class_balanced)):
iclass_address = su.address_generator(setting, 'IClass', data=im_info['data'], deform_exp=im_info['deform_exp'], cn=im_info['cn'],
type_im=im_info['type_im'], dsmooth=im_info['dsmooth'], c=c, stage=stage)
if os.path.isfile(iclass_address):
results[i_dvf * len(class_balanced) + c] = np.load(iclass_address) # double checked
count_iclass_loaded += 1
if count_iclass_loaded != len(results):
logging.debug(log_header+': not all I1 found. start calculating... SemiEpoch = {}, Chunk = {}, stage={}'.format(semi_epoch, chunk, stage))
results = Parallel(n_jobs=num_cores)(delayed(search_indices)(dvf=dvf_list[i], torso=torso_list[i],
c=c, class_balanced=class_balanced, margin=margin,
dim_im=setting['Dim'])
for i in range(0, len(dvf_list)) for c in range(0, len(class_balanced)))
for i_dvf, im_info in enumerate(im_info_list):
for c in range(0, len(class_balanced)):
iclass_address = su.address_generator(setting, 'IClass', data=im_info['data'], deform_exp=im_info['deform_exp'], cn=im_info['cn'],
type_im=im_info['type_im'], dsmooth=im_info['dsmooth'], c=c, stage=stage)
np.save(iclass_address, results[i_dvf * len(class_balanced) + c]) # double checked
for iresults in range(0, len(results)):
i_dvf = iresults // (len(class_balanced)) # first loop in the Parallel: for i in range(0, len(dvf_list))
c = iresults % (len(class_balanced)) # second loop in the Parallel: for j in range(0, len(class_balanced)+1)
if len(results[iresults]):
if len(indices['class'+str(c)]) == 0:
indices['class'+str(c)] = np.array(np.c_[results[iresults], i_dvf * np.ones(len(results[iresults]), dtype=np.int32)])
else:
indices['class'+str(c)] = np.concatenate((indices['class'+str(c)], np.array(np.c_[results[iresults], i_dvf * np.ones(len(results[iresults]), dtype=np.int32)])), axis=0)
del results
end_time = time.time()
if setting['verbose']:
logging.debug(log_header+' Parallel searching for {} classes is Done in {:.2f}s'.format(len(class_balanced), end_time - start_time))
else:
for i_dvf, im_info in enumerate(im_info_list):
mask = np.zeros(np.shape(dvf_list[i_dvf])[:-1], dtype=np.bool)
mask[margin:-margin, margin:-margin, margin:-margin] = True
if torso_list[i_dvf] is not None:
mask = mask & torso_list[i_dvf]
for c in range(len(class_balanced)):
iclass_address = su.address_generator(setting, 'IClass', data=im_info['data'], deform_exp=im_info['deform_exp'], cn=im_info['cn'],
type_im=im_info['type_im'], dsmooth=im_info['dsmooth'], c=c, stage=stage)
if os.path.isfile(iclass_address):
i1 = np.load(iclass_address)
else:
if c == 0:
# you can add a mask here to prevent selecting pixels twice!
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf_list[i_dvf]) < class_balanced[c]), axis=3)) & mask),
np.shape(dvf_list[i_dvf])[:-1]).astype(np.int32)
# the output of np.where occupy huge part of memory! by converting it to a numpy array lots of memory can be saved!
if (c > 0) & (c < (len(class_balanced))):
if setting['Dim'] == 2:
# in 2D experiments, the DVFList is still in 3D and for the third direction is set to 0. Here we use np.any() instead of np.all()
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf_list[i_dvf]) < class_balanced[c]), axis=3)) &
(np.any((np.abs(dvf_list[i_dvf]) >= class_balanced[c - 1]), axis=3)) & mask),
np.shape(dvf_list[i_dvf])[:-1]).astype(np.int32)
if setting['Dim'] == 3:
i1 = np.ravel_multi_index(np.where((np.all((np.abs(dvf_list[i_dvf]) < class_balanced[c]), axis=3)) &
(np.all((np.abs(dvf_list[i_dvf]) >= class_balanced[c - 1]), axis=3)) & mask),
np.shape(dvf_list[i_dvf])[:-1]).astype(np.int32)
np.save(iclass_address, i1)
if len(i1) > 0:
if len(indices['class'+str(c)]) == 0:
indices['class'+str(c)] = np.array(np.c_[i1, i_dvf * np.ones(len(i1), dtype=np.int32)])
else:
indices['class'+str(c)] = np.concatenate((indices['class'+str(c)], np.array(np.c_[i1, i_dvf * np.ones(len(i1), dtype=np.int32)])), axis=0)
if setting['verbose']:
logging.debug(log_header+': Finding classes done for i = {}, c = {} '.format(i_dvf, c))
del i1
end_time = time.time()
if setting['verbose']:
logging.debug(log_header+': Searching for {} classes is Done in {:.2f}s'.format(len(class_balanced) + 1, end_time - start_time))
samples_per_chunk = samples_per_image * len(dvf_list)
sample_per_chunk_per_class = np.round(samples_per_chunk / (len(class_balanced)))
number_samples_class = np.empty(len(class_balanced), dtype=np.int32)
random_state = np.random.RandomState(semi_epoch * 10000 + chunk * 100 + stage)
selected_indices = np.array([])
for c, k in enumerate(indices.keys()):
number_samples_class[c] = min(sample_per_chunk_per_class, np.shape(indices[k])[0])
# it is possible to have a different number in each class. However, we prefer to have at least SamplePerChunkPerClass
if np.shape(indices['class'+str(c)])[0] > 0:
i1 = random_state.randint(0, high=np.shape(indices['class' + str(c)])[0], size=number_samples_class[c])
if c == 0 or len(selected_indices) == 0:
selected_indices = np.concatenate((indices['class' + str(c)][i1, :], c * np.ones([len(i1), 1], dtype=np.int32)), axis=1).astype(np.int32)
else:
selected_indices = np.concatenate((selected_indices,
np.concatenate((indices['class' + str(c)][i1, :],
c * np.ones([len(i1), 1], dtype=np.int32)),
axis=1)),
axis=0)
logging.info(log_header + ': {} of samples in class {} for SemiEpoch = {}, Chunk = {} '.
format(number_samples_class[c], c, semi_epoch, chunk))
if setting['verbose']:
logging.debug(log_header+': samplesPerChunk is {} for SemiEpoch = {}, Chunk = {} '.format(sum(number_samples_class), semi_epoch, chunk))
shuffled_index = np.arange(0, len(selected_indices))
random_state.shuffle(shuffled_index)
return selected_indices[shuffled_index]
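# Layout of the returned array (explanatory note, not part of the original code): each
# row of selected_indices is [raveled_voxel_index, image_index_in_dvf_list, class_index],
# shuffled with a RandomState seeded from (semi_epoch, chunk, stage) so that regenerating
# the same chunk gives the same patch order. A row can be decoded like this (hypothetical):
#
#   voxel_flat, i_image, i_class = ishuffled_row
#   zyx = np.unravel_index(voxel_flat, np.shape(dvf_list[i_image])[:-1])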
def shuffled_indices_from_chunk_patch_seq(setting, dvf_list=None, torso_list=None, stage_sequence=None, semi_epoch=None,
chunk=None, samples_per_image=None, log_header='', chunk_length_force_to_multiple_of=None):
margin = setting['Margin']
class_balanced = setting['ClassBalanced']
indices = {}
for c in range(len(class_balanced)):
indices['class'+str(c)] = []
start_time = time.time()
if setting['ParallelSearching']:
num_cores = multiprocessing.cpu_count() - 2
results = Parallel(n_jobs=num_cores)(delayed(search_indices_seq)(dvf_label=dvf_list[i], c=c, class_balanced=class_balanced, margin=margin, torso=torso_list[i]['stage1'])
for i in range(len(dvf_list)) for c in range(0, len(class_balanced)))
for iresults in range(0, len(results)):
i_dvf = iresults // (len(class_balanced)) # first loop in the Parallel: for i in range(0, len(dvf_list))
c = iresults % (len(class_balanced)) # second loop in the Parallel: for j in range(0, len(class_balanced)+1)
if len(results[iresults]):
if len(indices['class'+str(c)]) == 0:
indices['class'+str(c)] = np.array(np.c_[results[iresults], i_dvf * np.ones(len(results[iresults]), dtype=np.int32)])
else:
indices['class'+str(c)] = np.concatenate((indices['class'+str(c)], np.array(np.c_[results[iresults], i_dvf * np.ones(len(results[iresults]), dtype=np.int32)])), axis=0)
del results
end_time = time.time()
if setting['verbose']:
logging.debug(log_header+' Parallel searching for {} classes is Done in {:.2f}s'.format(len(class_balanced), end_time - start_time))
samples_per_chunk = samples_per_image * len(dvf_list)
sample_per_chunk_per_class = np.round(samples_per_chunk / (len(class_balanced)))
number_samples_class = np.empty(len(class_balanced), dtype=np.int32)
random_state = np.random.RandomState(semi_epoch * 10000 + chunk * 100 + stage_sequence[0])
selected_indices = np.array([])
for c, k in enumerate(indices.keys()):
number_samples_class[c] = min(sample_per_chunk_per_class * setting['ClassBalancedWeight'][c], np.shape(indices[k])[0])
# it is possible to have a different number in each class. However, we prefer to have at least SamplePerChunkPerClass
if np.shape(indices['class'+str(c)])[0] > 0:
i1 = random_state.randint(0, high=np.shape(indices['class' + str(c)])[0], size=number_samples_class[c])
if c == 0 or len(selected_indices) == 0:
selected_indices = np.concatenate((indices['class' + str(c)][i1, :], c * np.ones([len(i1), 1], dtype=np.int32)), axis=1).astype(np.int32)
else:
selected_indices = np.concatenate((selected_indices,
np.concatenate((indices['class' + str(c)][i1, :],
c * np.ones([len(i1), 1], dtype=np.int32)),
axis=1)),
axis=0)
logging.info(log_header + ': {} of samples in class {} for SemiEpoch = {}, Chunk = {} '.
format(number_samples_class[c], c, semi_epoch, chunk))
if setting['verbose']:
logging.debug(log_header+': samplesPerChunk is {} for SemiEpoch = {}, Chunk = {} '.format(sum(number_samples_class), semi_epoch, chunk))
shuffled_index = np.arange(0, len(selected_indices))
random_state.shuffle(shuffled_index)
if chunk_length_force_to_multiple_of is not None:
remainder = len(shuffled_index) % chunk_length_force_to_multiple_of
if remainder != 0:
shuffled_index = shuffled_index[0: len(shuffled_index) - remainder]
return selected_indices[shuffled_index]
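# Note on chunk_length_force_to_multiple_of (an illustrative sketch, not part of the
# original code): trimming the shuffled index list to a multiple of, e.g., the batch
# size guarantees that every batch drawn from this chunk is full. For instance, with
# 1003 selected samples and chunk_length_force_to_multiple_of=16, the remainder is
# 1003 % 16 == 11, so the last 11 samples are dropped and 992 = 62 * 16 remain.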
def get_ishuffled_folder_write_ishuffled_setting(setting, train_mode, stage, number_of_images_per_chunk,
samples_per_image, im_info_list_full, full_image,
chunk_length_force_to_multiple_of=None):
"""
This function chooses or creates the IShuffledFolder. First it takes a look at the ishuffled_root_folder; if there is no
folder there, it creates the folder and saves the ishuffled_setting to a json file.
If a folder already exists, it compares the ishuffled setting with the json file in that folder. If they are identical,
that folder is chosen. Otherwise another folder is created by increasing the exp number:
Example ishuffled_folder: Training_images120_S4_exp0, Training_images120_S4_exp1
Please note that the order of im_info_list_full is important. A different order means different images in the chunks.
:return: ishuffled_folder
"""
ishuffled_setting = {'train_mode': train_mode,
'DVFPad': setting['DVFPad_S' + str(stage)],
'ImPad': setting['ImPad_S' + str(stage)],
'NumberOfImagesPerChunk': number_of_images_per_chunk,
'ImInfoList': im_info_list_full,
}
if 'DVFThresholdList' in setting.keys():
ishuffled_setting['DVFThresholdList'] = copy.deepcopy(setting['DVFThresholdList'])
if chunk_length_force_to_multiple_of is not None:
ishuffled_setting['ChunkLengthForceToMultipleOf'] = chunk_length_force_to_multiple_of
if 'ClassBalancedWeight' in setting.keys():
ishuffled_setting['ClassBalancedWeight'] = setting['ClassBalancedWeight']
if not full_image:
# other important setting in patch based
ishuffled_setting['ClassBalanced'] = setting['ClassBalanced']
ishuffled_setting['Margin'] = setting['Margin']
ishuffled_setting['SamplePerImage'] = samples_per_image
ishuffled_folder = None
ishuffled_exp = 0
folder_found = False
while not folder_found:
ishuffled_folder = su.address_generator(setting, 'IShuffledFolder',
train_mode=train_mode,
stage=stage,
ishuffled_exp=ishuffled_exp,
im_list_info=im_info_list_full)
ishuffled_setting_address = su.address_generator(setting, 'IShuffledSetting',
train_mode=train_mode,
stage=stage,
ishuffled_exp=ishuffled_exp,
im_list_info=im_info_list_full)
if not (os.path.isdir(ishuffled_folder)):
os.makedirs(ishuffled_folder)
with open(ishuffled_setting_address, 'w') as f:
f.write(json.dumps(ishuffled_setting, sort_keys=True, indent=4, separators=(',', ': ')))
folder_found = True
else:
with open(ishuffled_setting_address, 'r') as f:
ishuffled_setting_exp = json.load(f)
if ishuffled_setting_exp == ishuffled_setting:
folder_found = True
else:
ishuffled_exp = ishuffled_exp + 1
return ishuffled_folder
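# Example of the folder-matching loop above (illustrative; the folder names are taken
# from the docstring, not from a real run): if "Training_images120_S4_exp0" already
# exists but its IShuffledSetting json differs from the current ishuffled_setting
# (e.g. a different ClassBalanced), ishuffled_exp is incremented and
# "Training_images120_S4_exp1" is created with the new json; if the json matches,
# the existing exp0 folder is reused.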
def extract_batch(setting, stage, fixed_im_list, deformed_im_list, dvf_list, ish,
batch_counter, batch_size, end_batch, full_image):
if full_image:
batch_both, batch_dvf = extract_batch_from_image(setting, stage, fixed_im_list, deformed_im_list,
dvf_list, ish, batch_counter, batch_size, end_batch)
else:
batch_both, batch_dvf = extract_batch_from_patch(setting, stage, fixed_im_list, deformed_im_list,
dvf_list, ish, batch_counter, batch_size, end_batch)
return batch_both, batch_dvf
def extract_batch_seq(setting, stage_sequence, fixed_im_list, moved_im_list, dvf_list, ish,
batch_counter, batch_size, end_batch, full_image):
if full_image:
print('not implemented')
else:
batch_both, batch_dvf = extract_batch_from_patch_seq(setting, stage_sequence, fixed_im_list, moved_im_list,
dvf_list, ish, batch_counter, batch_size, end_batch)
return batch_both, batch_dvf
def extract_batch_from_image(setting, stage, fixed_im_list, deformed_im_list, dvf_list, ish,
batch_counter, batch_size, end_batch):
batch_im = np.stack([fixed_im_list[ish[i]] for i in range(batch_counter * batch_size, end_batch)], axis=0)
batch_deformed = np.stack([deformed_im_list[ish[i]] for i in range(batch_counter * batch_size, end_batch)], axis=0)
batch_dvf = np.stack([dvf_list[ish[i]] for i in range(batch_counter * batch_size, end_batch)], axis=0)
batch_both = np.stack((batch_im, batch_deformed), axis=setting['Dim']+1)
return batch_both, batch_dvf
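# Shape note for the full-image batch (explanatory comment, not in the original file;
# it assumes each entry of fixed_im_list is a 3D array and each DVF has 3 components):
# with setting['Dim'] == 3 the fixed and deformed images are stacked along a new last
# axis, so batch_both has shape (batch, x, y, z, 2) and batch_dvf has shape
# (batch, x, y, z, 3), i.e. the two images become two channels of one network input.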
def extract_batch_from_patch(setting, stage, fixed_im_list, deformed_im_list, dvf_list, ish,
batch_counter, batch_size, end_batch):
# ish [: , 0] the index of the sample that is gotten from np.where
# ish [: , 1] the the number of the image in FixedImList
# ish [: , 2] the the number of class, which is not needed anymore!!
r = setting['R']
ry = setting['Ry']
if setting['Dim'] == 2:
shift_center = setting['ImPad_S' + str(stage)] - setting['DVFPad_S' + str(stage)]
batch_im = np.stack([fixed_im_list[ish[i, 1]][
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:2])[0],
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:2])[1] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:2])[1] + r + shift_center + 1,
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:2])[2] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:2])[2] + r + shift_center + 1,
np.newaxis] for i in range(batch_counter * batch_size, end_batch)])
batch_deformed = np.stack([deformed_im_list[ish[i, 1]][
np.unravel_index(ish[i, 0], np.shape(deformed_im_list[ish[i, 1]])[0:2])[0],
np.unravel_index(ish[i, 0], np.shape(deformed_im_list[ish[i, 1]])[0:2])[1] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(deformed_im_list[ish[i, 1]])[0:2])[1] + r + shift_center + 1,
np.unravel_index(ish[i, 0], np.shape(deformed_im_list[ish[i, 1]])[0:2])[2] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(deformed_im_list[ish[i, 1]])[0:2])[2] + r + shift_center + 1,
np.newaxis] for i in range(batch_counter * batch_size, end_batch)])
batch_both = np.concatenate((batch_im, batch_deformed), axis=3)
batch_dvf = np.stack([dvf_list[ish[i, 1]][
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]]))[0],
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]]))[1] - ry:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]]))[1] + ry + 1,
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]]))[2] - ry:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]]))[2] + ry + 1,
0:2] for i in range(batch_counter * batch_size, end_batch)])
elif setting['Dim'] == 3:
shift_center = setting['ImPad_S' + str(stage)] - setting['DVFPad_S' + str(stage)]
batch_im = np.stack([fixed_im_list[ish[i, 1]][
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:3])[0] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:3])[0] + r + shift_center + 1,
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:3])[1] - r + shift_center:
np.unravel_index(ish[i, 0], np.shape(dvf_list[ish[i, 1]])[0:3])[1] + r + shift_center + 1,
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
DATA_PATH = pth.join(pth.dirname(__file__), "data")
# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
fuego_color_map = colors.LinearSegmentedColormap.from_list(
"fuego",
(
(0.25, 0, 0),
(0.5, 0, 0),
(1, 0, 0),
(1, 0.5, 0),
(1, 0.75, 0),
(1, 1, 0),
(1, 1, 1),
),
N=256,
gamma=1.0,
)
fuego_color_map.set_bad("lightgray")
plt.register_cmap(cmap=fuego_color_map)
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def compute_integrated_fibre(
self,
list_spectra="all",
valid_wave_min=0,
valid_wave_max=0,
min_value=0.1,
plot=False,
title=" - Integrated values",
warnings=True,
text="...",
correct_negative_sky=False,
):
"""
Compute the integrated flux of a fibre in a particular range, valid_wave_min to valid_wave_max.
Parameters
----------
list_spectra: float (default "all")
list with the number of fibres for computing integrated value
if using "all" it does all fibres
valid_wave_min, valid_wave_max : float
the integrated flux value will be computed in the range [valid_wave_min, valid_wave_max]
(default = 0; if both are 0 we use [self.valid_wave_min, self.valid_wave_max])
min_value: float (default 0.1)
Integrated fluxes below min_value are set to min_value (used when correct_negative_sky = False)
plot : Boolean (default = False)
Plot
title : string
Title for the plot
text: string
A bit of extra text
warnings : Boolean (default = True)
Write warnings, e.g. when the integrated flux is negative
correct_negative_sky : Boolean (default = False)
Corrects negative values making 0 the integrated flux of the lowest fibre
Example
----------
integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
title = " - [6500,6600]", plot = True)
"""
print("\n Computing integrated fibre values {}".format(text))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if valid_wave_min == 0:
valid_wave_min = self.valid_wave_min
if valid_wave_max == 0:
valid_wave_max = self.valid_wave_max
self.integrated_fibre = np.zeros(self.n_spectra)
region = np.where(
(self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
)
waves_in_region = len(region[0])
n_negative_fibres = 0
negative_fibres = []
for i in range(self.n_spectra):
self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
if self.integrated_fibre[i] < 0:
if warnings:
print(
" WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
i, self.integrated_fibre[i]/waves_in_region
))
n_negative_fibres = n_negative_fibres + 1
# self.integrated_fibre[i] = min_value
negative_fibres.append(i)
if len(negative_fibres) != 0:
print("\n> Number of fibres with integrated flux < 0 : {:4}, that is {:5.2f} % of the total !".format(
n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
))
negative_fibres_sorted = []
integrated_intensity_sorted = np.argsort(
self.integrated_fibre/waves_in_region
)
for fibre_ in range(n_negative_fibres):
negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
# print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
# print " which are :",negative_fibres_sorted
if correct_negative_sky:
min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
min_sky_value_per_wave = min_sky_value/waves_in_region
print(
"\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
negative_fibres_sorted[0], min_sky_value_per_wave
))
# print self.integrated_fibre[negative_fibres_sorted[0]]
self.integrated_fibre = self.integrated_fibre - min_sky_value
for i in range(self.n_spectra):
self.intensity_corrected[i] = (
self.intensity_corrected[i] - min_sky_value_per_wave
)
else:
print(
"\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
min_value
))
for i in negative_fibres_sorted:
self.integrated_fibre[i] = min_value
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0:
# if warnings: print " WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
if plot:
# print"\n Plotting map with integrated values:"
self.RSS_map(
self.integrated_fibre,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
title=title,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def identify_el(
self,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
fibre=0,
broad=1.0,
verbose=True,
plot=True,
):
"""
Identify fibres with highest intensity (high_fibres=10).
Add all in a single spectrum.
Identify emission features.
These emission features should be those expected in all the cube!
Also, choosing fibre=number, it identifies el in a particular fibre.
Parameters
----------
high_fibres: float (default 10)
use the high_fibres highest intensity fibres for identifying
brightest_line : string (default "Ha")
string name with the emission line that is expected to be the brightest in integrated spectrum
cut: float (default 1.5)
The peak has to have a cut higher than cut to be considered as emission line
fibre: integer (default 0)
If fibre is given, it identifies emission lines in the given fibre
broad: float (default 1.0)
Broad (FWHM) of the expected emission lines
verbose : boolean (default = True)
Write results
plot : boolean (default = False)
Plot results
Example
----------
self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
"""
if fibre == 0:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
if verbose:
print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
else:
combined_high_spectrum = self.intensity_corrected[fibre]
if verbose:
print("\n> Identifying emission lines in fibre {}".format(fibre))
# Search peaks
peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
self.wavelength,
combined_high_spectrum,
plot=plot,
cut=cut,
brightest_line=brightest_line,
verbose=False,
)
p_peaks_l = []
p_peaks_fwhm = []
# Do Gaussian fit and provide center & FWHM (flux could be also included, not at the moment as not abs. flux-cal done)
if verbose:
print("\n Emission lines identified:")
for eline in range(len(peaks)):
lowlow = continuum_limits[0][eline]
lowhigh = continuum_limits[1][eline]
highlow = continuum_limits[2][eline]
highhigh = continuum_limits[3][eline]
resultado = fluxes(
self.wavelength,
combined_high_spectrum,
peaks[eline],
verbose=False,
broad=broad,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
plot=plot,
fcal=False,
)
p_peaks_l.append(resultado[1])
p_peaks_fwhm.append(resultado[5])
if verbose:
print(" {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
eline + 1,
peaks_name[eline],
peaks_rest[eline],
p_peaks_l[eline],
p_peaks_fwhm[eline],
))
return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
fibre_p=0,
remove_5578=False, # if fibre_p=fibre plots the corrections in that fibre
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
correct_high_cosmics: boolean (default = False)
If False, only CCD defects (nan and negative values) are corrected
fibre_p: integer (default = 0)
Plots the corrections in fibre fibre_p
remove_5578: boolean (default = False)
Removes skyline 5578 (blue spectrum) using Gaussian fit
AND CHECK: This also MODIFIES the throughput correction correcting for flux_5578_medfilt / median_flux_5578_medfilt
step: integer (default = 50)
Number of points for calculating median value
clip_high : float (default = 100)
Minimum value of flux/median in a pixel to be consider as a cosmic
if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
verbose: boolean (default = False)
Write results
warnings: boolean (default = False)
Write warnings
plot: boolean (default = False)
Plot results
plot_suspicious_fibres: boolean (default = False)
Plots fibre(s) that could have a cosmic left (but it could be OK)
IF self.integrated_fibre[fibre]/median_running[fibre] > max_value -> SUSPICIOUS FIBRE
Example
----------
self.correct_high_cosmics_and_defects(correct_high_cosmics=False, step=40, remove_5578 = True,
clip_high=120, plot_suspicious_fibres=True, warnings=True, verbose=False, plot=True)
"""
print("\n> Correcting for high cosmics and CCD defects...")
wave_min = self.valid_wave_min # CHECK ALL OF THIS...
wave_max = self.valid_wave_max
wlm = self.wavelength
if correct_high_cosmics == False:
print(" Only CCD defects (nan and negative values) are considered.")
else:
print(" Using clip_high = {} for high cosmics".format(clip_high))
print(" IMPORTANT: Be sure that any emission or sky line is fainter than clip_high/continuum !! ")
flux_5578 = [] # For correcting sky line 5578 if requested
if wave_min < 5578 and remove_5578:
print(" Sky line 5578 will be removed using a Gaussian fit...")
integrated_fibre_uncorrected = self.integrated_fibre
print(" ")
output_every_few = np.sqrt(self.n_spectra) + 1
next_output = -1
max_ratio_list = []
for fibre in range(self.n_spectra):
if fibre > next_output:
sys.stdout.write("\b" * 30)
sys.stdout.write(
" Cleaning... {:5.2f}% completed".format(
fibre * 100.0 / self.n_spectra
)
)
sys.stdout.flush()
next_output = fibre + output_every_few
s = self.intensity_corrected[fibre]
running_wave = []
running_step_median = []
cuts = np.int(self.n_wave/step) # using np.int instead of // for improved readability
for cut in range(cuts):
if cut == 0:
next_wave = wave_min
else:
next_wave = np.nanmedian(
(wlm[np.int(cut * step)] + wlm[np.int((cut + 1) * step)])/2
)
if next_wave < wave_max:
running_wave.append(next_wave)
# print("SEARCHFORME1", step, running_wave[cut])
region = np.where(
(wlm > running_wave[cut] - np.int(step/2)) # step/2 doesn't need to be an int, but probably
& (wlm < running_wave[cut] + np.int(step/2)) # want it to be so the cuts are uniform.
)
# print('SEARCHFORME3', region)
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
running_wave.append(wave_max)
region = np.where((wlm > wave_max - step) & (wlm < wave_max))
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
for i in range(len(running_step_median)):
if np.isnan(running_step_median[i]) == True:
if i < 10:
running_step_median[i] = np.nanmedian(running_step_median[0:9])
if i > 10:
running_step_median[i] = np.nanmedian(
running_step_median[-9:-1]
)
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
running_wave, running_step_median, 7
)
fit_median = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
)
if fibre == fibre_p:
espectro_old = copy.copy(self.intensity_corrected[fibre, :])
espectro_fit_median = fit_median
for wave in range(self.n_wave): # (1,self.n_wave-3):
if s[wave] < 0:
s[wave] = fit_median[wave] # Negative values for median values
if np.isnan(s[wave]) == True:
s[wave] = fit_median[wave] # nan for median value
if (
correct_high_cosmics and fit_median[wave] > 0
): # NEW 15 Feb 2019, v7.1 2dFdr takes well cosmic rays
if s[wave] > clip_high * fit_median[wave]:
if verbose:
print(" "
"CLIPPING HIGH = {} in fibre {} w = {} value= {} v/median= {}".format(clip_high, fibre, wlm[wave], s[wave], s[wave]/fit_median[wave])) # " median=",fit_median[wave]
s[wave] = fit_median[wave]
if fibre == fibre_p:
espectro_new = copy.copy(s)
max_ratio_list.append(np.nanmax(s/fit_median))
self.intensity_corrected[fibre, :] = s
# Removing Skyline 5578 using Gaussian fit if requested
if wave_min < 5578 and remove_5578:
resultado = fluxes(
wlm, s, 5578, plot=False, verbose=False
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre] = resultado[11]
flux_5578.append(resultado[3])
sys.stdout.write("\b" * 30)
sys.stdout.write(" Cleaning... 100.00% completed")
sys.stdout.flush()
max_ratio = np.nanmax(max_ratio_list)
print("\n Maximum value found of flux/continuum = {}".format(max_ratio))
if correct_high_cosmics:
print(" Recommended value for clip_high = {} , here we used {}".format(int(max_ratio + 1), clip_high))
# Plot correction in fibre p_fibre
if fibre_p > 0:
plot_correction_in_fibre_p_fibre(fig_size,
wlm,
espectro_old,
espectro_fit_median,
espectro_new,
fibre_p,
clip_high)
# print" "
if correct_high_cosmics == False:
text = "for spectra corrected for defects..."
title = " - Throughput + CCD defects corrected"
else:
text = "for spectra corrected for high cosmics and defects..."
title = " - Throughput + high-C & D corrected"
self.compute_integrated_fibre(
valid_wave_min=wave_min,
valid_wave_max=wave_max,
text=text,
plot=plot,
title=title,
)
if plot:
print(" Plotting integrated fibre values before and after correcting for high cosmics and CCD defects:\n")
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(integrated_fibre_uncorrected, "r", label="Uncorrected", alpha=0.5)
plt.ylabel("Integrated Flux")
plt.xlabel("Fibre")
plt.ylim(
[np.nanmin(self.integrated_fibre), np.nanmax(self.integrated_fibre)]
)
plt.title(self.description)
# Check if integrated value is high
median_running = []
step_f = 10
max_value = 2.0 # For stars this is not accurate, as i/m might be between 5 and 100 in the fibres with the star
skip = 0
suspicious_fibres = []
for fibre in range(self.n_spectra):
if fibre < step_f:
median_value = np.nanmedian(
self.integrated_fibre[0: np.int(step_f)]
)
skip = 1
if fibre > self.n_spectra - step_f:
median_value = np.nanmedian(
self.integrated_fibre[-1 - np.int(step_f): -1]
)
skip = 1
if skip == 0:
median_value = np.nanmedian(
self.integrated_fibre[
fibre - np.int(step_f/2): fibre + np.int(step_f/2) # np.int is used instead of // of readability
]
)
median_running.append(median_value)
if self.integrated_fibre[fibre]/median_running[fibre] > max_value:
print(" Fibre {} has an integrated/median ratio of {} -> Might be a cosmic left!".format(fibre, self.integrated_fibre[fibre]/median_running[fibre]))
label = np.str(fibre)
plt.axvline(x=fibre, color="k", linestyle="--")
plt.text(fibre, self.integrated_fibre[fibre] / 2.0, label)
suspicious_fibres.append(fibre)
skip = 0
plt.plot(self.integrated_fibre, label="Corrected", alpha=0.6)
plt.plot(median_running, "k", label="Median", alpha=0.6)
plt.legend(frameon=False, loc=1, ncol=3)
plt.minorticks_on()
#plt.show()
#plt.close()
if plot_suspicious_fibres == True and len(suspicious_fibres) > 0:
# Plotting suspicious fibres..
figures = plot_suspicious_fibres_graph(
self,
suspicious_fibres,
fig_size,
wave_min,
wave_max,
intensity_corrected_fiber=self.intensity_corrected)
if remove_5578 and wave_min < 5578:
print(" Skyline 5578 has been removed. Checking throughput correction...")
flux_5578_medfilt = sig.medfilt(flux_5578, np.int(5))
median_flux_5578_medfilt = np.nanmedian(flux_5578_medfilt)
extra_throughput_correction = flux_5578_medfilt/median_flux_5578_medfilt
# plt.plot(extra_throughput_correction)
# plt.show()
# plt.close()
if plot:
fig = plot_skyline_5578(fig_size, flux_5578, flux_5578_medfilt)
print(" Variations in throughput between {} and {} ".format(
np.nanmin(extra_throughput_correction), np.nanmax(extra_throughput_correction)
))
print(" Applying this extra throughtput correction to all fibres...")
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/extra_throughput_correction[i]
)
self.relative_throughput = (
self.relative_throughput * extra_throughput_correction
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def clean_sky_residuals(
self,
extra_w=1.3,
step=25,
dclip=3.0,
wave_min=0,
wave_max=0,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
This task HAS TO BE USED WITH EXTREME CARE
as it has not been properly tested!!!
It CAN DELETE REAL (faint) ABSORPTION/EMISSION features in spectra!!!
Use the "1dfit" option for getting a better sky subtraction
ANGEL is keeping this here just in case it is eventually useful...
Parameters
----------
extra_w
step
dclip
wave_min
wave_max
verbose
plot
fig_size
fibre
Returns
-------
"""
# verbose=True
wlm = self.wavelength
if wave_min == 0:
wave_min = self.valid_wave_min
if wave_max == 0:
wave_max = self.valid_wave_max
# Exclude ranges with emission lines if needed
exclude_ranges_low = []
exclude_ranges_high = []
exclude_ranges_low_ = []
exclude_ranges_high_ = []
if self.el[1][0] != 0:
# print " Emission lines identified in the combined spectrum:"
for el in range(len(self.el[0])):
# print " {:3}. - {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(el+1,self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el])
if (
self.el[0][el] == "Ha" or self.el[1][el] == 6583.41
): # Extra extend for Ha and [N II] 6583
extra = extra_w * 1.6
else:
extra = extra_w
exclude_ranges_low_.append(
self.el[2][el] - self.el[3][el] * extra
) # center-1.3*FWHM/2
exclude_ranges_high_.append(
self.el[2][el] + self.el[3][el] * extra
) # center+1.3*FWHM/2
# print self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el],exclude_ranges_low[el],exclude_ranges_high[el],extra
# Check overlapping ranges
skip_next = 0
for i in range(len(exclude_ranges_low_) - 1):
if skip_next == 0:
if exclude_ranges_high_[i] > exclude_ranges_low_[i + 1]:
# Ranges overlap, now check if next range also overlaps
if i + 2 < len(exclude_ranges_low_):
if exclude_ranges_high_[i + 1] > exclude_ranges_low_[i + 2]:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 2])
skip_next = 2
if verbose:
print("Double overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 1])
skip_next = 1
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i])
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
if skip_next == 1:
skip_next = 0
if skip_next == 2:
skip_next = 1
if verbose:
print(exclude_ranges_low_[i], exclude_ranges_high_[i], skip_next)
if skip_next == 0:
exclude_ranges_low.append(exclude_ranges_low_[-1])
exclude_ranges_high.append(exclude_ranges_high_[-1])
if verbose:
print(exclude_ranges_low_[-1], exclude_ranges_high_[-1], skip_next)
# print "\n> Cleaning sky residuals in range [",wave_min,",",wave_max,"] avoiding emission lines... "
print("\n> Cleaning sky residuals avoiding emission lines... ")
if verbose:
print(" Excluded ranges using emission line parameters:")
for i in range(len(exclude_ranges_low_)):
print(exclude_ranges_low_[i], exclude_ranges_high_[i])
print(" Excluded ranges considering overlaps: ")
for i in range(len(exclude_ranges_low)):
print(exclude_ranges_low[i], exclude_ranges_high[i])
print(" ")
else:
exclude_ranges_low.append(20000.0)
exclude_ranges_high.append(30000.0)
print("\n> Cleaning sky residuals...")
say_status = 0
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {} ...".format(fibre))
say_status = say_status + 100
s = self.intensity_corrected[fibre]
fit_median = smooth_spectrum(
wlm,
s,
step=step,
wave_min=wave_min,
wave_max=wave_max,
weight_fit_median=1.0,
plot=False,
)
old = []
if plot:
for i in range(len(s)):
old.append(s[i])
disp = s - fit_median
dispersion = np.nanmedian(np.abs(disp))
rango = 0
imprimir = 1
for i in range(len(wlm) - 1):
# if wlm[i] > wave_min and wlm[i] < wave_max : # CLEAN ONLY IN VALID WAVEVELENGTHS
if (
wlm[i] >= exclude_ranges_low[rango]
and wlm[i] <= exclude_ranges_high[rango]
):
if verbose == True and imprimir == 1:
print(" Excluding range [ {} , {} ] as it has an emission line".format(
exclude_ranges_low[rango], exclude_ranges_high[rango]))
if imprimir == 1:
imprimir = 0
# print " Checking ", wlm[i]," NOT CORRECTED ",s[i], s[i]-fit_median[i]
else:
if np.isnan(s[i]) == True:
s[i] = fit_median[i] # nan for median value
if (
disp[i] > dispersion * dclip
and disp[i + 1] < -dispersion * dclip
):
s[i] = fit_median[i]
s[i + 1] = fit_median[i + 1] # "P-Cygni-like structures
if verbose:
print(" Found P-Cygni-like feature in {}".format(wlm[i]))
if disp[i] > dispersion * dclip or disp[i] < -dispersion * dclip:
s[i] = fit_median[i]
if verbose:
print(" Clipping feature in {}".format(wlm[i]))
if wlm[i] > exclude_ranges_high[rango] and imprimir == 0:
if verbose:
print(" Checked {} End range {} {} {}".format(
wlm[i], rango,
exclude_ranges_low[rango],
exclude_ranges_high[rango]
)
)
rango = rango + 1
imprimir = 1
if rango == len(exclude_ranges_low):
rango = len(exclude_ranges_low) - 1
# print " Checking ", wlm[i]," CORRECTED IF NEEDED",s[i], s[i]-fit_median[i]
# if plot:
# for i in range(6):
# plt.figure(figsize=(fig_size, fig_size/2.5))
# plt.plot(wlm,old-fit_median, "r-", alpha=0.4)
# plt.plot(wlm,fit_median-fit_median,"g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
# plt.plot(wlm,s-fit_median, "b-", alpha=0.7)
#
# for exclude in range(len(exclude_ranges_low)):
# plt.axvspan(exclude_ranges_low[exclude], exclude_ranges_high[exclude], facecolor='g', alpha=0.15,zorder=3)
#
# plt.ylim(-100,200)
# if i == 0: plt.xlim(wlm[0]-10,wlm[-1]+10)
# if i == 1: plt.xlim(wlm[0],6500) # THIS IS FOR 1000R
# if i == 2: plt.xlim(6500,6700)
# if i == 3: plt.xlim(6700,7000)
# if i == 4: plt.xlim(7000,7300)
# if i == 5: plt.xlim(7300,wlm[-1])
# plt.minorticks_on()
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
if plot:
for i in range(6):
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(wlm, old, "r-", alpha=0.4)
plt.plot(wlm, fit_median, "g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
plt.plot(wlm, s, "b-", alpha=0.7)
for exclude in range(len(exclude_ranges_low)):
plt.axvspan(
exclude_ranges_low[exclude],
exclude_ranges_high[exclude],
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.ylim(-300, 300)
if i == 0:
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
if i == 1:
plt.xlim(wlm[0], 6500) # THIS IS FOR 1000R
if i == 2:
plt.xlim(6500, 6700)
if i == 3:
plt.xlim(6700, 7000)
if i == 4:
plt.xlim(7000, 7300)
if i == 5:
plt.xlim(7300, wlm[-1])
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
self.intensity_corrected[fibre, :] = s
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_and_substract_sky_spectrum(
self,
sky,
w=1000,
spectra=1000,
# If rebin == True, it fits all wavelengths to be at the same wavelengths that SKY spectrum...
rebin=False,
brightest_line="Ha",
brightest_line_wavelength=6563.0,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=False,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
Given a 1D sky spectrum, this task fits
sky lines of each spectrum individually and substracts sky
Needs the observed wavelength (brightest_line_wavelength) of the brightest emission line (brightest_line) .
w is the wavelength
spec the 2D spectra
Parameters
----------
sky
w
spectra
rebin
brightest_line
brightest_line_wavelength
maxima_sigma
ymin
ymax
wmin
wmax
auto_scale_sky
warnings
verbose
plot
fig_size
fibre
Returns
-------
"""
if brightest_line_wavelength == 6563:
print("\n\n> WARNING: This is going to FAIL as the wavelength of the brightest emission line has not been included !!!")
print(" USING brightest_line_wavelength = 6563 as default ...\n\n")
brightest_line_wavelength_rest = 6562.82
if brightest_line == "O3" or brightest_line == "O3b":
brightest_line_wavelength_rest = 5006.84
if brightest_line == "Hb" or brightest_line == "hb":
brightest_line_wavelength_rest = 4861.33
print(" Using {:3} at rest wavelength {:6.2f} identified by the user at {:6.2f} to avoid fitting emission lines...".format(
brightest_line, brightest_line_wavelength_rest, brightest_line_wavelength
))
redshift = brightest_line_wavelength/brightest_line_wavelength_rest - 1.0
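# Worked example of the shift applied below (hypothetical numbers, not from the
# original code): if brightest_line = "Ha" (rest 6562.82 A) is observed at 6700.0 A,
# then redshift = 6700.0 / 6562.82 - 1 ~= 0.0209, and the entries of el_list_no_z,
# el_low_list_no_z and el_high_list_no_z are multiplied by (1 + redshift) ~= 1.0209
# so that sky lines falling inside these redshifted emission-line windows are skipped.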
if w == 1000:
w = self.wavelength
if spectra == 1000:
spectra = copy.deepcopy(self.intensity_corrected)
if wmin == 0:
wmin = w[0]
if wmax == 0:
wmax = w[-1]
# Read file with sky emission lines
sky_lines_file = "sky_lines.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"])
number_sl = len(sl_center)
# MOST IMPORTANT EMISSION LINES IN RED
# 6300.30 [OI] -0.263 30.0 15.0 20.0 40.0
# 6312.10 [SIII] -0.264 30.0 18.0 5.0 20.0
# 6363.78 [OI] -0.271 20.0 4.0 5.0 30.0
# 6548.03 [NII] -0.296 45.0 15.0 55.0 75.0
# 6562.82 Ha -0.298 50.0 25.0 35.0 60.0
# 6583.41 [NII] -0.300 62.0 42.0 7.0 35.0
# 6678.15 HeI -0.313 20.0 6.0 6.0 20.0
# 6716.47 [SII] -0.318 40.0 15.0 22.0 45.0
# 6730.85 [SII] -0.320 50.0 30.0 7.0 35.0
# 7065.28 HeI -0.364 30.0 7.0 7.0 30.0
# 7135.78 [ArIII] -0.374 25.0 6.0 6.0 25.0
# 7318.39 [OII] -0.398 30.0 6.0 20.0 45.0
# 7329.66 [OII] -0.400 40.0 16.0 10.0 35.0
# 7751.10 [ArIII] -0.455 30.0 15.0 15.0 30.0
# 9068.90 [S-III] -0.594 30.0 15.0 15.0 30.0
el_list_no_z = [
6300.3,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7318.39,
7329.66,
7751.1,
9068.9,
]
el_list = (redshift + 1) * np.array(el_list_no_z)
# [OI] [SIII] [OI] Ha+[NII] HeI [SII] HeI [ArIII] [OII] [ArIII] [SIII]
el_low_list_no_z = [
6296.3,
6308.1,
6359.8,
6544.0,
6674.2,
6712.5,
7061.3,
7131.8,
7314.4,
7747.1,
9063.9,
]
el_high_list_no_z = [
6304.3,
6316.1,
6367.8,
6590.0,
6682.2,
6736.9,
7069.3,
7139.8,
7333.7,
7755.1,
9073.9,
]
el_low_list = (redshift + 1) * np.array(el_low_list_no_z)
el_high_list = (redshift + 1) * np.array(el_high_list_no_z)
# Double Skylines
dsky1 = [
6257.82,
6465.34,
6828.22,
6969.70,
7239.41,
7295.81,
7711.50,
7750.56,
7853.391,
7913.57,
7773.00,
7870.05,
8280.94,
8344.613,
9152.2,
9092.7,
9216.5,
8827.112,
8761.2,
0,
] # 8760.6, 0]#
dsky2 = [
6265.50,
6470.91,
6832.70,
6978.45,
7244.43,
7303.92,
7715.50,
7759.89,
7860.662,
7921.02,
7780.43,
7879.96,
8288.34,
8352.78,
9160.9,
9102.8,
9224.8,
8836.27,
8767.7,
0,
] # 8767.2, 0] #
say_status = 0
# plot=True
# verbose = True
# warnings = True
self.wavelength_offset_per_fibre = []
self.sky_auto_scale = []
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre,
fibre * 100.0 / self.n_spectra
)
)
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
skip_sl_fit = [] # True emission line, False no emission line
j_lines = 0
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
sky_sl_gaussian_fitted = copy.deepcopy(sky)
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in sky spectrum...")
for i in range(number_sl):
if sl_center[i] > el_high:
while sl_center[i] > el_high:
j_lines = j_lines + 1
if j_lines < len(el_low_list) - 1:
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
# print "Change to range ",el_low,el_high
else:
el_low = w[-1] + 1
el_high = w[-1] + 2
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=2.1 * 2.355,
broad2=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
) # Broad is FWHM for Gaussian sigm a= 1,
di = di + 1
else:
resultado = fluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigm a= 1,
sl_gaussian_flux.append(resultado[3])
sky_sl_gaussian_fitted = resultado[11]
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
if el_low < sl_center[i] < el_high:
if verbose:
print(" SKY line {} in EMISSION LINE !".format(sl_center[i]))
skip_sl_fit.append(True)
else:
skip_sl_fit.append(False)
# print " Fitted wavelength for sky line ",sl_center[i]," : ",resultado[1]," ",resultado[5]
if plot_fit:
if verbose:
print(" Fitted wavelength for sky line {} : {} sigma = {}".format(
sl_center[i], sl_gauss_center[i], sl_gaussian_sigma[i])
)
wmin = sl_lmin[i]
wmax = sl_lmax[i]
# Gaussian fit to object spectrum
object_sl_gaussian_flux = []
object_sl_gaussian_sigma = []
ratio_object_sky_sl_gaussian = []
dif_center_obj_sky = []
spec = spectra[fibre]
object_sl_gaussian_fitted = copy.deepcopy(spec)
object_sl_gaussian_center = []
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in fibre {} of object data...".format(fibre))
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if skip_sl_fit[i]:
if verbose:
print(" SKIPPING SKY LINE {} as located within the range of an emission line!".format(
sl_center[i]))
object_sl_gaussian_flux.append(
float("nan")
) # The value of the SKY SPECTRUM
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
else:
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=sl_gaussian_sigma[i] * 2.355,
broad2=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
)
di = di + 1
if (
resultado[3] > 0
and resultado[5] / 2.355 < maxima_sigma
and resultado[13] > 0
and resultado[14] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
use_sigma = resultado[5] / 2.355
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(use_sigma)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
else:
resultado = fluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigma= 1,
# print sl_center[i],sl_gaussian_sigma[i], resultado[5]/2.355, maxima_sigma
if (
resultado[3] > 0 and resultado[5] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(resultado[5] / 2.355)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
                    skip_sl_fit[i] = True # We don't subtract this fit
ratio_object_sky_sl_gaussian.append(
old_div(object_sl_gaussian_flux[i], sl_gaussian_flux[i])
) # TODO: to remove once sky_line_fitting is active and we can do 1Dfit
# Scale sky lines that are located in emission lines or provided negative values in fit
# reference_sl = 1 # Position in the file! Position 1 is sky line 6363.4
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
if verbose:
print("\n> Correcting skylines for which we couldn't get a Gaussian fit...\n")
for i in range(number_sl):
if skip_sl_fit[i] == True:
# Use known center, sigma of the sky and peak
gauss_fix = sl_gaussian_sigma[i]
small_center_correction = 0.0
# Check if center of previous sky line has a small difference in wavelength
small_center_correction = np.nanmedian(dif_center_obj_sky[0:i])
if verbose:
print("- Small correction of center wavelength of sky line {} : {}".format(
sl_center[i], small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
sl_center[i] + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
            # Subtract second Gaussian if needed !!!!!
for di in range(len(dsky1) - 1):
if sl_center[i] == dsky1[di]:
if verbose:
                        print(" This was a double sky line, also subtracting {} at {}".format(
dsky2[di], np.array(dsky2[di]) + small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
np.array(dsky2[di]) + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# wmin,wmax = 6100,6500
# ymin,ymax= -100,400
#
# wmin,wmax = 6350,6700
# wmin,wmax = 7100,7700
# wmin,wmax = 7600,8200
# wmin,wmax = 8200,8500
# wmin,wmax = 7350,7500
# wmin,wmax=6100, 8500 #7800, 8000#6820, 6850 #6700,7000 #6300,6450#7500
# wmin,wmax = 8700,9300
# ymax=800
if plot:
plt.figure(figsize=(11, 4))
plt.plot(w, spec, "y", alpha=0.7, label="Object")
plt.plot(
w,
object_sl_gaussian_fitted,
"k",
alpha=0.5,
label="Obj - sky fitted",
)
plt.plot(w, sky_sl_gaussian_fitted, "r", alpha=0.5, label="Sky fitted")
plt.plot(w, spec - sky, "g", alpha=0.5, label="Obj - sky")
plt.plot(
w,
object_sl_gaussian_fitted - sky_sl_gaussian_fitted,
"b",
alpha=0.9,
label="Obj - sky fitted - rest sky",
)
plt.xlim(wmin, wmax)
plt.ylim(ymin, ymax)
        ptitle = "Fibre " + str(fibre) # +" with rms = "+str(rms[i])
plt.title(ptitle)
        plt.xlabel(r"Wavelength [$\AA$]")
plt.ylabel("Flux [counts]")
plt.legend(frameon=True, loc=2, ncol=5)
plt.minorticks_on()
for i in range(len(el_list)):
plt.axvline(x=el_list[i], color="k", linestyle="--", alpha=0.5)
for i in range(number_sl):
if sl_fnl[i] == 1:
plt.axvline(
x=sl_center[i], color="brown", linestyle="-", alpha=1
)
else:
plt.axvline(
x=sl_center[i], color="y", linestyle="--", alpha=0.6
)
for i in range(len(dsky2) - 1):
plt.axvline(x=dsky2[i], color="orange", linestyle="--", alpha=0.6)
# plt.show()
# plt.close()
offset = np.nanmedian(
np.array(object_sl_gaussian_center) - np.array(sl_gauss_center)
)
if verbose:
# reference_sl = 1 # Position in the file!
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
# print "\n n line fsky fspec fspec/fsky l_obj-l_sky fsky/6363.4 sigma_sky sigma_fspec"
# #print "\n n c_object c_sky c_obj-c_sky"
# for i in range(number_sl):
# if skip_sl_fit[i] == False: print "{:2} {:6.1f} {:8.2f} {:8.2f} {:7.4f} {:5.2f} {:6.3f} {:6.3f} {:6.3f}" .format(i+1,sl_center[i],sl_gaussian_flux[i],object_sl_gaussian_flux[i],ratio_object_sky_sl_gaussian[i],object_sl_gaussian_center[i]-sl_gauss_center[i],sl_ref_ratio[i],sl_gaussian_sigma[i],object_sl_gaussian_sigma[i])
# #if skip_sl_fit[i] == False: print "{:2} {:9.3f} {:9.3f} {:9.3f}".format(i+1, object_sl_gaussian_center[i], sl_gauss_center[i], dif_center_obj_sky[i])
#
print("\n> Median center offset between OBJ and SKY : {} A\n> Median gauss for the OBJECT {} A".format(offset, np.nanmedian(object_sl_gaussian_sigma)))
print("> Median flux OBJECT / SKY = {}".format(np.nanmedian(ratio_object_sky_sl_gaussian)))
self.wavelength_offset_per_fibre.append(offset)
# plt.plot(object_sl_gaussian_center, ratio_object_sky_sl_gaussian, "r+")
if auto_scale_sky:
if verbose:
                    print("\n> As requested, using this value to scale sky spectrum before subtraction... ")
                auto_scale = np.nanmedian(ratio_object_sky_sl_gaussian)
#===========================================#
# #
# #
#----------CROSSWALK RECOGNITION------------#
#-----------WRITTEN BY N.DALAL--------------#
#-----------------2017 (c)------------------#
# #
# #
#===========================================#
#Copyright by <NAME>, 2017 (c)
#Licensed under the MIT License:
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import cv2
import math
import scipy.misc
import PIL.Image
import statistics
import timeit
import glob
from sklearn import linear_model, datasets
#==========================#
#---------functions--------#
#==========================#
#get a line from a point and unit vectors
def lineCalc(vx, vy, x0, y0):
scale = 10
x1 = x0+scale*vx
y1 = y0+scale*vy
m = (y1-y0)/(x1-x0)
b = y1-m*x1
return m,b
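#illustrative example: a line through (0,1) with direction vector (1,2) has slope 2
#and intercept 1, so lineCalc(1, 2, 0, 1) returns (2.0, 1.0), i.e. y = 2x + 1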
#the angle at the vanishing point
def angle(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
inner_product = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
print(len1)
print(len2)
a=math.acos(inner_product/(len1*len2))
return a*180/math.pi
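#illustrative example: for perpendicular unit vectors, angle((1, 0), (0, 1))
#evaluates acos(0) and returns 90.0 degrees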
#vanishing point - cramer's rule
def lineIntersect(m1,b1, m2,b2) :
#a1*x+b1*y=c1
#a2*x+b2*y=c2
#convert to cramer's system
a_1 = -m1
b_1 = 1
c_1 = b1
a_2 = -m2
b_2 = 1
c_2 = b2
d = a_1*b_2 - a_2*b_1 #determinant
dx = c_1*b_2 - c_2*b_1
dy = a_1*c_2 - a_2*c_1
intersectionX = dx/d
intersectionY = dy/d
return intersectionX,intersectionY
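#illustrative example: y = 2x + 1 and y = -x + 4 meet at x = 1, y = 3,
#so lineIntersect(2, 1, -1, 4) returns (1.0, 3.0) via Cramer's rule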
#process a frame
def process(im):
start = timeit.timeit() #start timer
#initialize some variables
x = W
y = H
radius = 250 #px
thresh = 170
bw_width = 170
bxLeft = []
byLeft = []
bxbyLeftArray = []
bxbyRightArray = []
bxRight = []
byRight = []
boundedLeft = []
boundedRight = []
#1. filter the white color
lower = np.array([170,170,170])
    upper = np.array([255,255,255])
'''
This script reads the results of the previous script, 4_DictL_generate_test_commands.py, and prepares a graph that shows the
statistics of the DictL test runs (for Fig. 5 in the paper).
(c) <NAME>, UC Berkeley, 2021
'''
import numpy as np
import matplotlib.pyplot as plt
R = np.array([4])
N_examples = 122 # the number of slices in our test set
#data_type_str = 'test'
pad_ratio_vec = np.array([1,1.25,1.5,1.75,2])
sampling_type_vec = np.array([1,2]) # 0 = random, 1 = strong var-dens, 2 = weak var-dens
# initialize arrays
DictL_NRMSE_test_set = np.zeros((N_examples,pad_ratio_vec.shape[0],sampling_type_vec.shape[0]))
# -*- coding: utf-8 -*-
"""
Copyright Netherlands eScience Center
Function : Forecast Lorenz 84 model - Train BayesConvLSTM model
Author : <NAME>
First Built : 2020.03.09
Last Update : 2020.04.12
Library : Pytorch, Numpy, NetCDF4, os, iris, cartopy, dlacs, matplotlib
Description : This notebook serves to predict the Lorenz 84 model using deep learning. The Bayesian Convolutional
               Long Short-Term Memory neural network is used to deal with this spatial-temporal sequence problem.
We use Pytorch as the deep learning framework.
Return Values : pkl model and figures
"""
import sys
import warnings
import numbers
import logging
import time as tttt
# for data loading
import os
from netCDF4 import Dataset
# for pre-processing and machine learning
import numpy as np
import sklearn
#import scipy
import torch
import torch.nn.functional
#sys.path.append(os.path.join('C:','Users','nosta','ML4Climate','Scripts','DLACs'))
#sys.path.append("C:\\Users\\nosta\\ML4Climate\\Scripts\\DLACs")
sys.path.append("../")
import dlacs
import dlacs.BayesConvLSTM
import dlacs.preprocess
import dlacs.function
# for visualization
import dlacs.visual
import matplotlib
# Generate images without having a window appear
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import iris # also helps with regriding
import cartopy
import cartopy.crs as ccrs
# ignore all the DeprecationWarnings by pytorch
if not sys.warnoptions:
warnings.simplefilter("ignore")
# constants
constant = {'g' : 9.80616, # gravititional acceleration [m / s2]
'R' : 6371009, # radius of the earth [m]
'cp': 1004.64, # heat capacity of air [J/(Kg*K)]
'Lv': 2500000, # Latent heat of vaporization [J/Kg]
'R_dry' : 286.9, # gas constant of dry air [J/(kg*K)]
'R_vap' : 461.5, # gas constant for water vapour [J/(kg*K)]
'rho' : 1026, # sea water density [kg/m3]
}
# calculate the time for the code execution
start_time = tttt.time()
#################################################################################
######### datapath ########
#################################################################################
# ** Reanalysis **
# **ERA-Interim** 1979 - 2016 (ECMWF)
# **ORAS4** 1958 - 2014 (ECMWF)
# please specify data path
datapath = '/projects/0/blueactn/dataBayes'
output_path = '/home/lwc16308/BayesArctic/DLACs/models/'
#################################################################################
######### main ########
#################################################################################
# set up logging files
logging.basicConfig(filename = os.path.join(output_path,'logFile_Lorenz84_train.log'),
filemode = 'w+', level = logging.DEBUG,
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('matplotlib.font_manager').disabled = True
if __name__=="__main__":
print ('*********************** get the key to the datasets *************************')
#################################################################################
########### configure Lorenz 84 model ###########
#################################################################################
logging.info("Configure Lorenz 84 model")
    # Lorenz parameters and initial conditions
x_init = 1.0 # strength of the symmetric globally encircling westerly current
    y_init = 1.0 # strength of the cosine phases of a chain of superposed waves (large scale eddies)
    z_init = 1.0 # strength of the sine phases of a chain of superposed waves (large scale eddies)
F = 8.0 # thermal forcing term
G = 1.0 # thermal forcing term
a = 0.25 # stiffness factor for westerly wind x
b = 4.0 # advection strength of the waves by the westerly current
    # assuming the damping time for the waves is 5 days (Lorenz 1984)
dt = 0.0333 # 1/30 unit of time unit (5 days)
num_steps = 1500
# cut-off point of initialization period
cut_off = 300
logging.info("#####################################")
logging.info("Summary of Lorenz 84 model")
logging.info("x = 1.0 y = 1.0 z = 1.0")
logging.info("F = 8.0 G = 1.0 a = 0.25 b = 4.0")
logging.info("unit time step 0.0333 (~5days)")
logging.info("series length 1500 steps")
logging.info("cut-off length 300 steps")
logging.info("#####################################")
#################################################################################
    ########### Lorenz 84 model ###########
#################################################################################
def lorenz84(x, y, z, a = 0.25, b = 4.0, F = 8.0, G = 1.0):
"""
        Solver of Lorenz-84 model.
param x, y, z: location in a 3D space
param a, b, F, G: constants and forcing
"""
dx = - y**2 - z**2 - a * x + a * F
dy = x * y - b * x * z - y + G
dz = b * x * y + x * z - z
return dx, dy, dz
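    # Worked example: with the default parameters and x = y = z = 1,
    # dx = -1 - 1 - 0.25*1 + 0.25*8 = -0.25, dy = 1*1 - 4*1*1 - 1 + 1 = -3.0,
    # dz = 4*1*1 + 1*1 - 1 = 4.0, so lorenz84(1.0, 1.0, 1.0) returns (-0.25, -3.0, 4.0).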
#################################################################################
########### Launch Lorenz 84 model ###########
#################################################################################
logging.info("Launch Lorenz 84 model")
# Need one more for the initial values
x = np.empty(num_steps)
y = np.empty(num_steps)
z = np.empty(num_steps)
# save initial values
x[0] = x_init
y[0] = y_init
z[0] = z_init
# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps-1):
dx, dy, dz = lorenz84(x[i], y[i], z[i])
x[i + 1] = x[i] + (dx * dt)
y[i + 1] = y[i] + (dy * dt)
z[i + 1] = z[i] + (dz * dt)
#################################################################################
########### Prepare Lorenz 84 model output for learning ###########
#################################################################################
# time series cut-off
x = x[cut_off:]
y = y[cut_off:]
z = z[cut_off:]
print ('=================== normalize data =====================')
x_norm = dlacs.preprocess.operator.normalize(x)
y_norm = dlacs.preprocess.operator.normalize(y)
z_norm = dlacs.preprocess.operator.normalize(z)
print('================ save the normalizing factor =================')
x_max = np.amax(x)
x_min = np.amin(x)
y_max = np.amax(y)
    y_min = np.amin(y)
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras.callbacks import Callback
from .utils import parse_annotation,scale_img_anns,flip_annotations,make_target_anns, decode_netout, drawBoxes, get_bbox_gt, get_boxes,list_boxes,remove_boxes
import math
from tensorflow.keras.models import save_model
from mean_average_precision.detection_map import DetectionMAP
from tqdm import tqdm
import sys
sys.path.append("..")
from gen_utils import remExt, hor_con, save_prev_metrics
from .models import custom_preprocess
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import datetime
def plot_loss(name,epoch,losses):
fig = plt.figure()
plt.plot(losses)
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss','val_loss'])
plt.grid()
fig.savefig('./det_output/training_loss_'+name+'.png')
plt.close()
return
def plot_map(name,epoch,metrics):
fig = plt.figure()
plt.plot(metrics)
plt.title('Model mAP')
plt.ylabel('mAP')
plt.xlabel('Epoch')
plt.legend(['map'])
plt.grid()
fig.savefig('./det_output/val_map_'+name+'.png')
plt.close()
return
class det_callback(Callback):
def on_train_begin(self, logs={}):
for layer in self.model.layers:
if (layer.name == 'class_branch'):
self.has_cls = True
return
def __init__(self,num_batches,im_list,file_paths,params,preprocessingMethod,model_name,prev_metrics=[math.inf,math.inf],vis=1):
self.im_list = im_list
self.yolo_params = params
self.preprocessingMethod = preprocessingMethod
self.num_batches = num_batches
self.losses = []
self.metrics = []
self.plt_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
self.loss_metrics = prev_metrics
self.model_name = model_name
self.best_epoch = 0
self.im_path = file_paths[0]
self.ann_path = file_paths[1]
self.has_cls = False
self.vis = vis
self.map = 0.
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self,epoch, logs={}):
print('\t Best Epoch: ', self.best_epoch)
self.pbar = tqdm(total=self.num_batches+1)
return
def on_epoch_end(self, epoch, logs={}):
self.losses.append([logs['loss'],logs['val_loss']])
if(np.mod(epoch+1,100)==0):
save_model(self.model, './saved_models/' + self.model_name + '_' + str(epoch+1) + '_.h5')
self.model.save_weights('./saved_models/' + self.model_name + '_' + str(epoch+1) + '_weights.h5')
print('\t -> Saving Checkpoint...')
plot_loss(self.plt_name+'_'+self.model_name,epoch,self.losses)
self.pbar.close()
frames=[]
for i in range(len(self.im_list)):
name = remExt(self.im_list[i])
WIDTH = self.yolo_params.NORM_W
HEIGHT = self.yolo_params.NORM_H
img_in = cv2.imread(self.im_path + name + '.jpg')
if (self.yolo_params.annformat == 'pascalvoc'):
train_ann = self.ann_path + name + '.xml'
if (self.yolo_params.annformat == 'OID'):
train_ann = self.ann_path + name + '.txt'
bboxes = parse_annotation(train_ann, self.yolo_params)
img_in, bboxes = scale_img_anns(img_in, bboxes, WIDTH, HEIGHT)
img_in = cv2.cvtColor(img_in, cv2.COLOR_BGR2RGB)
img = img_in.astype(np.float32)
if (self.preprocessingMethod == None):
img = custom_preprocess(img)
else:
img = self.preprocessingMethod(img)
img = np.expand_dims(img, 0)
net_out = self.model.predict(img, batch_size=1)
pred = net_out.squeeze()
image, boxes = decode_netout(img_in.copy(), pred, self.yolo_params, False, False, t_c=0.1, nms_thresh=0.5)
b = []
sc = []
l = []
idxs = []
for box in boxes:
b.append([box.xmin, box.ymin, box.xmax, box.ymax])
sc.append(box.get_score())
l.append(box.get_label())
do_nms=False
if (len(boxes) > 1 and do_nms==True):
            idxs = cv2.dnn.NMSBoxes(b, np.array(sc, dtype=float), 0.1, 0.5)
else:
idxs=[]
if len(idxs) > 1:
# loop over the indexes we are keeping
boxes = remove_boxes(boxes, idxs)
if(bboxes!=[]):
gt_boxesx1y1x2y2 = np.array(bboxes[:, :4], dtype=np.float32)
gt_labels = np.array(bboxes[:, 4], dtype=np.float32)
else:
gt_boxesx1y1x2y2 = np.array([], dtype=np.float32)
gt_labels = np.array([], dtype=np.float32)
if (boxes == []):
bb = np.array([])
            sc = np.array([])
import numpy as np
import scipy.stats
from scipy import ndimage
from scipy.optimize import curve_fit
from imutils import nan_to_zero
# try to use cv2 for faster image processing
try:
import cv2
cv2.connectedComponents # relatively recent addition, so check presence
opencv_found = True
except (ImportError, AttributeError):
opencv_found = False
def measure_of_chaos(im, nlevels, overwrite=True, statistic=None):
"""
Compute a measure for the spatial chaos in given image using the level sets method.
:param im: 2d array
:param nlevels: how many levels to use
:type nlevels: int
:param overwrite: Whether the input image can be overwritten to save memory
:type overwrite: bool
:param statistic: callable that calculates a score (a number) for the object counts in the level sets. If
specified, this statistic will be used instead of the default one. The callable must take two arguments - the
object counts (sequence of ints) and the number of non-zero pixels in the original image (int) - and output a number
:return: the measured value
:rtype: float
:raises ValueError: if nlevels <= 0 or q_val is an invalid percentile or an unknown interp value is used
"""
statistic = statistic or _default_measure
# don't process empty images
if np.sum(im) <= 0:
return np.nan
    sum_notnull = np.sum(im > 0)
import io
import os
import zipfile
import numpy as np
from PIL import Image
from chainer.dataset import download
def get_facade():
root = download.get_dataset_directory('study_chainer/facade')
npz_path = os.path.join(root, 'base.npz')
url = 'http://cmp.felk.cvut.cz/~tylecr1/facade/CMP_facade_DB_base.zip'
def creator(path):
archive_path = download.cached_download(url)
images = []
labels = []
with zipfile.ZipFile(archive_path, 'r') as archive:
for i in range(1, 378+1):
image_name = 'base/cmp_b{:04d}.jpg'.format(i)
label_name = 'base/cmp_b{:04d}.png'.format(i)
image = Image.open(io.BytesIO(archive.read(image_name)))
image = np.asarray(image)
images.append(image)
label = Image.open(io.BytesIO(archive.read(label_name)))
                label = np.asarray(label)
from ctypes import *
import numpy as np
import math
import keyboard
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D
class infoformat(Structure):
_fields_ = [\
("posx",c_double),("posy",c_double),("posz",c_double),\
("velocityx",c_double),("velocityy",c_double),("velocityz",c_double),\
("accx",c_double),("accy",c_double),("accz",c_double),\
("thetax",c_double),("thetay",c_double),("thetaz",c_double),\
("posx_t",c_double),("posy_t",c_double),("posz_t",c_double),\
("velocityx_t",c_double),("velocityy_t",c_double),("velocityz_t",c_double),\
("accx_t",c_double),("accy_t",c_double),("accz_t",c_double),\
("thetax_t",c_double),("thetay_t",c_double),("thetaz_t",c_double),\
("thrust",c_double)\
]
class imagecoor(Structure):
_fields_ = [\
("u",c_double),("v",c_double),("w",c_double)]
#windows version interface
dronesimapi = CDLL('./drone_sim.so')
#set input type
dronesimapi.siminit.argtype = [c_double,c_double,c_double,c_double,c_double,c_double,\
c_double,c_double,c_double,c_double,c_double,c_double,\
c_double,c_double,c_double,c_double]
dronesimapi.simrun.argtype = [c_double,c_double,c_double,\
c_double,c_double,c_double,\
c_ulonglong]
#set output type
dronesimapi.siminfo.restype = POINTER(infoformat)
dronesimapi.simprojection.argtype = [c_double,c_double,c_double,c_double,c_double,c_double,\
c_double,c_double,c_double,\
c_double,c_double]
dronesimapi.simprojection.restype = POINTER(imagecoor)
dronesimapi.installcamera.argtype = [c_double,c_double,c_double,\
c_double,c_double,c_double,c_double,c_double,c_double]
#interface warper:
def siminit(pos_hunter, ori_hunter, pos_target, ori_target,speed_upbound_hunter,speed_upbound_target,\
yawdot_bound_hunter = 180,yawdot_bound_target = 180):
dronesimapi.siminit(c_double(pos_hunter[0]),c_double(pos_hunter[1]),c_double(pos_hunter[2]),\
c_double(ori_hunter[0]),c_double(ori_hunter[1]),c_double(ori_hunter[2]),\
c_double(pos_target[0]),c_double(pos_target[1]),c_double(pos_target[2]),\
c_double(ori_target[0]),c_double(ori_target[1]),c_double(ori_target[2]),\
c_double(speed_upbound_hunter),c_double(speed_upbound_target),\
c_double(yawdot_bound_hunter),c_double(yawdot_bound_target))
def simrun(period,huntercmd,targetcmd = None):
# input : period time in second
if targetcmd:
dronesimapi.simrun(c_double(huntercmd[0]),c_double(huntercmd[1]),c_double(huntercmd[2]),c_double(huntercmd[3]),\
c_double(targetcmd[0]),c_double(targetcmd[1]),c_double(targetcmd[2]),c_double(targetcmd[3]),\
c_ulonglong(period))
else:
dronesimapi.simrun(c_double(huntercmd[0]),c_double(huntercmd[1]),c_double(huntercmd[2]),c_double(huntercmd[3]),\
c_double(0),c_double(0),c_double(0),c_double(0),\
c_ulonglong(period))
def siminfo():
outinfo = dronesimapi.siminfo()
pos_hunter = np.array([outinfo.contents.posx,outinfo.contents.posy,outinfo.contents.posz])
ori_hunter = np.array([outinfo.contents.thetax,outinfo.contents.thetay,outinfo.contents.thetaz])
acc_hunter = np.array([outinfo.contents.accx,outinfo.contents.accy,outinfo.contents.accz])
pos_target = np.array([outinfo.contents.posx_t,outinfo.contents.posy_t,outinfo.contents.posz_t])
ori_target = np.array([outinfo.contents.thetax_t,outinfo.contents.thetay_t,outinfo.contents.thetaz_t])
acc_target = np.array([outinfo.contents.accx_t,outinfo.contents.accy_t,outinfo.contents.accz_t])
return pos_hunter,ori_hunter,acc_hunter,pos_target,ori_target,acc_target,outinfo.contents.thrust
def projection(pos_hunter,ori_hunter,pos_target,w,h):
outcoor = dronesimapi.simprojection(c_double(pos_hunter[0]),c_double(pos_hunter[1]),c_double(pos_hunter[2]),\
c_double(ori_hunter[0]),c_double(ori_hunter[1]),c_double(ori_hunter[2]),\
c_double(pos_target[0]),c_double(pos_target[1]),c_double(pos_target[2]),\
c_double(w),c_double(h))
u,v,w = outcoor.contents.u,outcoor.contents.v,outcoor.contents.w
inscrean = True
if math.isnan(u) or math.isinf(u):
inscrean = False
if math.isnan(v) or math.isinf(v):
inscrean = False
if math.isnan(w) or math.isinf(w):
inscrean = False
if w < 0 or w > 1:
inscrean = False
return u,v,inscrean
def installcamera(installori,F,H,FOVnear,FOVfar):
#F,H is the angle in degrees, FOVnear and FOVfar are the position of near and far plane
def getnearsize(F,H,FOVnear):
F = np.cos(np.radians(F))
H = np.cos(np.radians(H))
D = np.matrix([[F + 1, F - 1],[H - 1, H + 1]])
b = np.matrix([[4 - 4*F],[4 - 4*H]]) * FOVnear**2
upandright = D.I*b
up = np.sqrt(upandright[0,0])/2
right = np.sqrt(upandright[1,0])/2
return up,right
up,right = getnearsize(F,H,FOVnear)
# print(up,right)
dronesimapi.installcamera(c_double(installori[0]),c_double(installori[1]),c_double(installori[2]),\
c_double(-right),c_double(right),c_double(-up),c_double(up),c_double(FOVnear),c_double(FOVfar))
def simstop():
dronesimapi.simstop()
##
def cmdfromkeyboard():
rolldict = {'a':-1,'d':1}
pitchdict = {'w':1,'s':-1}
yawdict = {'q':-1,'e':1}
throttledict = {'-':-1,'=':1}
def checkkeyboard(keydict,default_val):
for key in keydict.keys():
if keyboard.is_pressed(key):
return keydict[key]
return default_val
roll = checkkeyboard(rolldict,0)
pitch = checkkeyboard(pitchdict,0)
yaw = checkkeyboard(yawdict,0)
throttle = checkkeyboard(throttledict,0)
return roll,pitch,yaw,throttle
class visualdrone():
def __init__(self,viewrange = 50,arrowlen = 5):
self.range = viewrange
self.rawlen = self.range/arrowlen
fig = pl.figure(0)
self.axis3d = fig.add_subplot(111, projection='3d')
def render(self,pos_hunter,ori_hunter,pos_target,ori_target):
def Rot_bn(o):
A =o[2]
B =o[1]
C =o[0]
            # rotation matrix for ZYX (yaw A, pitch B, roll C) Euler angles
            R = np.array([[np.cos(A)*np.cos(B), np.cos(A)*np.sin(B)*np.sin(C)-np.sin(A)*np.cos(C), np.cos(A)*np.sin(B)*np.cos(C)+np.sin(A)*np.sin(C)],
                          [np.sin(A)*np.cos(B), np.sin(A)*np.sin(B)*np.sin(C)+np.cos(A)*np.cos(C), np.sin(A)*np.sin(B)*np.cos(C)-np.cos(A)*np.sin(C)],
                          [-np.sin(B), np.cos(B)*np.sin(C), np.cos(B)*np.cos(C)]])
            return R
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import (assert_allclose, assert_equal, assert_,
run_module_suite, assert_raises)
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = np.array([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
class BaseMixin(object):
def __init__(self):
self.rnd = np.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = np.array([-1, -10])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
lb = np.array([0.0, -np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
atol=1e-6)
lb = np.array([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.448427311733504, 0]),
atol=1e-15)
ub = np.array([np.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-0.105560998682388, -5]))
ub = np.array([-1, np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-1, -4.181102129483254]))
lb = np.array([0, -4])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = np.array([[-0.307, -0.184]])
        b = np.array([0.773])
'''
Name: load_ops.py
Desc: Input pipeline using feed dict method to provide input data to model.
Some of this code is taken from <NAME>'s colorzation github
and python caffe library.
Other parts of this code have been taken from <NAME>'s library
'''
from __future__ import absolute_import, division, print_function
import itertools
import json
import math
import numpy as np
from numpy import linalg as LA
import os
from PIL import Image
import PIL
import pdb
import pickle
import random
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
import skimage
import skimage.io
from skimage.transform import resize
import sklearn.neighbors as nn
import string
import subprocess
import sys
# import tensorflow as tf
from transforms3d import euler
import transforms3d
import traceback as tb
# if tf.__version__ == '0.10.0':
# tf_summary_scalar = tf.scalar_summary
# else:
# tf_summary_scalar = tf.summary.scalar
#######################
# Loading fns
#######################
def load_scaled_image( filename, color=True ):
"""
Load an image converting from grayscale or alpha as needed.
From KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
By kchen
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_gray=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def load_raw_image( filename, color=True, use_pil=False ):
"""
Load an image converting from grayscale or alpha as needed.
Adapted from KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with image original dtype and image pixel range
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
if use_pil:
img = Image.open( filename )
else:
img = skimage.io.imread(filename, as_gray=not color)
if use_pil:
return img
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
#########################
# Image manipulation fns
#########################
def resize_rescale_imagenet(img, new_dims, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = img[:,:,[2,1,0]] * 255.
mean_bgr = [103.062623801, 115.902882574, 123.151630838]
img = img - mean_bgr
return img
def resize_rescale_image_low_sat(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.1, 0.9)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image_low_sat_2(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.2, 0.8)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img ) # between [0,255] (512,512,3)
img = resize_image( img, new_dims, interp_order ) # between [0,1] (512,512,3)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip ) # between [-1,1] (256,256,3)
return img
def resize_rescale_image_gaussian_blur(img, new_dims, new_scale, interp_order=1, blur_strength=4, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=True )
blurred = gaussian_filter(img, sigma=blur_strength)
if not no_clip:
min_val, max_val = new_scale
np.clip(blurred, min_val, max_val, out=blurred)
return blurred
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
    Rescales an image's pixel values to new_scale
Args:
img: A np.float_32 array, assumed between [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
im = skimage.img_as_float(im).astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
return im
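# Worked example: with new_scale=[-1, 1] and a float image already in [0, 1]
# (current_scale=None), each pixel p is mapped to p*2 - 1, so 0.0 -> -1.0,
# 0.25 -> -0.5 and 1.0 -> 1.0.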
def resize_and_rescale_image_log( img, new_dims, offset=1., normalizer=1.):
"""
Resizes and rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
img = np.log( float( offset ) + img ) / normalizer
img = resize_image(img, new_dims)
return img
def rescale_image_log( img, offset=1., normalizer=1. ):
"""
Rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
return np.log( float( offset ) + img ) / normalizer
################
# Curvature #
#################
def curvature_preprocess(img, new_dims, interp_order=1):
img = resize_image(img, new_dims, interp_order)
img = img[:,:,:2]
img = img - [123.572, 120.1]
img = img / [31.922, 21.658]
return img
def curvature_preprocess_gaussian_with_blur(img, new_dims, interp_order=1, blur_strength=4):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
blurred = gaussian_filter(curv, sigma=blur_strength)
return blurred
def curvature_preprocess_gaussian(img, new_dims, interp_order=1):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
return curv
#################
# Denoising #
#################
def random_noise_image(img, new_dims, new_scale, interp_order=1 ):
"""
Add noise to an image
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a noisy version of the original clean image
"""
img = skimage.util.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.util.random_noise(img, var=0.01)
img = rescale_image( img, new_scale )
return img
#################
# Colorization #
#################
def to_light_low_sat(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = np.clip(img, 0.2, 0.8)
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2)
def to_light(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2)
def to_ab(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into ab
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a ab version of the original image
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,1:]
img = rescale_image( img, new_scale, current_scale=[-100,100])
return img
def ab_image_to_prob(img, new_dims, root, interp_order=1):
"""
Turn an image into a probability distribution across color pair specified in pts_in_hull.npy
It's referencing: https://github.com/richzhang/colorization
Args:
im : (H x W x K) ndarray
Returns:
Color label ground truth across 313 possible ab color combinations
"""
img = resize_image( img, new_dims, interp_order ).astype('uint8')
img = skimage.color.rgb2lab(img)[:,:,1:]
curr_dir = os.path.dirname(os.path.realpath(__file__))
cc = np.load(os.path.join(curr_dir, 'pts_in_hull.npy'))
K = cc.shape[0]
NN = 10
sigma = 5.
nbrs = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(cc)
num_pixels = img.shape[0] * img.shape[1]
img_flattened = img.reshape(num_pixels, img.shape[2])
encoded_flattened = np.zeros((num_pixels, K))
point_index = np.arange(0,num_pixels, dtype='int')[:, np.newaxis]
(dists, inds) = nbrs.kneighbors(img_flattened)
wts = np.exp(-dists**2/(2*sigma**2))
wts = wts/np.sum(wts,axis=1)[:,np.newaxis]
encoded_flattened[point_index, inds] = wts
encoded = encoded_flattened.reshape([img.shape[0], img.shape[1], K])
############## Prior Boost Mask #################
prior_factor = np.load(os.path.join(curr_dir, 'prior_factor_in_door.npy'))
encoded_maxid = np.argmax(encoded, axis=-1)
mask = prior_factor[encoded_maxid]
return encoded, mask
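# Note: the soft encoding above follows the Zhang et al. colorization scheme referenced
# in the docstring: each pixel's ab value is spread over its 10 nearest of the 313
# quantized ab bins with Gaussian weights (sigma=5) normalized to sum to 1, and the mask
# holds a per-pixel class-rebalancing weight (prior_factor_in_door.npy) looked up at the
# most likely bin, to counter the dominance of common, desaturated colors.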
###################
# Context Encoder #
###################
def context_encoder_input( img, new_dims, new_scale, interp_order=1 ):
'''
Context encoder input function, substitute the middle section with constant
Returns:
----------
img: with center 1/4 being constant average value
'''
img = resize_rescale_image(img, new_dims, new_scale, interp_order=interp_order)
H,W,K = img.shape
img[ int(H/4):int(3*H/4), int(W/4):int(3*W/4), :] = 0
return img
def context_encoder_output(img, new_dims, new_scale, interp_order=1 ):
'''
Context encoder target function, take out the middle chunk
'''
whole_dims = (new_dims[0]*2, new_dims[1]*2)
img = resize_rescale_image(img, whole_dims, new_scale, interp_order=interp_order)
H,W,_ = img.shape
center_piece = img[ int(H/4):int(H/4)+new_dims[0]
, int(W/4):int(W/4)+new_dims[1], :]
return center_piece
#################################
# Discriminative Target Process #
#################################
def parse_filename( filename ):
"""
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
-----------
filename: a string in the formate specified above.
Returns:
-----------
path_to_root: path to data root directory
domain: domain name
model_id: model id
point_id: point id
view_id: view id
"""
components = filename.split("\\")
domain = components[-2]
name_components = components[-1].split('_')
root_length = len(components) - 3
if len(name_components) == 6:
point_id = name_components[1]
view_id = name_components[3]
elif len(name_components) == 1:
view_id = name_components[0]
point_id = components[root_length + 1]
root = components[0].split("/")
model_id = root[-1]
path_to_root = "/".join(root[0:-1])
return path_to_root, domain, model_id, point_id, view_id
preappend_slash = (filename[0] == '/')
components = filename.split('/')[preappend_slash:]
root_length = len(components) - 3
if preappend_slash:
path_to_root = os.path.join("/" , *components[:root_length])
else:
path_to_root = os.path.join(*components[:root_length])
model_id = components[root_length]
name_components = components[-1].split('_')
if len(name_components) == 6:
domain = components[root_length+1]
point_id = name_components[1]
view_id = name_components[3]
elif len(name_components) == 1:
view_id = name_components[0]
point_id = components[root_length+1]
domain = 'rgb'
return path_to_root, domain, model_id, point_id, view_id
def generate_rgb_image_filename_from_ID(root, model_id, point_id, view_id):
'''
Given the root, model_id, point_id, view_id of an image, return the rgb
file path of that image. The file path is in the format:
/{root}/{model_id}/rgb/
point_{point_id}_view_{view_id}_domain_rgb.png
Parameters:
-----------
root: path to root
model_id: id of the model
point_id: the id number of the point
view_id: the id number of views
Returns:
-----------
path: file path to the image file
'''
filename = "point_{point_id}_view_{view_id}_domain_rgb.png".format(
point_id=point_id, view_id=view_id)
path = os.path.join(root, model_id, 'rgb', filename)
return path
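# Illustrative example (hypothetical paths):
# generate_rgb_image_filename_from_ID('/data', 'model_1', '3', '7')
# returns '/data/model_1/rgb/point_3_view_7_domain_rgb.png'.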
def make_image_filenames( filename, num_input):
'''
Turn one image filename that contains the information of a image pair into multiple
image filenames.
For camera pose matching.
The filename should be in the same format, except the point_id and view_id field is
multiple integers with length num_input separated by commas:
/{PATH_TO_ROOT}/{MODEL_ID}/{domain}/{LIST_OF_POINT_IDS}_
view_{LIST_OF_VIEW_IDS}_{SOMETHING ELSE}
Parameters:
-----------
filename: A filename that in the format specified as above.
num_input: length of the LIST_OF_POINT_IDS
Returns:
-----------
filenames: A list of image filenames
'''
if len(filename.split('/')) == 6 or len(filename.split('/')) == 8 :
return [filename] * num_input
root, domain, model_id, point_ids, view_ids = parse_filename( filename )
model_ids = model_id.split(',')
point_ids = point_ids.split(',')
view_ids = view_ids.split(',')
if len(view_ids) != num_input:
if len(view_ids) == 1 and len(point_ids) == 1:
image_name = generate_rgb_image_filename_from_ID(root, model_id, point_ids[0], view_ids[0])
image_name = [image_name] * num_input
return image_name
else:
raise ValueError("num_input doesn't match the length of view_ids")
filenames = []
if len(point_ids) == 1:
point_id = point_ids[0]
for index in range(num_input):
view_id = view_ids[index]
filenames.append(generate_rgb_image_filename_from_ID(root, model_id, point_id, view_id))
else:
for index in range(num_input):
view_id = view_ids[index]
point_id = point_ids[index]
if len(model_ids) > 1:
model_i = model_ids[index]
else:
model_i = model_id
filenames.append(generate_rgb_image_filename_from_ID(root, model_i, point_id, view_id))
return filenames
###################
# Point Matching #
###################
def point_match_new( filename ):
model_ids = filename.split('/')[0]
if len(model_ids.split(',')) == 2:
return 0
point_ids = filename.split('/')[-2]
if len(point_ids.split(',')) == 2:
return 0
return 1
################################
# Camera Pose Helper functions #
################################
def parse_fixated_filename( filename ):
"""
Fixated filename is stored in similar format as single filename, but with multiple views
Return a list of filenames that has root directory specifid by root_dir
Parameters:
-----------
filename: filename in the specific format
Returns:
-----------
full_paths: a list of full path to camera pose info for the point-view pair
"""
root, domain, model_id, point_id, num_views = parse_filename( filename )
view_ids = num_views.split(',')
new_domain = "fixatedpose"
domain = "points"
full_paths = []
for view_id in view_ids:
filename = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_id,
view_id=view_id,
domain=new_domain)
full_path = os.path.join(root, model_id, domain, filename)
full_paths.append(full_path)
return full_paths
def parse_nonfixated_filename( filename ):
"""
Nonfixated filename is stored in the format:
'/{ROOT}/{MODEL_ID}/{POINT_IDS}/{VIEW_IDS}'
POINT_IDS and VIEW_IDS are lists that are separated by comma.
Return a list of filenames that has root directory specifid by root_dir
Parameters:
-----------
filename: filename in the specific format
Returns:
-----------
full_paths: a list of full path to camera pose info for the point-view pair
"""
root, domain, model_id, num_points, num_views = parse_filename( filename )
point_ids = num_points.split(',')
view_ids = num_views.split(',')
domain = "points"
new_domain = "fixatedpose"
full_path = []
for i in range(len(point_ids)):
filename = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_ids[i],
view_id=view_ids[i],
domain=new_domain)
full_path_i = os.path.join(root, model_id, domain, filename)
full_path.append(full_path_i)
return full_path
def calculate_relative_camera_location(full_path1, full_path2):
"""
Given two file path to two json files, extract the 'camera_location'
and 'camera_rotation_final' field, and calcualte the relative camera pose
Parameters:
__________
full_path1, full_path2: paths to json information
Returns:
__________
camera_poses: vector that encode the camera pose info for two images
"""
assert os.path.isfile(full_path1) and os.path.isfile(full_path2)
with open(full_path1, 'r') as fp:
data1 = json.load(fp)
with open(full_path2, 'r') as fp:
data2 = json.load(fp)
key = ['camera_location', 'camera_rotation_final']
location1 = data1[key[0]]
location2 = data2[key[0]]
translation = np.asarray(location1) - np.asarray(location2)
return translation
def calculate_relative_camera_pose(full_path1, full_path2, fixated=True, raw=False):
"""
Given two file path to two json files, extract the 'camera_location'
and 'camera_rotation_final' field, and calcualte the relative camera pose
Parameters:
__________
full_path1, full_path2: paths to json information
Returns:
__________
camera_poses: vector that encode the camera pose info for two images
"""
assert os.path.isfile(full_path1) and os.path.isfile(full_path2)
with open(full_path1, 'r') as fp:
data1 = json.load(fp)
with open(full_path2, 'r') as fp:
data2 = json.load(fp)
key = ['camera_location', 'camera_rotation_final']
location1 = np.asarray(data1[key[0]])
rotation1 = data1[key[1]]
matrix1 = euler.euler2mat(*rotation1, axes='sxyz')
location2 = np.asarray(data2[key[0]])
rotation2 = data2[key[1]]
matrix2 = euler.euler2mat(*rotation2, axes='sxyz')
relative_rotation_matrix = np.matmul(np.transpose( matrix2 ), matrix1)
relative_rotation = euler.mat2euler(relative_rotation_matrix, axes='sxyz')
translation = np.matmul(np.transpose(matrix2), location1 - location2)
pose = np.hstack((relative_rotation, translation))
if not raw:
if fixated:
std = np.asarray([ 10.12015407, 8.1103528, 1.09171896, 1.21579016, 0.26040945, 10.05966329])
mean = np.asarray([ -2.67375523e-01, -1.19147040e-02, 1.14497274e-02, 1.10903410e-03, 2.10509948e-02, -4.02013549e+00])
else:
mean = np.asarray([ -9.53197445e-03, -1.05196691e-03, -1.07545642e-02,
2.08785638e-02, -9.27858049e-02, -2.58052205e+00])
std = np.asarray([ 1.02316223, 0.66477511, 1.03806996, 5.75692889, 1.37604962,
7.43157247])
pose = (pose - mean)/std
return pose
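# Note: with R1, R2 the world rotation matrices and p1, p2 the world locations of the two
# cameras, the code above computes the pose of view 1 expressed in the frame of view 2,
# R_rel = R2^T * R1 and t_rel = R2^T * (p1 - p2); the relative rotation is converted back
# to 'sxyz' Euler angles and stacked with the translation into a 6-vector, which is
# optionally standardized with the precomputed dataset mean/std.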
########################################
# Fixated and Non-fixated Camera Pose #
########################################
def nonfixated_camera_pose( filename ):
"""
Return two 6DOF camera pose vectors for two images of nonfixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encode the camera pose info for two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_nonfixated_filename( filename )
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1], fixated=False)
return pose
def nonfixated_camera_rot( filename ):
"""
Return two 6DOF camera pose vectors for two images of nonfixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encode the camera pose info for two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_nonfixated_filename( filename )
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1], fixated=False)
rot = pose[:3]
return rot
def fixated_camera_pose( filename ):
"""
Return two 6DOF camera pose vectors for two images of fixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encode the camera pose info for two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1])
return pose
def fixated_camera_rot( filename ):
"""
Return two 6DOF camera pose vectors for two images of fixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encode the camera pose info for two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1])
rot = pose[:3]
return rot
#################
# Ego-Motion #
#################
def triplet_fixated_egomotion( filename ):
"""
Given a filename that contains 3 different point-view combos, parse the filename
and return the pair-wise camera pose.
Parameters:
-----------
filename: a filename in the specific format.
Returns:
-----------
egomotion: a numpy array of length 18 (3x6).
(a concatanation of 3 6-DOF relative camera pose vector)
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 3 :
raise ValueError("quadruplet first view prediction with list shorter than 3")
# perm = range(3)
# random.shuffle(perm)
#full_paths = [full_paths[i] for i in perm]
poses = []
for i in range(2):
for j in range(i+1, 3):
pose = calculate_relative_camera_pose(full_paths[i], full_paths[j])
poses.append(pose)
poses = np.hstack(poses)
return poses
#################
# Jigsaw #
#################
def jigsaw_rand_index( filename ):
return random.randint(0,99)
def hamming_distance(p1, p2):
'''
Calculate the Hamming distance between two permutations
'''
if len(p1) != len(p2):
raise ValueError('two permutations have different length...')
total_diff = sum(e1 != e2 for e1, e2 in zip(p1, p2))
return total_diff / len(p1)
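# Worked example: hamming_distance([0, 1, 2], [0, 2, 1]) finds 2 differing positions
# out of 3 and returns 2/3.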
def get_max_hamming_distance_index(p, current):
'''
    This function takes in two sets of permutations and calculates which permutation should
    be added to the current set: the permutation that maximizes the sum of
    Hamming distances from the current permutations.
Parameters:
-----------
p: the set of all candidate permutations
current: current set of chosen permutations
Returns:
-----------
next_index: the index in p that maximize Hamming distance
'''
max_index = -1
max_distance = -1
for i in range(len(p)):
entry_i_dist = 0
for j in range(len(current)):
entry_i_dist += hamming_distance(p[i], current[j])
if entry_i_dist > max_distance:
max_index = i
max_distance = entry_i_dist
return max_index, max_distance
def generate_permutation_set(length):
'''
    This function generates the set of maximum-Hamming-distance permutations.
The set has size 100.
Returns:
---------
perm: set with 100 permutations that maximize Hamming distance.
'''
perm = []
total = math.factorial(9)
#p = list(itertools.permutations(range(9)))
p = []
for i in itertools.permutations(range(9)):
p.append(i)
print(i)
print('Finished generating entire set with size {s}'.format(s=len(p)))
p0 = random.randint(0,total-1)
perm.append(p.pop(p0))
for i in range(length-1):
print('entry {x} added...'.format(x=i+1))
next_index,_ = get_max_hamming_distance_index(p, perm)
perm.append(p.pop(next_index))
asset_dir = "../"
store_location = os.path.join( asset_dir, 'jigsaw_max_hamming_set.npy')
with open(store_location, 'wb') as store:
np.save(store, perm)
return perm
def generate_jigsaw_input_with_dropping( img, target, new_dims, new_scale, interp_order=1):
'''
Generate the 9 pieces input for Jigsaw task.
Parameters:
-----------
img: input image
target: length 9 permutation
Return:
-----------
input_imgs: 9 image pieces
'''
if len(target) != 9:
raise ValueError('Target permutation of Jigsaw is supposed to have length 9, getting {x} here'.format(x=len(target)))
img = rescale_image( img, new_scale )
H,W,K = img.shape
to_drop = random.sample(list(range(K)), K-1)
for channel in to_drop:
img[:,:,channel] = np.random.normal(0.0, 0.01, (H,W))
unitH = int(H / 3)
unitW = int(W / 3)
cropH = int(unitH * 0.9)
cropW = int(unitW * 0.9)
startH = unitH - cropH
startW = unitW - cropW
input_imgs = np.empty((9, new_dims[0], new_dims[1], K), dtype=np.float32)
for i in range(9):
pos = target[i]
posH = int(pos / 3) * unitH + random.randint(0, startH)
posW = int(pos % 3) * unitW + random.randint(0, startW)
img_piece = img[posH:posH+cropH,posW:posW+cropW,:]
input_imgs[i,:,:,:] = resize_image(img_piece, new_dims, interp_order)
return input_imgs
def generate_jigsaw_input( img, target, new_dims, new_scale, interp_order=1):
'''
Generate the 9 pieces input for Jigsaw task.
Parameters:
-----------
img: input image
target: length 9 permutation
Return:
-----------
input_imgs: 9 image pieces
'''
if len(target) != 9:
raise ValueError('Target permutation of Jigsaw is supposed to have length 9, getting {x} here'.format(x=len(target)))
img = rescale_image( img, new_scale )
H,W,K = img.shape
unitH = int(H / 3)
unitW = int(W / 3)
cropH = int(unitH * 0.9)
cropW = int(unitW * 0.9)
startH = unitH - cropH
startW = unitW - cropW
input_imgs = np.empty((9, new_dims[0], new_dims[1], K), dtype=np.float32)
for i in range(9):
pos = target[i]
posH = int(pos / 3) * unitH + random.randint(0, startH)
posW = int(pos % 3) * unitW + random.randint(0, startW)
img_piece = img[posH:posH+cropH,posW:posW+cropW,:]
input_imgs[i,:,:,:] = resize_image(img_piece, new_dims, interp_order)
return input_imgs
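# Worked example of the grid arithmetic above (hypothetical 225x225 input after rescaling):
# unitH = unitW = 75, cropH = cropW = 67, startH = startW = 8, so each of the 9 pieces is a
# 67x67 crop jittered by up to 8 px inside its 75x75 grid cell before being resized.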
def generate_jigsaw_input_for_representation_extraction( img, new_dims, new_scale, interp_order=1):
'''
Generate the 9 pieces input for Jigsaw task.
Parameters:
-----------
img: input image
Return:
-----------
input_img: the resized input image (this variant returns a single copy rather than 9 pieces)
'''
img = rescale_image( img, new_scale )
H,W,K = img.shape
return resize_image(img, new_dims, interp_order)
###################
# Vanishing Point #
###################
def get_camera_matrix( view_dict, flip_xy=False ):
position = view_dict[ 'camera_location' ]
rotation_euler = view_dict[ 'camera_rotation_final' ]
R = transforms3d.euler.euler2mat( *rotation_euler, axes='sxyz' )
camera_matrix = transforms3d.affines.compose( position, R, np.ones(3) )
if flip_xy:
# For some reason the x and y are flipped in room layout
temp = np.copy(camera_matrix[0,:])
camera_matrix[0,:] = camera_matrix[1,:]
camera_matrix[1,:] = -temp
return camera_matrix
def get_camera_rot_matrix(view_dict, flip_xy=False):
return get_camera_matrix(view_dict, flip_xy=True)[:3, :3]
def rotate_world_to_cam( points, view_dict ):
cam_mat = get_camera_rot_matrix( view_dict, flip_xy=True )
new_points = cam_mat.T.dot(points).T[:,:3]
return new_points
def vanishing_point( filename ):
'''
Gaussian-sphere (hemisphere) projection of the three orthogonal vanishing points.
Returns:
--------
vanishing_point: length 9 vector
'''
root, domain, model_id, point_id, view_id = parse_filename(filename)
fname = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_id,
view_id=view_id,
domain='fixatedpose')
json_file = os.path.join(root, model_id, 'points', fname)
with open(json_file, 'r') as fp:
data = json.load(fp)
if 'vanishing_points_gaussian_sphere' not in data:
return model_id
vps = data['vanishing_points_gaussian_sphere']
vanishing_point = np.hstack((vps['x'], vps['y'], vps['z']))
return vanishing_point
def rotation_to_make_axes_well_defined(view_dict):
''' Rotates the world coords so that the -z direction of the camera
is within 45-degrees of the global +x axis '''
axes_xyz = np.eye(3)
apply_90_deg_rot_k_times = [
transforms3d.axangles.axangle2mat(axes_xyz[-1], k * math.pi/2)
for k in range(4) ]
global_x = np.array([axes_xyz[0]]).T
global_y = np.array([axes_xyz[1]]).T
best = (180., "Nothing")
for world_rot in apply_90_deg_rot_k_times:
global_x_in_cam = rotate_world_to_cam(
world_rot.dot(global_x), view_dict )
global_y_in_cam = rotate_world_to_cam(
world_rot.dot(global_y), view_dict )
# Project onto camera's horizontal (xz) plane
degrees_away_x = math.degrees(
math.acos(np.dot(global_x_in_cam, -axes_xyz[2]))
)
degrees_away_y = math.degrees(
math.acos(np.dot(global_y_in_cam, -axes_xyz[2]))
)
total_degrees_away = abs(degrees_away_x) + abs(degrees_away_y)
best = min(best, (total_degrees_away, np.linalg.inv(world_rot))) # python is neat
return best[-1]
def vanishing_point_well_defined( filename ):
root, domain, model_id, point_id, view_id = parse_filename(filename)
fname = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_id,
view_id=view_id,
domain='point_info')
json_file = os.path.join(root, model_id, 'point_info', fname)
with open(json_file, 'r') as fp:
data = json.load(fp)
cam_mat = get_camera_matrix( data, flip_xy=True )
world_transformation = rotation_to_make_axes_well_defined(data)
cam_mat[:3,:3] = np.dot(world_transformation, cam_mat[:3, :3])
R = cam_mat[:3,:3]
dist = 1.0
compass_points = [
(dist, 0, 0),
(0, dist, 0),
(0, 0, dist) ]
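# For a pure rotation R, np.linalg.inv(R) equals R.T, so each vanishing point below is
# simply the corresponding world axis direction expressed in camera coordinates.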
vanishing_point = [np.dot( np.linalg.inv(R), p ) for p in compass_points]
return np.array(vanishing_point).flatten()
###############
# Room Layout #
###############
def get_room_layout_cam_mat_and_ranges(view_dict, make_x_major=False):
# Get BB information
bbox_ranges = view_dict['bounding_box_ranges']
# BB seem to be off w.r.t. the camera matrix
ranges = [ bbox_ranges['x'], -np.array(bbox_ranges['y'])[::-1], bbox_ranges['z'] ]
camera_matrix = get_camera_matrix(view_dict, flip_xy=True)
if not make_x_major:
return camera_matrix, ranges
# print(world_points[:,-1])
# print(view_dict['camera_location'])
axes_xyz = np.eye(3)
apply_90_deg_rot_k_times = [
transforms3d.axangles.axangle2mat(axes_xyz[-1], k * math.pi/2)
for k in range(4) ]
def make_world_x_major(view_dict):
''' Rotates the world coords so that the -z direction of the camera
is within 45-degrees of the global +x axis '''
global_x = np.array([axes_xyz[0]]).T
best = (180., "Nothing")
for world_rot in apply_90_deg_rot_k_times:
global_x_in_cam = rotate_world_to_cam(
world_rot.dot(global_x), view_dict )
# Project onto camera's horizontal (xz) plane
degrees_away = math.degrees(
math.acos(np.dot(global_x_in_cam, -axes_xyz[2]))
)
best = min(best, (degrees_away, np.linalg.inv(world_rot))) # python is neat
# if abs(degrees_away) < 45.:
# return np.linalg.inv(world_rot)
return best[-1]
def update_ranges(world_rot, ranges):
new_ranges = np.dot(world_rot, ranges)
for i, rng in enumerate(new_ranges): # make sure rng[0] < rng[1]
if rng[0] > rng[1]:
new_ranges[i] = [rng[1], rng[0]]
return new_ranges
world_rot = np.zeros((4,4))
import os, sys
import pickle, warnings
import pandas as pd
import numpy as np
import pmdarima as pm
from sklearn.linear_model import LinearRegression
# Working directory must be the higher .../app folder
if str(os.getcwd())[-3:] != 'app': raise Exception(f'Working dir must be .../app folder and not "{os.getcwd()}"')
from app.z_helpers import helpers as my
def _download_data_from_sql(data_version='final_data', recache=False):
from app.b_data_cleaning import get_dataset_registry
sql_table_name = get_dataset_registry()[data_version]['sql_table']
query = "SELECT * FROM {}".format(sql_table_name)
param_dic = my.get_credentials(credential='aws_databases')['aws']
cache_folder = os.path.join(my.get_project_directories(key='cache_dir'), 'raw_data')
data_file = os.path.join(cache_folder, (data_version + '.csv'))
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
if recache or not os.path.exists(data_file):
print('Getting raw data via sql...')
with my.postgresql_connect(param_dic) as conn:
df = pd.read_sql_query(query, con=conn)
obj_cols = df.select_dtypes(include='object').columns
df[obj_cols] = df[obj_cols].astype(str)
df.to_csv(data_file, index=False)
with open(data_file[:-4] + '.dtypes', 'wb') as f:
dtypes = df.dtypes.to_dict()
dtypes = dict(zip(dtypes.keys(), [str if i == np.object else i for i in dtypes.values()]))
pickle.dump(dtypes, f)
print('Raw data cached.')
else:
print('Raw data already cached.')
with open(data_file[:-4] + '.dtypes', 'rb') as f:
dtypes = pickle.load(f)
df = pd.read_csv(data_file, dtype=dtypes, index_col=False)
if data_version == 'handpicked_dataset':
app_dir = my.get_project_directories(key='app_dir')
file_path = os.path.join(app_dir, 'a_get_data', 'reuters_eikon', 'key_reuters_fields.csv')
data_dict = pd.read_csv(file_path)
data_dict['Clear Name'] = data_dict['Clear Name'].str.lower()
data_dict = data_dict.set_index('Clear Name')
new_data_dict = data_dict[['Data Type', 'Variable Type']].to_dict(orient='index')
fillnan_cols = []
formula_methods = []
for col in data_dict.columns.tolist():
if col[:8] == 'fillnan_':
fillnan_cols.append(col)
fillnan_cols = sorted(fillnan_cols, key=str.lower)
for index, row in data_dict[fillnan_cols].iterrows():
tmp = row.tolist()
tmp = [x for x in tmp if str(x) != 'nan']
new_data_dict[index]['Fill NaN Rules'] = tmp
for j in [i.split(':')[1] for i in tmp if i.split(':')[0] == 'formula']:
formula_methods.append((index, j))
else:
new_data_dict = None
formula_methods = None
return df, data_file, new_data_dict, formula_methods
def _shift_array(arr, num, fill_value=np.nan):
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result[:] = arr
return result
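# Worked examples:
# _shift_array(np.array([1., 2., 3., 4.]), 1)  -> array([nan, 1., 2., 3.])
# _shift_array(np.array([1., 2., 3., 4.]), -1) -> array([2., 3., 4., nan])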
def _join_x_and_y(x, y, drop_nan=True):
out = np.hstack((x.reshape((-1, 1)), y.reshape((-1, 1))))
if drop_nan:
out = out[~np.isnan(out).any(axis=1)]  # drop rows containing NaN
"""
Double Integrator with noise in observations.
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
import scipy.stats as stats
import sympy as sp
import numpy as np
from sympy.physics.vector import dynamicsymbols as dynamicsymbols
import IPython as ipy
from filterpy.kalman import KalmanFilter
class DoubleIntegratorEnv(gym.Env):
"""
Description:
Double integrator
Observation:
Type: Box(2)
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
Reward:
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self, noise_scale=0.001):
# self.kinematics_integrator = 'euler'
self.kinematics_integrator = 'semi-implicit'
self.nx = 2 # Number of states
self.ny = self.nx # Number of observations
self.nu = 3 # Number of control inputs
self.force_mag = 10.0 # scaling for control input
self.tau = 0.1 # Time step
self.T = 5 # 5 # 10 # Time horizon
self.action_space = spaces.Discrete(self.nu)
self.observation_space = spaces.Box(-np.inf*np.ones(self.ny), np.inf*np.ones(self.ny), dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.t = None
self.x_threshold = 1.0
self.x_dot_threshold = 1.0
self.x_range = [-self.x_threshold, self.x_threshold]
self.x_dot_range = [-self.x_dot_threshold, self.x_dot_threshold]
# Std. dev. of observation noise (pos, vel)
self.noise_scale = noise_scale
self.noise_std_dev = self.noise_scale*np.array([1.0, 1.0])
# Setup Kalman filter
self.kalman_filter = True
self.x0_belief_std_dev = 1.0*np.array([self.x_threshold, self.x_dot_threshold])
if self.kalman_filter:
# A and B matrices for linear system
if self.kinematics_integrator == 'euler':
A = np.array([[1,self.tau],[0,1]])
B = np.array([[0,self.tau]]).T
elif self.kinematics_integrator == 'semi-implicit':
A = np.array([[1,self.tau],[0,1]])
B = np.array([[self.tau**2,self.tau]]).T
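# Semi-implicit Euler: x_dot' = x_dot + tau*u, then x' = x + tau*x_dot' = x + tau*x_dot + tau**2*u,
# which is why B = [tau**2, tau]^T here while the explicit Euler form above uses B = [0, tau]^T.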
else:
raise Exception("Integrator not recognized.")
filter = KalmanFilter(dim_x=self.nx, dim_z=self.ny)
filter.x = np.zeros((self.nx,1)) # Initial state estimate
filter.P = np.diag(self.x0_belief_std_dev**2) # covariance of initial belief
filter.Q = 0.0*np.eye(self.nx) # Process noise
filter.R = np.diag(self.noise_std_dev**2) # Measurement noise
filter.H = np.eye(self.nx) # Measurement function
filter.F = A # State transition matrix
filter.B = B # Control matrix
self.filter = filter
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def add_noise(self, obs):
noise = np.random.normal(np.zeros_like(obs), self.noise_std_dev)
obs_w_noise = obs + noise
return obs_w_noise
def get_p_y_x(self, observations, states):
if (len(states.shape) == 1): # Single query
observations = np.reshape(observations, (self.ny, 1))
states = np.reshape(states, (self.nx, 1))
# Vectorized computation of p_y_x. Expects arrays of shape (nx, num_samples).
num_samples = states.shape[1]
noises = np.repeat(np.reshape(self.noise_std_dev,(self.nx,1)), num_samples, 1)
p_ys_xs = np.prod(stats.norm.pdf(observations, states, noises),0)
return p_ys_xs
def step(self, action):
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
x, x_dot = self.state
# u = self.force_mag if action == 1 else -self.force_mag
if action == 0:
u = 0.0
elif action == 1:
u = self.force_mag
else: # action == 2:
u = -self.force_mag
# elif action == 3:
# u = -0.5*self.force_mag
# else:
# u = -self.force_mag
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * u
elif self.kinematics_integrator == 'semi-implicit': # semi-implicit euler
x_dot = x_dot + self.tau * u
x = x + self.tau * x_dot
else:
raise Exception("Integrator not recognized.")
self.state = (x, x_dot)
# out_of_bounds = bool(
# x < -self.x_threshold
# or x > self.x_threshold
# or theta < -self.theta_threshold_radians
# or theta > self.theta_threshold_radians
# )
# Check if we have gone beyond time horizon
if (self.t > (self.T-1)):
self.steps_beyond_done = self.t - (self.T-1)
done = True
else:
done = False
if done: # done only if beyond time horizon
reward = 0.0
else:
# reward = 1 - out_of_bounds
reward_x = min(-x+self.x_threshold, x+self.x_threshold)
reward_x = reward_x/self.x_threshold
reward_x = min(reward_x, 0.8)/0.8
reward_x = max(0.0, reward_x)
reward_x_dot = min(-x_dot+self.x_dot_threshold, x_dot+self.x_dot_threshold)
reward_x_dot = reward_x_dot/self.x_dot_threshold
reward_x_dot = min(reward_x_dot, 0.8)/0.8
reward_x_dot = max(0.0, reward_x_dot)
reward = (reward_x + reward_x_dot)/2
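# With x_threshold = 1 the position term ramps linearly from 0 at the boundary to 1 once
# |x| <= 0.2 (e.g. x = 0.9 -> 0.125, x = 0.2 -> 1.0); the velocity term behaves the same way.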
if reward > 1:
ipy.embed()
obs_with_noise = self.add_noise(np.array(self.state))
# Kalman filter
if self.kalman_filter:
self.filter.predict(u=u)
self.filter.update(obs_with_noise)
state_estimate = np.reshape(self.filter.x, (self.nx,))
obs_with_noise = state_estimate
# Update time
self.t += 1
return obs_with_noise, reward, done, {}
def reset(self):
self.t = 0
# Uniform distribution
self.state = self.np_random.uniform(low=[self.x_range[0], self.x_dot_range[0]], high=[self.x_range[1],self.x_dot_range[1]])
# # Gaussian distribution
# self.state = self.np_random.normal(np.zeros(self.nx), self.x0_belief_std_dev)
# Generate observation
self.steps_beyond_done = None
obs_w_noise = self.add_noise(np.array(self.state))
# Reset filter
if self.kalman_filter:
self.filter.x = np.zeros((self.nx,1)) # Initial state estimate
self.filter.P = np.diag(self.x0_belief_std_dev**2) # covariance of initial belief
import unittest
import numpy
from cqcpy import test_utils
import cqcpy.spin_utils as spin_utils
import cqcpy.cc_equations as cc_equations
class CCRDMTest(unittest.TestCase):
def setUp(self):
pass
def test_1rdm_opt(self):
no = 4
nv = 8
thresh = 1e-12
T1, T2 = test_utils.make_random_T(no, nv)
L1, L2 = test_utils.make_random_L(no, nv)
pba_ref = cc_equations.ccsd_1rdm_ba(T1, T2, L1, L2)
pba_out = cc_equations.ccsd_1rdm_ba_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pba_ref - pba_out)/numpy.sqrt(pba_ref.size)
self.assertTrue(diff < thresh, "Error in p_ba: {}".format(diff))
pji_ref = cc_equations.ccsd_1rdm_ji(T1, T2, L1, L2)
pji_out = cc_equations.ccsd_1rdm_ji_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pji_ref - pji_out)/numpy.sqrt(pji_ref.size)
self.assertTrue(diff < thresh, "Error in p_ji: {}".format(diff))
pai_ref = cc_equations.ccsd_1rdm_ai(T1, T2, L1, L2)
pai_out = cc_equations.ccsd_1rdm_ai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pai_ref - pai_out)/numpy.sqrt(pai_ref.size)
self.assertTrue(diff < thresh, "Error in p_ai: {}".format(diff))
def test_2rdm_opt(self):
no = 4
nv = 8
thresh = 1e-12
T1, T2 = test_utils.make_random_T(no, nv)
L1, L2 = test_utils.make_random_L(no, nv)
pcdab_ref = cc_equations.ccsd_2rdm_cdab(T1, T2, L1, L2)
pcdab_out = cc_equations.ccsd_2rdm_cdab_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pcdab_ref - pcdab_out)
diff /= numpy.sqrt(pcdab_ref.size)
self.assertTrue(diff < thresh, "Error in p_cdab: {}".format(diff))
pbcai_ref = cc_equations.ccsd_2rdm_bcai(T1, T2, L1, L2)
pbcai_out = cc_equations.ccsd_2rdm_bcai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pbcai_ref - pbcai_out)
diff /= numpy.sqrt(pbcai_ref.size)
self.assertTrue(diff < thresh, "Error in p_bcai: {}".format(diff))
pbjai_ref = cc_equations.ccsd_2rdm_bjai(T1, T2, L1, L2)
pbjai_out = cc_equations.ccsd_2rdm_bjai_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pbjai_ref - pbjai_out)
diff /= numpy.sqrt(pbjai_ref.size)
self.assertTrue(diff < thresh, "Error in p_bjai: {}".format(diff))
pabij_ref = cc_equations.ccsd_2rdm_abij(T1, T2, L1, L2)
pabij_out = cc_equations.ccsd_2rdm_abij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pabij_ref - pabij_out)
diff /= numpy.sqrt(pabij_ref.size)
self.assertTrue(diff < thresh, "Error in p_abij: {}".format(diff))
pkaij_ref = cc_equations.ccsd_2rdm_kaij(T1, T2, L1, L2)
pkaij_out = cc_equations.ccsd_2rdm_kaij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pkaij_ref - pkaij_out)
diff /= numpy.sqrt(pkaij_ref.size)
self.assertTrue(diff < thresh, "Error in p_kaij: {}".format(diff))
pklij_ref = cc_equations.ccsd_2rdm_klij(T1, T2, L1, L2)
pklij_out = cc_equations.ccsd_2rdm_klij_opt(T1, T2, L1, L2)
diff = numpy.linalg.norm(pklij_ref - pklij_out)
diff /= numpy.sqrt(pklij_ref.size)
self.assertTrue(diff < thresh, "Error in p_klij: {}".format(diff))
def test_u1rdm(self):
noa = 3
nva = 5
nob = 2
nvb = 6
thresh = 1e-14
# use unrestricted one-particle property
Aa = test_utils.make_random_F(noa, nva)
Ab = test_utils.make_random_F(nob, nvb)
Atot = spin_utils.F_to_spin(Aa, Ab, noa, nva, nob, nvb)
# get unrestricted and general amplitudes
T1a, T1b = test_utils.make_random_T1_spatial(noa, nva, nob, nvb)
T2aa, T2ab, T2bb \
= test_utils.make_random_T2_spatial(noa, nva, nob, nvb)
L1a, L1b = test_utils.make_random_T1_spatial(nva, noa, nvb, nob)
L2aa, L2ab, L2bb \
= test_utils.make_random_T2_spatial(nva, noa, nvb, nob)
T1 = spin_utils.T1_to_spin(T1a, T1b, noa, nva, nob, nvb)
L1 = spin_utils.T1_to_spin(L1a, L1b, nva, noa, nvb, nob)
T2 = spin_utils.T2_to_spin(T2aa, T2ab, T2bb, noa, nva, nob, nvb)
L2 = spin_utils.T2_to_spin(L2aa, L2ab, L2bb, nva, noa, nvb, nob)
# make general pieces of 1-rdm
pia = L1.copy()
pba = cc_equations.ccsd_1rdm_ba_opt(T1, T2, L1, L2)
pji = cc_equations.ccsd_1rdm_ji_opt(T1, T2, L1, L2)
pai = cc_equations.ccsd_1rdm_ai_opt(T1, T2, L1, L2)
# make unrestricted 1-rdm
pia_a = L1a.copy()
pia_b = L1b.copy()
pba_a, pba_b = cc_equations.uccsd_1rdm_ba(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
pji_a, pji_b = cc_equations.uccsd_1rdm_ji(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
pai_a, pai_b = cc_equations.uccsd_1rdm_ai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
# ia
ref = numpy.einsum('ia,ai->', pia, Atot.vo)
out = numpy.einsum('ia,ai->', pia_a, Aa.vo)
out += numpy.einsum('ia,ai->', pia_b, Ab.vo)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pia: {}".format(diff))
# ba
ref = numpy.einsum('ba,ab->', pba, Atot.vv)
out = numpy.einsum('ba,ab->', pba_a, Aa.vv)
out += numpy.einsum('ba,ab->', pba_b, Ab.vv)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pba: {}".format(diff))
# ji
ref = numpy.einsum('ji,ij->', pji, Atot.oo)
out = numpy.einsum('ji,ij->', pji_a, Aa.oo)
out += numpy.einsum('ji,ij->', pji_b, Ab.oo)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pji: {}".format(diff))
# ai
ref = numpy.einsum('ai,ia->', pai, Atot.ov)
out = numpy.einsum('ai,ia->', pai_a, Aa.ov)
out += numpy.einsum('ai,ia->', pai_b, Ab.ov)
diff = abs(out - ref) / abs(ref)
self.assertTrue(diff < thresh, "Error in Pai: {}".format(diff))
def test_u2rdm(self):
noa = 3
nva = 5
nob = 2
nvb = 6
thresh = 1e-14
# use unrestricted one-particle property
Aa = test_utils.make_random_I_anti(noa, nva)
Ab = test_utils.make_random_I_anti(nob, nvb)
Aab = test_utils.make_random_Ifull_gen(
noa, nva, nob, nvb, noa, nva, nob, nvb)
Atot = spin_utils.int_to_spin2(Aa, Ab, Aab, noa, nva, nob, nvb)
# get unrestricted and general amplitudes
T1a, T1b = test_utils.make_random_T1_spatial(noa, nva, nob, nvb)
T2aa, T2ab, T2bb \
= test_utils.make_random_T2_spatial(noa, nva, nob, nvb)
L1a, L1b = test_utils.make_random_T1_spatial(nva, noa, nvb, nob)
L2aa, L2ab, L2bb \
= test_utils.make_random_T2_spatial(nva, noa, nvb, nob)
T1 = spin_utils.T1_to_spin(T1a, T1b, noa, nva, nob, nvb)
L1 = spin_utils.T1_to_spin(L1a, L1b, nva, noa, nvb, nob)
T2 = spin_utils.T2_to_spin(T2aa, T2ab, T2bb, noa, nva, nob, nvb)
L2 = spin_utils.T2_to_spin(L2aa, L2ab, L2bb, nva, noa, nvb, nob)
# make general pieces of 2-rdm
Pijab = L2.copy()
Pciab = cc_equations.ccsd_2rdm_ciab(T1, T2, L1, L2)
Pjkai = cc_equations.ccsd_2rdm_jkai(T1, T2, L1, L2)
Pcdab = cc_equations.ccsd_2rdm_cdab(T1, T2, L1, L2)
Pbjai = cc_equations.ccsd_2rdm_bjai(T1, T2, L1, L2)
Pklij = cc_equations.ccsd_2rdm_klij(T1, T2, L1, L2)
Pbcai = cc_equations.ccsd_2rdm_bcai(T1, T2, L1, L2)
Pkaij = cc_equations.ccsd_2rdm_kaij(T1, T2, L1, L2)
Pabij = cc_equations.ccsd_2rdm_abij(T1, T2, L1, L2)
# make unrestricted RDMs
Pijab_u = L2aa.copy()
PIJAB_u = L2bb.copy()
PiJaB_u = L2ab.copy()
Pciab_u, PCIAB_u, PcIaB_u, PCiAb_u = cc_equations.uccsd_2rdm_ciab(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pjkai_u, PJKAI_u, PjKaI_u, PJkAi_u = cc_equations.uccsd_2rdm_jkai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pcdab_u, PCDAB_u, PcDaB_u = cc_equations.uccsd_2rdm_cdab(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pbjai_u, PBJAI_u, PbJaI_u, PbJAi_u, PBjaI_u, PBjAi_u \
= cc_equations.uccsd_2rdm_bjai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pklij_u, PKLIJ_u, PkLiJ_u = cc_equations.uccsd_2rdm_klij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pbcai_u, PBCAI_u, PbCaI_u, PBcAi_u = cc_equations.uccsd_2rdm_bcai(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pkaij_u, PKAIJ_u, PkAiJ_u, PKaIj_u = cc_equations.uccsd_2rdm_kaij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
Pabij_u, PABIJ_u, PaBiJ_u = cc_equations.uccsd_2rdm_abij(
T1a, T1b, T2aa, T2ab, T2bb, L1a, L1b, L2aa, L2ab, L2bb)
# ijab
ref = numpy.einsum('ijab,abij->', Pijab, Atot.vvoo)
out = numpy.einsum('ijab,abij->', Pijab_u, Aa.vvoo)
out += numpy.einsum('ijab,abij->', PIJAB_u, Ab.vvoo)
out += 4.0*numpy.einsum('ijab,abij->', PiJaB_u, Aab.vvoo)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pijab: {}".format(diff))
# ciab
ref = numpy.einsum('ciab,abci->', Pciab, Atot.vvvo)
out = numpy.einsum('ciab,abci->', Pciab_u, Aa.vvvo)
out += numpy.einsum('ciab,abci->', PCIAB_u, Ab.vvvo)
out += 2.0*numpy.einsum('ciab,abci->', PcIaB_u, Aab.vvvo)
out += 2.0*numpy.einsum('ciab,baic->', PCiAb_u, Aab.vvov)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pciab: {}".format(diff))
# jkai
ref = numpy.einsum('jkai,aijk->', Pjkai, Atot.vooo)
out = numpy.einsum('jkai,aijk->', Pjkai_u, Aa.vooo)
out += numpy.einsum('jkai,aijk->', PJKAI_u, Ab.vooo)
out += 2.0*numpy.einsum('jKaI,aIjK->', PjKaI_u, Aab.vooo)
out += 2.0*numpy.einsum('JkAi,iAkJ->', PJkAi_u, Aab.ovoo)
diff = abs(out - ref) / abs(ref + 0.001)
self.assertTrue(diff < thresh, "Error in Pciab: {}".format(diff))
# cdab
ref = numpy.einsum('cdab,abcd->', Pcdab, Atot.vvvv)
out = numpy.einsum('cdab,abcd->', Pcdab_u, Aa.vvvv)
out += numpy.einsum('cdab,abcd->', PCDAB_u, Ab.vvvv)
out += 4.0*numpy.einsum('cdab,abcd->', PcDaB_u, Aab.vvvv)
"""utils for interpreting variant effect prediction for Heritability
"""
import gzip
import os
import sys
from collections import defaultdict
import h5py
import numpy as np
import pandas as pd
def read_vep(vep_dir, check_sanity=False):
_label_fn = [x for x in os.listdir(vep_dir) if x.endswith("_row_labels.txt")]
_data_fn = [x for x in os.listdir(vep_dir) if x.endswith("_abs_diffs.h5")]
assert len(_label_fn) == len(
_data_fn) == 1, "Each folder must have exact one row_labels and one abs_diffs file; found %i row_labels and " \
"%i abs_diffs" % (len(_label_fn), len(_data_fn))
label_fn = os.path.join(vep_dir, _label_fn[0])
data_fn = os.path.join(vep_dir, _data_fn[0])
vep_df = pd.read_csv(label_fn, sep='\t')
data_fh = h5py.File(data_fn, 'r')
try:
vep_data = data_fh['data'].value
except:
print("read in h5 file failed")
sys.exit(250)
if check_sanity:
assert vep_data.shape[0] == np.sum(vep_df['ref_match'])
return vep_df, vep_data
def read_vep_logfc(vep_dir):
_label_fn = [x for x in os.listdir(vep_dir) if x.endswith("_row_labels.txt")]
_data_fn = [x for x in os.listdir(vep_dir) if x.endswith("_abs_logfc.npz")]
_data_fn1 = [x for x in os.listdir(vep_dir) if x.endswith("ref_predictions.h5")]
_data_fn2 = [x for x in os.listdir(vep_dir) if x.endswith("alt_predictions.h5")]
label_fn = os.path.join(vep_dir, _label_fn[0])
vep_df = pd.read_csv(label_fn, sep='\t')
if len(_data_fn):
assert len(_data_fn) == 1
vep_data = np.load(os.path.join(vep_dir, _data_fn[0]))['arr_0']
else:
assert len(_label_fn) == len(_data_fn1) == len(
_data_fn2) == 1, "Each folder must have exact one row_labels and one abs_diffs file; found %i row_labels " \
"and %i, %i abs_diffs" % ( len(_label_fn), len(_data_fn1), len(_data_fn2))
data_fn1 = os.path.join(vep_dir, _data_fn1[0])
data_fn2 = os.path.join(vep_dir, _data_fn2[0])
data_fh1 = h5py.File(data_fn1, 'r')
data_fh2 = h5py.File(data_fn2, 'r')
try:
vep_data1 = data_fh1['data'].value
vep_data2 = data_fh2['data'].value
except:
print("read in h5 file failed")
sys.exit(250)
vep_data1 = np.clip(vep_data1, 0.0001, 0.9999)
vep_data2 = np.clip(vep_data2, 0.0001, 0.9999)
vep_data = np.abs(np.log(vep_data1 / (1 - vep_data1)) - np.log(vep_data2 / (1 - vep_data2)))
colmax = np.apply_along_axis(np.max, 0, vep_data) # vep_data is lower-bounded by 0
vep_data /= colmax
np.savez(os.path.join(vep_dir, "VEP_abs_logfc.npz"), vep_data)
return vep_df, vep_data
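# Example usage (hypothetical output directory produced by the VEP pipeline):
#   vep_df, vep_data = read_vep_logfc('outputs/vep_run1/')
#   # vep_data[i, j] is the column-normalized |logit(p_ref) - logit(p_alt)| for variant i, feature j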
def convert_to_ldsc_annot_by_label(vep_df, vep_data, label_fp, baselineLD_dir, output_dir, resume_prev_run=False):
"""read in the h5 vep data snp annot and numerical values, convert to
the existing baselineLD annotations for next steps
"""
baselineLDs = [x for x in os.listdir(baselineLD_dir) if x.endswith("annot.gz")]
# label_df is annotation for output chromatin features
label_df = pd.read_table(label_fp)
# vep_dict is a mapping from chrom,bp to vep_data row index
vep_dict = defaultdict(list)
print('making vep mapper..')
for i in range(vep_df.shape[0]):
vep_dict[(vep_df.chrom[i], str(vep_df.pos[i]))].append(i)
# iterate through each labels in label_df, make an independent ldsc-annot
for k in range(label_df.shape[0]):
label_idx = label_df['label_idx'][k]
label_name = label_df['label_name'][k]
# normalize label names
label_name = label_name.replace('|', '--')
label_name = label_name.replace('(', '_').replace(')', '_')
label_output_dir = os.path.join(output_dir, label_name)
os.makedirs(label_output_dir, exist_ok=True)
print("%i/%i %s" % (k, label_df.shape[0], label_name))
for chrom_fn in baselineLDs:
chrom = chrom_fn.split(".")[-3]
print(chrom)
if resume_prev_run and os.path.isfile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom))):
print("found %s, skip" % chrom)
continue
with gzip.GzipFile(os.path.join(baselineLD_dir, chrom_fn), 'rb') as fi, gzip.GzipFile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom)), 'wb') as fo:
fi.readline() # pop first line
fo.write(("\t".join(['CHR', 'BP', 'SNP', 'CM', label_name]) + '\n').encode('utf-8'))
# for line in tqdm(fi):
for line in fi:
line = line.decode('utf-8')
ele = line.strip().split()
_chr, _bp, _snp, _cm = ele[0:4]
# _bp = str(int(_bp) - 1)
# _annot_idx = np.where(label_df.eval("pos==%s & chrom=='chr%s'"%(_bp, _chr)))[0]
_annot_idx = vep_dict[("chr%s" % _chr, _bp)]
if len(_annot_idx) == 0:
# this is less than 0.5% - ignored
# warnings.warn("baselineLD variant not found in vep: %s,%s"%(_chr, _bp))
# continue
_annot = "0"
else:
_annot = "%.5f" % | np.max(vep_data[_annot_idx, label_idx]) | numpy.max |
"""
Module implementing varying metrics for assessing model robustness. These fall mainly under two categories:
attack-dependent and attack-independent.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import config
import numpy as np
import numpy.linalg as la
import tensorflow as tf
from scipy.stats import weibull_min
from scipy.optimize import fmin as scipy_optimizer
from scipy.special import gammainc
from functools import reduce
from art.attacks.fast_gradient import FastGradientMethod
# TODO add all other implemented attacks
supported_methods = {
"fgsm": {"class": FastGradientMethod, "params": {"eps_step": 0.1, "eps_max": 1., "clip_min": 0., "clip_max": 1.}},
# "jsma": {"class": SaliencyMapMethod, "params": {"theta": 1., "gamma": 0.01, "clip_min": 0., "clip_max": 1.}}
}
def get_crafter(method, classifier, session, params=None):
try:
crafter = supported_methods[method]["class"](classifier, sess=session)
except:
raise NotImplementedError("{} crafting method not supported.".format(method))
if params:
crafter.set_params(**params)
else:
crafter.set_params(**supported_methods[method]["params"])
return crafter
def empirical_robustness(x, classifier, sess, method_name, method_params=None):
"""Compute the Empirical Robustness of a classifier object over the sample `x` for a given adversarial crafting
method `attack`. This is equivalent to computing the minimal perturbation that the attacker must introduce for a
successful attack. Paper link: https://arxiv.org/abs/1511.04599
:param x: Data sample of shape that can be fed into `classifier`
:type x: `np.ndarray`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param sess: The session for the computation
:type sess: `tf.Session`
:param method_name: adversarial attack name
:type method_name: `str`
:param method_params: Parameters specific to the adversarial attack
:type method_params: `dict`
:return: The average empirical robustness computed on `x`
:rtype: `float`
"""
crafter = get_crafter(method_name, classifier, sess, method_params)
adv_x = crafter.generate(x, minimal=True, **method_params)
# Predict the labels for adversarial examples
y = classifier.predict(x, verbose=0)
y_pred = classifier.predict(adv_x, verbose=0)
idxs = (np.argmax(y_pred, axis=1) != np.argmax(y, axis=1))
if np.sum(idxs) == 0.0:
return 0
perts_norm = la.norm((adv_x - x).reshape(x.shape[0], -1), ord=crafter.ord, axis=1)
perts_norm = perts_norm[idxs]
return np.mean(perts_norm / la.norm(x[idxs].reshape(np.sum(idxs), -1), ord=crafter.ord, axis=1))
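# Example call (a sketch; `classifier` and `sess` are assumed to be a trained ART classifier
# and an active tf.Session):
#   er = empirical_robustness(x_test, classifier, sess, 'fgsm',
#                             {'eps_step': 0.1, 'eps_max': 1.0, 'clip_min': 0., 'clip_max': 1.})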
def kernel_rbf(x, y, sigma=0.1):
"""Computes the RBF kernel
:param x: a tensor object or a numpy array
:param y: a tensor object or a numpy array
:param sigma: standard deviation
:return: a tensor object
"""
norms_x = tf.reduce_sum(x ** 2, 1)[:, None] # axis = [1] for later tf versions
norms_y = tf.reduce_sum(y ** 2, 1)[None, :]
dists = norms_x - 2 * tf.matmul(x, y, transpose_b=True) + norms_y
return tf.exp(-(1.0/(2.0*sigma)*dists))
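# Note: as written this evaluates exp(-||x - y||^2 / (2*sigma)), i.e. sigma enters the
# denominator directly rather than as sigma**2.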
def euclidean_dist(x, y):
"""Computes the Euclidean distance between x and y
:param x: A tensor object or a numpy array
:param y: A tensor object or a numpy array
:return: A tensor object
"""
norms_x = tf.reduce_sum(x ** 2, 1)[:, None] # axis = [1] for later tf versions
norms_y = tf.reduce_sum(y ** 2, 1)[None, :]
dists = norms_x - 2 * tf.matmul(x, y, transpose_b=True) + norms_y
return dists
def mmd(x_data, y_data, sess, sigma=0.1):
""" Computes the maximum mean discrepancy between x and y
:param x_data: Numpy array
:param y_data: Numpy array
:param sess: tf session
:param sigma: Standard deviation
:return: A float value corresponding to mmd(x_data, y_data)
"""
assert x_data.shape[0] == y_data.shape[0]
x_data = x_data.reshape(x_data.shape[0], np.prod(x_data.shape[1:]))
y_data = y_data.reshape(y_data.shape[0], np.prod(y_data.shape[1:]))
x = tf.placeholder(tf.float32, shape=x_data.shape)
y = tf.placeholder(tf.float32, shape=y_data.shape)
mmd_ = tf.reduce_sum(kernel_rbf(x, x, sigma)) - 2 * tf.reduce_sum(kernel_rbf(x, y, sigma)) \
+ tf.reduce_sum(kernel_rbf(y, y, sigma))
return sess.run(mmd_, feed_dict={x: x_data, y: y_data})
def nearest_neighbour_dist(x, classifier, x_train, sess, method_name, method_params=None):
"""
Compute the (average) nearest neighbour distance between the sets `x` and `x_train`: for each point in `x`,
measure the Euclidean distance to its closest point in `x_train`, then average over all points.
:param x: Data sample of shape that can be fed into `classifier`
:type x: `np.ndarray`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param x_train: Reference data sample to be considered as neighbors
:type x_train: `np.ndarray`
:param sess: The session for the computation
:type sess: `tf.Session`
:param method_name: adversarial attack name
:type method_name: `str`
:param method_params: Parameters specific to the adversarial attack
:type method_params: `dict`
:return: The average nearest neighbors distance
:rtype: `float`
"""
# Craft the adversarial examples
crafter = get_crafter(method_name, classifier, sess, method_params)
adv_x = crafter.generate(x, minimal=True, **method_params)
# Predict the labels for adversarial examples
y = classifier.predict(x, verbose=0)
y_pred = classifier.predict(adv_x, verbose=0)
adv_x_ = adv_x.reshape(adv_x.shape[0], np.prod(adv_x.shape[1:]))
x_ = x_train.reshape(x_train.shape[0], np.prod(x_train.shape[1:]))
dists = euclidean_dist(adv_x_, x_)
dists = np.min(sess.run(dists), 1) / la.norm(x.reshape(x.shape[0], -1), ord=2, axis=1)
idxs = (np.argmax(y_pred, axis=1) != np.argmax(y, axis=1))
avg_nn_dist = np.mean(dists[idxs])
return avg_nn_dist
def loss_sensitivity(x, classifier, sess):
"""
Local loss sensitivity estimated through the gradients of the loss at points in `x`, as defined in
https://arxiv.org/pdf/1706.05394.pdf.
:param x: Data sample of shape that can be fed into `classifier`
:type x: `np.ndarray`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param sess: The session for the computation
:type sess: `tf.Session`
:return: The average loss sensitivity of the model
:rtype: `float`
"""
from art.attacks.attack import class_derivative
x_op = tf.placeholder(dtype=tf.float32, shape=list(x.shape))
y_pred = classifier.predict(x)
indices = np.argmax(y_pred, axis=1)
grads = class_derivative(classifier._get_predictions(x_op, log=True), x_op,
classifier.model.get_output_shape_at(0)[1])
res = sess.run(grads, feed_dict={x_op: x})
res = np.asarray([r[0] for r in res])[indices, list(range(x.shape[0]))]
res = la.norm(res.reshape(res.shape[0], -1), ord=2, axis=1)
return np.mean(res)
def clever_u(x, classifier, n_b, n_s, r, sess, c_init=1):
"""
Compute CLEVER score for an untargeted attack. Paper link: https://arxiv.org/abs/1801.10578
:param x: One input sample
:type x: `np.ndarray`
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param n_b: Batch size
:type n_b: `int`
:param n_s: Number of examples per batch
:type n_s: `int`
:param r: Maximum perturbation
:type r: `float`
:param sess: The session to run graphs in
:type sess: `tf.Session`
:param c_init: initialization of Weibull distribution
:type c_init: `float`
:return: A tuple of 3 CLEVER scores, corresponding to norms 1, 2 and np.inf
:rtype: `tuple`
"""
# Get a list of untargeted classes
y_pred = classifier.predict(np.array([x]))
pred_class = np.argmax(y_pred, axis=1)[0]
num_class = np.shape(y_pred)[1]
untarget_classes = [i for i in range(num_class) if i != pred_class]
# Compute CLEVER score for each untargeted class
score1_list, score2_list, score8_list = [], [], []
for j in untarget_classes:
s1, s2, s8 = clever_t(x, classifier, j, n_b, n_s, r, sess, c_init)
score1_list.append(s1)
score2_list.append(s2)
score8_list.append(s8)
return np.min(score1_list), np.min(score2_list), np.min(score8_list)
def clever_t(x, classifier, target_class, n_b, n_s, r, sess, c_init=1):
"""
Compute CLEVER score for a targeted attack. Paper link: https://arxiv.org/abs/1801.10578
:param x: One input sample
:type x: `np.ndarray`
:param classifier: A trained model
:type classifier: :class:`Classifier`
:param target_class: Targeted class
:type target_class: `int`
:param n_b: Batch size
:type n_b: `int`
:param n_s: Number of examples per batch
:type n_s: `int`
:param r: Maximum perturbation
:type r: `float`
:param sess: The session to run graphs in
:type sess: `tf.Session`
:param c_init: Initialization of Weibull distribution
:type c_init: `float`
:return: A tuple of 3 CLEVER scores, corresponding to norms 1, 2 and np.inf
:rtype: `tuple`
"""
# Check if the targeted class is different from the predicted class
y_pred = classifier.predict(np.array([x]))
pred_class = np.argmax(y_pred, axis=1)[0]
if target_class == pred_class:
raise ValueError("The targeted class is the predicted class!")
# Define placeholders for computing g gradients
shape = [None]
shape.extend(x.shape)
imgs = tf.placeholder(shape=shape, dtype=tf.float32)
pred_class_ph = tf.placeholder(dtype=tf.int32, shape=[])
target_class_ph = tf.placeholder(dtype=tf.int32, shape=[])
# Define tensors for g gradients
grad_norm_1, grad_norm_2, grad_norm_8, g_x = _build_g_gradient(imgs, classifier, pred_class_ph, target_class_ph)
# Some auxiliary vars
set1, set2, set8 = [], [], []
dim = reduce(lambda x_, y: x_ * y, x.shape, 1)
shape = [n_s]
shape.extend(x.shape)
# Compute predicted class
y_pred = classifier.predict(np.array([x]))
pred_class = np.argmax(y_pred, axis=1)[0]
# Loop over n_b batches
for i in range(n_b):
# Random generation of data points
sample_xs0 = np.reshape(_random_sphere(m=n_s, n=dim, r=r), shape)
sample_xs = sample_xs0 + np.repeat(np.array([x]), n_s, 0)
np.clip(sample_xs, 0, 1, out=sample_xs)
# Preprocess data if it is supported in the classifier
if hasattr(classifier, 'feature_squeeze'):
sample_xs = classifier.feature_squeeze(sample_xs)
sample_xs = classifier._preprocess(sample_xs)
# Compute gradients
max_gn1, max_gn2, max_gn8 = sess.run(
[grad_norm_1, grad_norm_2, grad_norm_8],
feed_dict={imgs: sample_xs, pred_class_ph: pred_class,
target_class_ph: target_class})
set1.append(max_gn1)
set2.append(max_gn2)
set8.append(max_gn8)
# Maximum likelihood estimation for max gradient norms
[_, loc1, _] = weibull_min.fit(-np.array(set1), c_init, optimizer=scipy_optimizer)
[_, loc2, _] = weibull_min.fit(-np.array(set2), c_init, optimizer=scipy_optimizer)
[_, loc8, _] = weibull_min.fit(-np.array(set8), c_init, optimizer=scipy_optimizer)
# Compute g_x0
x0 = np.array([x])
if hasattr(classifier, 'feature_squeeze'):
x0 = classifier.feature_squeeze(x0)
x0 = classifier._preprocess(x0)
g_x0 = sess.run(g_x, feed_dict={imgs: x0, pred_class_ph: pred_class,
target_class_ph: target_class})
# Compute scores
# Note q = p / (p-1)
s8 = np.min([-g_x0[0] / loc1, r])
s2 = np.min([-g_x0[0] / loc2, r])
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from __future__ import print_function
import time
import numpy as np
_EPS = 1e-14
def mstamp(seq, sub_len, return_dimension=False):
""" multidimensional matrix profile with mSTAMP (stamp based)
Parameters
----------
seq : numpy matrix, shape (n_dim, seq_len)
input sequence
sub_len : int
subsequence length
return_dimension : bool
if True, also return the matrix profile dimension. It takes O(d^2 n)
to store and O(d^2 n^2) to compute. (default is False)
Returns
-------
matrix_profile : numpy matrix, shape (n_dim, sub_num)
matrix profile
profile_index : numpy matrix, shape (n_dim, sub_num)
matrix profile index
profile_dimension : list, optional, shape (n_dim)
matrix profile dimension, this is only returned when return_dimension
is True
Notes
-----
<NAME>, <NAME>, and <NAME>, "Matrix Profile VI: Meaningful
Multidimensional Motif Discovery," IEEE ICDM 2017.
https://sites.google.com/view/mstamp/
http://www.cs.ucr.edu/~eamonn/MatrixProfile.html
"""
if sub_len < 4:
raise RuntimeError('Subsequence length (sub_len) must be at least 4')
exc_zone = sub_len // 2
seq = np.array(seq, dtype=float, copy=True)
if seq.ndim == 1:
seq = np.expand_dims(seq, axis=0)
seq_len = seq.shape[1]
sub_num = seq.shape[1] - sub_len + 1
n_dim = seq.shape[0]
skip_loc = np.zeros(sub_num, dtype=bool)
for i in range(sub_num):
if not np.all(np.isfinite(seq[:, i:i + sub_len])):
skip_loc[i] = True
seq[~np.isfinite(seq)] = 0
matrix_profile = np.empty((n_dim, sub_num))
matrix_profile[:] = np.inf
profile_index = -np.ones((n_dim, sub_num), dtype=int)
seq_freq = np.empty((n_dim, seq_len * 2), dtype=np.complex128)
seq_mu = np.empty((n_dim, sub_num))
seq_sig = np.empty((n_dim, sub_num))
if return_dimension:
profile_dimension = []
for i in range(n_dim):
profile_dimension.append(np.empty((i + 1, sub_num), dtype=int))
for i in range(n_dim):
seq_freq[i, :], seq_mu[i, :], seq_sig[i, :] = \
_mass_pre(seq[i, :], sub_len)
dist_profile = np.empty((n_dim, sub_num))
que_sig = np.empty(n_dim)
tic = time.time()
for i in range(sub_num):
cur_prog = (i + 1) / sub_num
time_left = ((time.time() - tic) / (i + 1)) * (sub_num - i - 1)
print('\rProgress [{0:<50s}] {1:5.1f}% {2:8.1f} sec'
.format('#' * int(cur_prog * 50),
cur_prog * 100, time_left), end="")
for j in range(n_dim):
que = seq[j, i:i + sub_len]
dist_profile[j, :], que_sig = _mass(
seq_freq[j, :], que, seq_len, sub_len,
seq_mu[j, :], seq_sig[j, :])
if skip_loc[i] or np.any(que_sig < _EPS):
continue
exc_zone_st = max(0, i - exc_zone)
exc_zone_ed = min(sub_num, i + exc_zone)
dist_profile[:, exc_zone_st:exc_zone_ed] = np.inf
dist_profile[:, skip_loc] = np.inf
dist_profile[seq_sig < _EPS] = np.inf
dist_profile_dim = np.argsort(dist_profile, axis=0)
dist_profile_sort = np.sort(dist_profile, axis=0)
dist_profile_cumsum = np.zeros(sub_num)
for j in range(n_dim):
dist_profile_cumsum += dist_profile_sort[j, :]
dist_profile_mean = dist_profile_cumsum / (j + 1)
update_pos = dist_profile_mean < matrix_profile[j, :]
profile_index[j, update_pos] = i
matrix_profile[j, update_pos] = dist_profile_mean[update_pos]
if return_dimension:
profile_dimension[j][:, update_pos] = \
dist_profile_dim[:j + 1, update_pos]
matrix_profile = np.sqrt(matrix_profile)
if return_dimension:
return matrix_profile, profile_index, profile_dimension
else:
return matrix_profile, profile_index,
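# Example usage (hypothetical data): for a (n_dim, seq_len) array `data` and subsequence length 30:
#   matrix_profile, profile_index = mstamp(data, 30)
#   motif_loc = np.argmin(matrix_profile[-1, :])   # best motif location when using all dimensions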
def _mass_pre(seq, sub_len):
""" pre-computation for iterative call to MASS
Parameters
----------
seq : numpy array
input sequence
sub_len : int
subsequence length
Returns
-------
seq_freq : numpy array
sequence in frequency domain
seq_mu : numpy array
each subsequence's mu (mean)
seq_sig : numpy array
each subsequence's sigma (standard deviation)
Notes
-----
This functions is modified from the code provided in the following URL
http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html
"""
seq_len = len(seq)
seq_pad = np.zeros(seq_len * 2)
seq_pad[0:seq_len] = seq
seq_freq = np.fft.fft(seq_pad)
seq_cum = np.cumsum(seq_pad)
seq_sq_cum = np.cumsum(np.square(seq_pad))
seq_sum = (seq_cum[sub_len - 1:seq_len] -
np.concatenate(([0], seq_cum[0:seq_len - sub_len])))
seq_sq_sum = (seq_sq_cum[sub_len - 1:seq_len] -
np.concatenate(([0], seq_sq_cum[0:seq_len - sub_len])))
seq_mu = seq_sum / sub_len
seq_sig_sq = seq_sq_sum / sub_len - np.square(seq_mu)
seq_sig = np.sqrt(seq_sig_sq)
return seq_freq, seq_mu, seq_sig
def _mass(seq_freq, que, seq_len, sub_len, seq_mu, seq_sig):
""" iterative call of MASS
Parameters
----------
seq_freq : numpy array
sequence in frequency domain
que : numpy array
query
seq_len : int
sequence length
sub_len : int
subsequence length
seq_mu : numpy array
each subsequence's mu (mean)
seq_sig : numpy array
each subsequence's sigma (standard deviation)
Returns
-------
dist_profile : numpy array
distance profile
que_sig : float64
query's sigma (standard deviation)
Notes
-----
This functions is modified from the code provided in the following URL
http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html
"""
que = que[::-1]
que_pad = np.zeros(seq_len * 2)
que_pad[0:sub_len] = que
que_freq = np.fft.fft(que_pad)
product_freq = seq_freq * que_freq
product = np.fft.ifft(product_freq)
''' Recurrent Models of Visual Attention
https://papers.nips.cc/paper/5542-recurrent-models-of-visual-attention.pdf
'''
from scipy.misc import imresize as resize
from minpy.nn.model_builder import *
from minpy.nn.modules import *
class CoreNetwork(Model):
def __init__(self):
super(CoreNetwork, self).__init__()
self._g_linear = FullyConnected(num_hidden=256)
self._h_linear = FullyConnected(num_hidden=256)
self._linear = FullyConnected(num_hidden=10)
def forward(self, g, h, predict=False, **kwargs):
if predict: return self._linear(h)
elif h is None: return ReLU()(self._g_linear(g))
else: return ReLU()(self._g_linear(g) + self._h_linear(h))
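# The core network implements the RAM recurrence h_t = ReLU(W_g g_t + W_h h_{t-1}); on the first
# step (h is None) only the glimpse term is used, and predict=True maps the hidden state to 10 logits.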
class GlimpseNetwork(Model):
def __init__(self, length, n_patches):
super(GlimpseNetwork, self).__init__()
self._length = length
self._n_patches = n_patches
self._g_linear0 = FullyConnected(num_hidden=128)
self._g_linear = FullyConnected(num_hidden=256)
self._l_linear0 = FullyConnected(num_hidden=128)
self._l_linear = FullyConnected(num_hidden=256)
def forward(self, images, locations, mode='training'):
if mode == 'training': self.training()
elif mode == 'inference': self.inference()
encoded = self.encode(images, locations, self._length, self._n_patches)
h_g = self._g_linear0(encoded)
h_g = ReLU()(h_g)
h_g = self._g_linear(h_g)
h_l = self._l_linear0(locations)
h_l = ReLU()(h_l)
h_l = self._l_linear(h_l)
return ReLU()(h_g + h_l)  # glimpse feature g_t = ReLU(Linear(h_g) + Linear(h_l))
@staticmethod
def encode(images, locations, length, n_patches):
N, H, V = images.shape
locations[:, 0] = locations[:, 0] * H + H / 2
locations[:, 1] = locations[:, 1] * V + V / 2
d = length / 2
images = np.pad(images, ((0, 0), (d, d), (d, d)), mode='edge')
import matplotlib.pyplot as plt
import numpy as np
class BanditEnv:
def __init__(self, actions):
self.q_star = [np.random.randn() for i in range(actions)]
self.best_action = np.argmax(self.q_star)
# *_*coding:utf-8 *_*
import os
import sys
from os import makedirs
from os.path import exists, join
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from ply_helper import read_ply, write_ply
from sklearn.metrics import confusion_matrix
from metrics import IoU_from_confusions
import json
import argparse
import numpy as np
import tensorflow as tf
import socket
import importlib
import time
from pathlib import Path
from scannet_dataset_grid import ScannetDataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--data', type=str, default='../data/Scannet', help='Root for dataset')
parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 4]')
parser.add_argument('--model_path', required=True, help='model checkpoint file path')
parser.add_argument('--num_votes', type=int, default=100, help='Aggregate scores from multiple test [default: 100]')
parser.add_argument('--split', type=str, default='validation', help='[validation/test]')
parser.add_argument('--saving', action='store_true', help='Whether save test results')
parser.add_argument('--debug', action='store_true', help='Whether save test results')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
config = parser.parse_args()
with open(Path(FLAGS.model_path).parent / 'args.txt', 'r') as f:
config.__dict__ = json.load(f)
config.validation_size = 500
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = config.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
WITH_RGB = config.with_rgb
MODEL = importlib.import_module(config.model) # import network module
NUM_CLASSES = 21
HOSTNAME = socket.gethostname()
feature_channel = 3 if WITH_RGB else 0
class TimeLiner:
def __init__(self):
self._timeline_dict = None
def update_timeline(self, chrome_trace):
# convert crome trace to python dict
chrome_trace_dict = json.loads(chrome_trace)
# for first run store full trace
if self._timeline_dict is None:
self._timeline_dict = chrome_trace_dict
# for other - update only time consumption, not definitions
else:
for event in chrome_trace_dict['traceEvents']:
# events time consumption started with 'ts' prefix
if 'ts' in event:
self._timeline_dict['traceEvents'].append(event)
def save(self, f_name):
with open(f_name, 'w') as f:
json.dump(self._timeline_dict, f)
class ModelTester:
def __init__(self, pred, num_classes, saver, restore_snap=None):
self.saver = saver
cProto = tf.ConfigProto()
cProto.gpu_options.allow_growth = True
cProto.allow_soft_placement = True
cProto.log_device_placement = False
self.sess = tf.Session(config=cProto)
if (restore_snap is not None):
self.saver.restore(self.sess, restore_snap)
print("Model restored from " + restore_snap)
else:
self.sess.run(tf.global_variables_initializer())
# Add a softmax operation for predictions
self.prob_logits = tf.nn.softmax(pred[:, :, 1:])
self.num_classes = num_classes
def test_cloud_segmentation(self, input, dataset, test_init_op, num_votes=100, saving=FLAGS.saving):
# Smoothing parameter for votes
test_smooth = 0.98
# Initialise iterator with train data
self.sess.run(test_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [np.zeros((l.data.shape[0], nc_model), dtype=np.float32) for l in dataset.input_trees['test']]
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
input['labels'],
input['point_inds'],
input['cloud_inds'])
stacked_probs, labels, point_inds, cloud_inds = \
self.sess.run(ops, {input['is_training_pl']: False})
t += [time.time()]
# Stack all predictions for each class separately
for b in range(stacked_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (mean_dt[0]), 1000 * (mean_dt[1]),
np.min(dataset.min_potentials['test'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['test'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
print([np.mean(pots) for pots in dataset.potentials['test']])
if last_min + 2 < new_min:
print('Saving clouds')
# Update last_min
last_min = new_min
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
files = dataset.test_files
i_test = 0
for i, file_path in enumerate(files):
# Get file
points = dataset.load_evaluation_points(file_path)
# Reproject probs
probs = self.test_probs[i_test][dataset.test_proj[i_test], :]
# Insert false columns for ignored labels
probs2 = probs.copy()
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs2 = np.insert(probs2, l_ind, 0, axis=1)
# Get the predicted labels
preds = dataset.label_values[np.argmax(probs2, axis=1)].astype(np.int32)
# Project potentials on original points
pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]]
# Save plys
cloud_name = file_path.split('/')[-1]
test_name = join(test_path, 'predictions', cloud_name)
write_ply(test_name,
[points, preds, pots],
['x', 'y', 'z', 'preds', 'pots'])
test_name2 = join(test_path, 'probs', cloud_name)
prob_names = ['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values
if label not in dataset.ignored_labels]
write_ply(test_name2,
[points, probs],
['x', 'y', 'z'] + prob_names)
# Save ascii preds
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
np.savetxt(ascii_name, preds, fmt='%d')
i_test += 1
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
self.sess.run(test_init_op)
epoch_ind += 1
i0 = 0
continue
return
def test_cloud_segmentation_on_val(self, input, dataset, val_init_op, num_votes=100, saving=True):
# Smoothing parameter for votes
test_smooth = 0.95
# Initialise iterator with train data
self.sess.run(val_init_op)
# Initiate global prediction over test clouds
nc_model = self.num_classes - 1
self.test_probs = [np.zeros((l.shape[0], nc_model), dtype=np.float32)
for l in dataset.input_labels['validation']]
# Number of points per class in validation set
val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0
for label_value in dataset.label_values:
if label_value not in dataset.ignored_labels:
val_proportions[i] = np.sum([np.sum(labels == label_value)
for labels in dataset.validation_labels])
i += 1
# Test saving path
if saving:
saving_path = time.strftime('Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
test_path = join('test', saving_path)
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'val_predictions')):
makedirs(join(test_path, 'val_predictions'))
if not exists(join(test_path, 'val_probs')):
makedirs(join(test_path, 'val_probs'))
else:
test_path = None
i0 = 0
epoch_ind = 0
last_min = -0.5
mean_dt = np.zeros(2)
last_display = time.time()
while last_min < num_votes:
try:
# Run one step of the model.
t = [time.time()]
ops = (self.prob_logits,
input['labels'],
input['point_inds'],
input['cloud_inds'])
stacked_probs, labels, point_inds, cloud_inds = self.sess.run(ops, {input['is_training_pl']: False})
t += [time.time()]
# Stack all validation predictions for each class separately
for b in range(stacked_probs.shape[0]):
# Get prediction (only for the concerned parts)
probs = stacked_probs[b]
inds = point_inds[b]
c_i = cloud_inds[b]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 10.0:
last_display = t[-1]
message = 'Epoch {:3d}, step {:3d} (timings : {:4.2f} {:4.2f}). min potential = {:.1f}'
print(message.format(epoch_ind, i0, 1000 * (mean_dt[0]), 1000 * (mean_dt[1]),
np.min(dataset.min_potentials['validation'])))
i0 += 1
except tf.errors.OutOfRangeError:
# Save predicted cloud
new_min = np.min(dataset.min_potentials['validation'])
print('Epoch {:3d}, end. Min potential = {:.1f}'.format(epoch_ind, new_min))
if last_min + 1 < new_min:
# Update last_min
last_min += 1
# Show vote results (On subcloud so it is not the good values here)
print('\nConfusion on sub clouds')
Confs = []
for i_test in range(dataset.num_validation):
# Insert false columns for ignored labels
probs = self.test_probs[i_test]
for l_ind, label_value in enumerate(dataset.label_values):
if label_value in dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1)
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import os
import sys
from astrometry.util.fits import fits_table
from astrometry.libkd.spherematch import match_radec
from astrometry.util.plotutils import PlotSequence
from legacyanalysis.ps1cat import ps1cat, ps1_to_decam
from legacypipe.survey import *
'''
pixsc = 0.262
apr = [1.0, 2.0, 3.5] / pixsc
#-> aperture photometry radius in pixels
decstat -- aper, img, ..., apr
-> allmags
mags = reform(allmags[2,ii])
Skyrad_pix -- default 7 to 10 pixel radius in pixels
skyrad_pix = skyrad/pixsc ; sky radii in pixels
image.py -- SE called with PIXEL_SCALE 0 -> determined by SE from header
# corresponding to diameters of [1.5,3,5,7,9,11,13,15] arcsec
# assuming 0.262 arcsec pixel scale
PHOT_APERTURES 5.7251911,11.450382,19.083969,26.717558,34.351147,41.984734,49.618320,57.251911
-> photutils aperture photometry on PsfEx image -> 1.0 at radius ~ 13;
total ~ 1.05
One of the largest differences:
z band
Photometric diff 0.0249176025391 PSF size 1.07837 expnum 292604
-> Notice that the difference is largest for *small* PSFs.
Could this be brighter-fatter?
-> Check out the region Aaron pointed to with 0.025 errors
-> Is it possible that this is coming from saturation in the zeropoints
computation (decstat)?!
-> Sky estimation?
-> Is PsfEx using any flags (SATUR) to cut candidates?
-> Look at forced phot residuals? Take model & data slices of a whole
pile of stars in expnum 292604 N4
'''
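# Sketch of the aperture conversion described in the notes above (illustrative
# only, not used elsewhere in this script): aperture diameters in arcsec divided
# by an assumed 0.262 arcsec/pixel scale reproduce the PHOT_APERTURES entries.
def _aperture_diameters_pix(diam_arcsec=(1.5, 3, 5, 7, 9, 11, 13, 15), pixscale=0.262):
    # e.g. 1.5 / 0.262 -> 5.725..., matching the first PHOT_APERTURES value
    return [d / pixscale for d in diam_arcsec]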
def star_profiles(ps):
# Run an example CCD, 292604-N4, with fairly large difference vs PS1.
# python -c "from astrometry.util.fits import *; T = merge_tables([fits_table('/project/projectdirs/desiproc/dr3/tractor/244/tractor-244%s.fits' % b) for b in ['2p065','4p065', '7p065']]); T.writeto('tst-cat.fits')"
# python legacypipe/forced_photom_decam.py --save-data tst-data.fits --save-model tst-model.fits 292604 N4 tst-cat.fits tst-phot.fits
# -> tst-{model,data,phot}.fits
datafn = 'tst-data.fits'
modfn = 'tst-model.fits'
photfn = 'tst-phot.fits'
catfn = 'tst-cat.fits'
img = fitsio.read(datafn)
mod = fitsio.read(modfn)
phot = fits_table(photfn)
cat = fits_table(catfn)
print(len(phot), 'forced-photometry results')
margin = 25
phot.cut((phot.x > 0+margin) * (phot.x < 2046-margin) *
(phot.y > 0+margin) * (phot.y < 4096-margin))
print(len(phot), 'in bounds')
cmap = dict([((b,o),i) for i,(b,o) in enumerate(zip(cat.brickname, cat.objid))])
I = np.array([cmap.get((b,o), -1) for b,o in zip(phot.brickname, phot.objid)])
print(np.sum(I >= 0), 'forced-phot matched cat')
phot.type = cat.type[I]
wcs = Sip(datafn)
phot.ra,phot.dec = wcs.pixelxy2radec(phot.x+1, phot.y+1)
phot.cut(np.argsort(phot.flux))
phot.sn = phot.flux * np.sqrt(phot.flux_ivar)
phot.cut(phot.sn > 5)
print(len(phot), 'with S/N > 5')
ps1 = ps1cat(ccdwcs=wcs)
stars = ps1.get_stars()
print(len(stars), 'PS1 sources')
# Now cut to just *stars* with good colors
stars.gicolor = stars.median[:,0] - stars.median[:,2]
keep = (stars.gicolor > 0.4) * (stars.gicolor < 2.7)
stars.cut(keep)
print(len(stars), 'PS1 stars with good colors')
stars.cut(np.minimum(stars.stdev[:,1], stars.stdev[:,2]) < 0.05)
print(len(stars), 'PS1 stars with min stdev(r,i) < 0.05')
I,J,d = match_radec(phot.ra, phot.dec, stars.ra, stars.dec, 1./3600.)
print(len(I), 'matches')
plt.clf()
ha=dict(histtype='step', bins=20, range=(0,100), normed=True)
plt.hist(phot.flux, color='b', **ha)
plt.hist(phot.flux[I], color='r', **ha)
ps.savefig()
plt.clf()
plt.hist(phot.flux * np.sqrt(phot.flux_ivar), bins=100,
range=(-10, 50))
plt.xlabel('Flux S/N')
ps.savefig()
K = np.argsort(phot.flux[I])
I = I[K]
J = J[K]
ix = np.round(phot.x).astype(int)
iy = np.round(phot.y).astype(int)
sz = 10
P = np.flatnonzero(phot.type == 'PSF ')
print(len(P), 'PSFs')
imed = len(P) // 2
i1 = int(len(P) * 0.75)
i2 = int(len(P) * 0.25)
N = 401
allmods = []
allimgs = []
for II,tt in [#(I[:len(I)/2], 'faint matches to PS1'),
#(I[len(I)/2:], 'bright matches to PS1'),
#(P[i2: i2+N], '25th pct PSFs'),
#(P[imed: imed+N], 'median PSFs'),
#(P[i1: i1+N], '75th pct PSFs'),
#(P[-25:], 'brightest PSFs'),
(P[i2:imed], '2nd quartile of PSFs'),
(P[imed:i1], '3rd quartile of PSFs'),
#(P[:len(P)/2], 'faint half of PSFs'),
#(P[len(P)/2:], 'bright half of PSFs'),
]:
imgs = []
mods = []
shimgs = []
shmods = []
imgsum = modsum = 0
#plt.clf()
for i in II:
from astrometry.util.util import lanczos_shift_image
dy = phot.y[i] - iy[i]
dx = phot.x[i] - ix[i]
sub = img[iy[i]-sz : iy[i]+sz+1, ix[i]-sz : ix[i]+sz+1]
shimg = lanczos_shift_image(sub, -dx, -dy)
sub = mod[iy[i]-sz : iy[i]+sz+1, ix[i]-sz : ix[i]+sz+1]
shmod = lanczos_shift_image(sub, -dx, -dy)
iyslice = img[iy[i], ix[i]-sz : ix[i]+sz+1]
myslice = mod[iy[i], ix[i]-sz : ix[i]+sz+1]
ixslice = img[iy[i]-sz : iy[i]+sz+1, ix[i]]
mxslice = mod[iy[i]-sz : iy[i]+sz+1, ix[i]]
mx = iyslice.max()
# plt.plot(iyslice/mx, 'b-', alpha=0.1)
# plt.plot(myslice/mx, 'r-', alpha=0.1)
# plt.plot(ixslice/mx, 'b-', alpha=0.1)
# plt.plot(mxslice/mx, 'r-', alpha=0.1)
siyslice = shimg[sz, :]
sixslice = shimg[:, sz]
smyslice = shmod[sz, :]
smxslice = shmod[:, sz]
shimgs.append(siyslice/mx)
shimgs.append(sixslice/mx)
shmods.append(smyslice/mx)
shmods.append(smxslice/mx)
imgs.append(iyslice/mx)
imgs.append(ixslice/mx)
mods.append(myslice/mx)
mods.append(mxslice/mx)
imgsum = imgsum + ixslice + iyslice
modsum = modsum + mxslice + myslice
# plt.ylim(-0.1, 1.1)
# plt.title(tt)
# ps.savefig()
mimg = np.median(np.array(imgs), axis=0)
mmod = np.median(np.array(mods), axis=0)
mshim = np.median(np.array(shimgs), axis=0)
mshmo = np.median(np.array(shmods), axis=0)
allmods.append(mshmo)
allimgs.append(mshim)
plt.clf()
# plt.plot(mimg, 'b-')
# plt.plot(mmod, 'r-')
plt.plot(mshim, 'g-')
plt.plot(mshmo, 'm-')
plt.ylim(-0.1, 1.1)
plt.title(tt + ': median; sums %.3f/%.3f' % (np.sum(mimg), np.sum(mmod)))
ps.savefig()
# plt.clf()
# mx = imgsum.max()
# plt.plot(imgsum/mx, 'b-')
# plt.plot(modsum/mx, 'r-')
# plt.ylim(-0.1, 1.1)
# plt.title(tt + ': sum')
# ps.savefig()
plt.clf()
plt.plot((mimg + 0.01) / (mmod + 0.01), 'k-')
plt.plot((imgsum/mx + 0.01) / (modsum/mx + 0.01), 'g-')
plt.plot((mshim + 0.01) / (mshmo + 0.01), 'm-')
plt.ylabel('(img + 0.01) / (mod + 0.01)')
plt.title(tt)
ps.savefig()
iq2,iq3 = allimgs
mq2,mq3 = allmods
plt.clf()
plt.plot(iq2, 'r-')
plt.plot(mq2, 'm-')
plt.plot(iq3, 'b-')
plt.plot(mq3, 'g-')
plt.title('Q2 vs Q3')
ps.savefig()
def main():
# ps = PlotSequence('pro')
# star_profiles(ps)
# sys.exit(0)
#survey_dir = '/project/projectdirs/desiproc/dr3'
#survey = LegacySurveyData(survey_dir=survey_dir)
survey = LegacySurveyData()
ralo,rahi = 240,245
declo,dechi = 5, 12
ps = PlotSequence('comp')
bands = 'grz'
ccdfn = 'ccds-forced.fits'
if not os.path.exists(ccdfn):
ccds = survey.get_annotated_ccds()
ccds.cut((ccds.ra > ralo) * (ccds.ra < rahi) *
(ccds.dec > declo) * (ccds.dec < dechi))
print(len(ccds), 'CCDs')
ccds.path = np.array([os.path.join(#'dr3',
'forced', ('%08i' % e)[:5], '%08i' % e, 'decam-%08i-%s-forced.fits' % (e, n.strip()))
for e,n in zip(ccds.expnum, ccds.ccdname)])
I, = np.nonzero([os.path.exists(fn) for fn in ccds.path])
print(len(I), 'CCDs with forced photometry')
ccds.cut(I)
#ccds = ccds[:500]
#e,I = np.unique(ccds.expnum, return_index=True)
#print(len(I), 'unique exposures')
#ccds.cut(I)
FF = read_forcedphot_ccds(ccds, survey)
FF.writeto('forced-all-matches.fits')
# - z band -- no trend w/ PS1 mag (brighter-fatter)
ccds.writeto(ccdfn)
ccdfn2 = 'ccds-forced-2.fits'
if not os.path.exists(ccdfn2):
ccds = fits_table(ccdfn)
# Split into brighter/fainter halves
FF = fits_table('forced-all-matches.fits')
print(len(FF), 'forced measurements')
FF.cut(FF.masked == False)
print(len(FF), 'forced measurements not masked')
ccds.brightest_mdiff = np.zeros(len(ccds))
ccds.brightest_mscatter = np.zeros(len(ccds))
ccds.bright_mdiff = np.zeros(len(ccds))
ccds.bright_mscatter = np.zeros(len(ccds))
ccds.faint_mdiff = np.zeros(len(ccds))
ccds.faint_mscatter = np.zeros(len(ccds))
for iccd in range(len(ccds)):
I = np.flatnonzero(FF.iforced == iccd)
if len(I) == 0:
continue
if len(I) < 10:
continue
F = FF[I]
b = np.percentile(F.psmag, 10)
m = np.median(F.psmag)
print(len(F), 'matches for CCD', iccd, 'median mag', m, '10th pct', b)
J = np.flatnonzero(F.psmag < b)
diff = F.mag[J] - F.psmag[J]
ccds.brightest_mdiff[iccd] = np.median(diff)
ccds.brightest_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
J = np.flatnonzero(F.psmag < m)
diff = F.mag[J] - F.psmag[J]
ccds.bright_mdiff[iccd] = np.median(diff)
ccds.bright_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
J = np.flatnonzero(F.psmag > m)
diff = F.mag[J] - F.psmag[J]
ccds.faint_mdiff[iccd] = np.median(diff)
ccds.faint_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
ccds.writeto(ccdfn2)
ccds = fits_table(ccdfn2)
plt.clf()
plt.hist(ccds.nforced, bins=100)
plt.title('nforced')
ps.savefig()
plt.clf()
plt.hist(ccds.nmatched, bins=100)
plt.title('nmatched')
ps.savefig()
#ccds.cut(ccds.nmatched >= 150)
ccds.cut(ccds.nmatched >= 50)
print('Cut to', len(ccds), 'with >50 matched')
ccds.cut(ccds.photometric)
print('Cut to', len(ccds), 'photometric')
neff = 1. / ccds.psfnorm_mean**2
# Narcsec is in arcsec**2
narcsec = neff * ccds.pixscale_mean**2
# to arcsec
narcsec = np.sqrt(narcsec)
# Correction factor to get back to equivalent of Gaussian sigma
narcsec /= (2. * np.sqrt(np.pi))
# Conversion factor to FWHM (2.35)
narcsec *= 2. * np.sqrt(2. * np.log(2.))
ccds.psfsize = narcsec
for band in bands:
I = np.flatnonzero((ccds.filter == band)
* (ccds.photometric) * (ccds.blacklist_ok))
mlo,mhi = -0.01, 0.05
plt.clf()
plt.plot(ccds.ccdzpt[I],
ccds.exptime[I], 'k.', alpha=0.1)
J = np.flatnonzero((ccds.filter == band) * (ccds.photometric == False))
plt.plot(ccds.ccdzpt[J],
ccds.exptime[J], 'r.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('exptime')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
plt.clf()
plt.plot(ccds.ccdzpt[I],
np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
#plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
plt.clf()
plt.plot(ccds.ccdzpt[I], ccds.psfsize[I], 'k.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('PSF size (arcsec)')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
# plt.clf()
# plt.plot(ccds.avsky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('avsky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
#
# plt.clf()
# plt.plot(ccds.meansky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('meansky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
# plt.clf()
# plt.plot(ccds.avsky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('avsky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
#
# plt.clf()
# plt.plot(ccds.meansky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('meansky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
plt.clf()
lo,hi = (-0.02, 0.05)
ha = dict(bins=50, histtype='step', range=(lo,hi))
n,b,p1 = plt.hist(ccds.brightest_mdiff[I], color='r', **ha)
n,b,p2 = plt.hist(ccds.bright_mdiff[I], color='g', **ha)
n,b,p3 = plt.hist(ccds.faint_mdiff[I], color='b', **ha)
plt.legend((p1[0],p2[0],p3[0]), ('Brightest 10%', 'Brightest 50%',
'Faintest 50%'))
plt.xlabel('DECaLS PSF - PS1 (mag)')
plt.ylabel('Number of CCDs')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
plt.xlim(lo,hi)
ps.savefig()
for band in bands:
I = np.flatnonzero(ccds.filter == band)
mxsee = 4.
mlo,mhi = -0.01, 0.05
plt.clf()
plt.plot(np.clip(ccds.psfsize[I], 0, mxsee),
np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# for p in [1,2,3]:
# J = np.flatnonzero(ccds.tilepass[I] == p)
# if len(J):
# plt.plot(np.clip(ccds.psfsize[I[J]], 0, mxsee),
# np.clip(ccds.mdiff[I[J]], mlo,mhi), '.', color='rgb'[p-1], alpha=0.2)
#plt.plot(ccds.seeing[I], ccds.mdiff[I], 'b.')
plt.xlabel('PSF size (arcsec)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
# Group by exposure
for band in bands:
I = np.flatnonzero((ccds.filter == band)
* (ccds.photometric) * (ccds.blacklist_ok))
E,J = np.unique(ccds.expnum[I], return_index=True)
print(len(E), 'unique exposures in', band)
exps = ccds[I[J]]
print(len(exps), 'unique exposures in', band)
assert(len(np.unique(exps.expnum)) == len(exps))
exps.ddiff = np.zeros(len(exps))
exps.dsize = np.zeros(len(exps))
exps.nccds = np.zeros(len(exps), int)
exps.brightest_ddiff = np.zeros(len(exps))
exps.bright_ddiff = np.zeros(len(exps))
exps.faint_ddiff = np.zeros(len(exps))
for iexp,exp in enumerate(exps):
J = np.flatnonzero(ccds.expnum[I] == exp.expnum)
J = I[J]
print(len(J), 'CCDs in exposure', exp.expnum)
exps.brightest_mdiff[iexp] = np.median(ccds.brightest_mdiff[J])
exps.bright_mdiff[iexp] = np.median(ccds.bright_mdiff[J])
exps.faint_mdiff[iexp] = np.median(ccds.faint_mdiff[J])
exps.brightest_ddiff[iexp] = (
np.percentile(ccds.brightest_mdiff[J], 84) -
np.percentile(ccds.brightest_mdiff[J], 16))/2.
exps.bright_ddiff[iexp] = (
np.percentile(ccds.bright_mdiff[J], 84) -
np.percentile(ccds.bright_mdiff[J], 16))/2.
exps.faint_ddiff[iexp] = (
np.percentile(ccds.faint_mdiff[J], 84) -
np.percentile(ccds.faint_mdiff[J], 16))/2.
exps.mdiff[iexp] = np.median(ccds.mdiff[J])
"""
Module of functions involving great circles
(thus assuming spheroid model of the earth)
with points given in longitudes and latitudes.
"""
from __future__ import print_function
import math
import numpy
import numpy.random
# Equatorial radius of the earth in kilometers
EARTH_ER = 6378.137
# Authalic radius of the earth in kilometers
EARTH_AR = 6371.007
# Meridional radius of the earth in kilometers
EARTH_MR = 6367.449
# Polar radius of the earth in kilometers
EARTH_PR = 6356.752
DEG2RAD = math.pi / 180.0
RAD2DEG = 180.0 / math.pi
KM2MI = 0.6213712
MI2KM = 1.609344
def lonlatdistance(pt1lon, pt1lat, pt2lon, pt2lat):
"""
Compute the great circle distance between two points
on a sphere using the haversine formula.
Arguments:
pt1lon - longitude(s) of the first point
pt1lat - latitude(s) of the first point
pt2lon - longitude(s) of the second point
pt2lat - latitude(s) of the second point
Returns:
The great circle distance(s) in degrees [0.0, 180.0]
"""
lon1 = numpy.deg2rad(numpy.asarray(pt1lon, dtype=float))
lat1 = numpy.deg2rad(numpy.asarray(pt1lat, dtype=float))
lon2 = numpy.deg2rad(numpy.asarray(pt2lon, dtype=float))
lat2 = numpy.deg2rad(numpy.asarray(pt2lat, dtype=float))
dellat = numpy.power(numpy.sin(0.5 * (lat2 - lat1)), 2.0)
dellon = numpy.cos(lat1) * numpy.cos(lat2) * \
numpy.power(numpy.sin(0.5 * (lon2 - lon1)), 2.0)
dist = 2.0 * numpy.arcsin(numpy.power(dellon + dellat, 0.5))
return numpy.rad2deg(dist)
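# Minimal usage sketch (coordinates are illustrative assumptions, roughly London
# and New York): the returned distance is in degrees of arc; multiply by DEG2RAD
# and an earth radius, e.g. the authalic radius EARTH_AR, to get kilometers.
def _example_lonlatdistance(lon1=-0.13, lat1=51.51, lon2=-74.01, lat2=40.71):
    dist_deg = lonlatdistance(lon1, lat1, lon2, lat2)
    dist_km = dist_deg * DEG2RAD * EARTH_AR
    return dist_deg, dist_km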
def lonlatintersect(gc1lon1, gc1lat1, gc1lon2, gc1lat2,
gc2lon1, gc2lat1, gc2lon2, gc2lat2):
"""
Compute the intersections of two great circles. Uses the line of
intersection between the two planes of the great circles.
Arguments:
gc1lon1 - longitude(s) of the first point on the first great circle
gc1lat1 - latitude(s) of the first point on the first great circle
gc1lon2 - longitude(s) of the second point on the first great circle
gc1lat2 - latitude(s) of the second point on the first great circle
gc2lon1 - longitude(s) of the first point on the second great circle
gc2lat1 - latitude(s) of the first point on the second great circle
gc2lon2 - longitude(s) of the second point on the second great circle
gc2lat2 - latitude(s) of the second point on the second great circle
Returns:
( (pt1lon, pt1lat), (pt2lon, pt2lat) ) - the longitudes and latitudes
of the two intersections of the two great circles. NaN will
be returned for both longitudes and latitudes if a great
circle is not well-defined, or the two great-circles coincide.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz2 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz2 = numpy.array([gcx, gcy, gcz])
# Get the unit-perpendicular to the plane going through the
# origin and the two points on each great circle. If the
# norm of the cross product is too small, the great circle
# is not well-defined, so zero it out so NaN is produced.
gc1pp = numpy.cross(gc1xyz1, gc1xyz2, axis=0)
norm = (gc1pp[0]**2 + gc1pp[1]**2 + gc1pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc1pp /= norm
gc2pp = numpy.cross(gc2xyz1, gc2xyz2, axis=0)
norm = (gc2pp[0]**2 + gc2pp[1]**2 + gc2pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc2pp /= norm
# The line of intersection of the two planes is perpendicular
# to the two plane-perpendiculars and goes through the origin.
# Points of intersection are the points on this line one unit
# from the origin. If the norm of the cross product is too
# small, the two planes are practically indistinguishable from
# each other (coincide).
pt1xyz = numpy.cross(gc1pp, gc2pp, axis=0)
norm = (pt1xyz[0]**2 + pt1xyz[1]**2 + pt1xyz[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
pt1xyz /= norm
pt2xyz = -1.0 * pt1xyz
# Convert back to longitudes and latitudes
pt1lats = numpy.rad2deg(numpy.arcsin(pt1xyz[2]))
pt1lons = numpy.rad2deg(numpy.arctan2(pt1xyz[1], pt1xyz[0]))
pt2lats = numpy.rad2deg(numpy.arcsin(pt2xyz[2]))
pt2lons = numpy.rad2deg(numpy.arctan2(pt2xyz[1], pt2xyz[0]))
return ( (pt1lons, pt1lats), (pt2lons, pt2lats) )
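# Minimal usage sketch (points chosen for illustration): the equator (two points
# at latitude 0) and the prime meridian (two points at longitude 0) are both
# great circles, so their intersections come back near (0, 0) and (+/-180, 0).
def _example_lonlatintersect():
    return lonlatintersect(0.0, 0.0, 90.0, 0.0,
                           0.0, -45.0, 0.0, 45.0)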
def lonlatfwdpt(origlon, origlat, endlon, endlat, fwdfact):
"""
Find the longitude and latitude of a point that is a given factor
times the distance along the great circle from an origination point
to an ending point.
Note that the shorter great circle arc from the origination point
to the ending point is always used.
If O is the origination point, E is the ending point, and P is
the point returned from this computation, a factor value of:
0.5: P bisects the great circle arc between O and E
2.0: E bisects the great circle arc between O and P
-1.0: O bisects the great circle arc between P and E
Arguments:
origlon - longitude(s) of the origination point
origlat - latitude(s) of the origination point
endlon - longitude(s) of the ending point
endlat - latitude(s) of the ending point
fwdfact - forward distance factor(s)
Returns:
(ptlon, ptlat) - longitude and latitude of the computed point(s).
NaN will be returned for both the longitude and
latitude if the great circle is not well-defined.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(origlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(origlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
origxyz = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(endlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(endlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
endxyz = numpy.array([gcx, gcy, gcz])
# Determine the rotation matrix about the origin that takes
# origxyz to (1,0,0) (equator and prime meridian) and endxyz
# to (x,y,0) with y > 0 (equator in eastern hemisphere).
#
# The first row of the matrix is origxyz.
#
# The third row of the matrix is the normalized cross product
# of origxyz and endxyz. (The great circle plane perpendicular.)
# If the norm of this cross product is too small, the great
# circle is not well-defined, so zero it out so NaN is produced.
gcpp = numpy.cross(origxyz, endxyz, axis=0)
norm = (gcpp[0]**2 + gcpp[1]**2 + gcpp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gcpp /= norm
# The second row of the matrix is the cross product of the
# third row (gcpp) and the first row (origxyz). This will
# have norm 1.0 since gcpp and origxyz are perpendicular
# unit vectors.
fwdax = numpy.cross(gcpp, origxyz, axis=0)
# Get the coordinates of the rotated end point.
endtrx = origxyz[0] * endxyz[0] + origxyz[1] * endxyz[1] + origxyz[2] * endxyz[2]
endtry = fwdax[0] * endxyz[0] + fwdax[1] * endxyz[1] + fwdax[2] * endxyz[2]
# Get the angle along the equator of the rotated end point, multiply
# by the given factor, and convert this new angle back to coordinates.
fwdang = numpy.arctan2(endtry, endtrx)
fwdang *= numpy.asarray(fwdfact, dtype=float)
fwdtrx = numpy.cos(fwdang)
fwdtry = numpy.sin(fwdang)
# Rotate the new point back to the original coordinate system
# The inverse rotation matrix is the transpose of that matrix.
fwdx = origxyz[0] * fwdtrx + fwdax[0] * fwdtry
fwdy = origxyz[1] * fwdtrx + fwdax[1] * fwdtry
fwdz = origxyz[2] * fwdtrx + fwdax[2] * fwdtry
# Convert the point coordinates into longitudes and latitudes
ptlat = numpy.rad2deg(numpy.arcsin(fwdz))
ptlon = numpy.rad2deg(numpy.arctan2(fwdy, fwdx))
return (ptlon, ptlat)
import numpy as np
from scipy.io import wavfile
import wave
import librosa
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from tqdm import tqdm
X_SIZE = 16000
IMG_SIZE = 28
DATA_PATH = "./data/"
# Input labels
def get_labels(path=DATA_PATH):
labels = os.listdir(path)
label_indices = np.arange(0, len(labels))
return labels, label_indices, to_categorical(label_indices)
# Load a wav file and return (sample_rate, waveform)
def wav2mfcc(file_path, max_len=11):
wave, sr = librosa.load(file_path, mono=True, sr=None)
# wave = wave[::3]
# wave = librosa.effects.pitch_shift(wave, sr, n_steps=4)
return sr, wave
# Get spectrogram
def spectrogram(filepath):
framerate, wav_data = wav2mfcc(filepath)
window_length = 512
window_shift = 121
if len(wav_data) > X_SIZE:
wav_data = wav_data[:X_SIZE]
X = np.zeros(X_SIZE).astype('float32')
X[:len(wav_data)] += wav_data
spec = np.zeros((IMG_SIZE, IMG_SIZE)).astype('float32')
for i in range(IMG_SIZE):
start = i * window_shift
end = start + window_length
sig = np.abs(np.fft.rfft(X[start:end] * np.hanning(window_length)))
spec[:,i] = (sig[1:IMG_SIZE + 1])[::-1]
spec = (spec-spec.min())/(spec.max()-spec.min())
spec = np.log10((spec * 100 + 0.01))
spec = (spec-spec.min())/(spec.max()-spec.min()) - 0.5
return spec
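# Minimal usage sketch (the path below is an assumed placeholder): spectrogram()
# returns an IMG_SIZE x IMG_SIZE float array, log-scaled and roughly centred on
# zero, ready to be stacked into the arrays written by save_data_to_array().
def example_spectrogram(filepath=DATA_PATH + 'yes/example.wav'):
    spec = spectrogram(filepath)
    assert spec.shape == (IMG_SIZE, IMG_SIZE)
    return spec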
# Save to .npy
def save_data_to_array(path=DATA_PATH):
labels, _, _ = get_labels(path)
for label in labels:
# Init mfcc vectors
mfcc_vectors = []
wavfiles = [path + label + '/' + wavfile for wavfile in os.listdir(path + '/' + label)]
for wavfile in tqdm(wavfiles, "Saving vectors of label - '{}'".format(label)):
mfcc = spectrogram(wavfile)
mfcc_vectors.append(mfcc)
np.save(label + 'spec.npy', mfcc_vectors)
'''
Utilities that are useful to sub- or up-sample weights tensors.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np
def sample_tensors(weights_list, sampling_instructions, axes=None, init=None, mean=0.0, stddev=0.005):
'''
Can sub-sample and/or up-sample individual dimensions of the tensors in the given list
of input tensors.
It is possible to sub-sample some dimensions and up-sample other dimensions at the same time.
The tensors in the list will be sampled consistently, i.e. for any given dimension that
corresponds among all tensors in the list, the same elements will be picked for every tensor
along that dimension.
For dimensions that are being sub-sampled, you can either provide a list of the indices
that should be picked, or you can provide the number of elements to be sub-sampled, in which
case the elements will be chosen at random.
For dimensions that are being up-sampled, "filler" elements will be inserted at random
positions along the respective dimension. These filler elements will be initialized either
with zero or from a normal distribution with selectable mean and standard deviation.
Arguments:
weights_list (list): A list of Numpy arrays. Each array represents one of the tensors
to be sampled. The tensor with the greatest number of dimensions must be the first
element in the list. For example, in the case of the weights of a 2D convolutional
layer, the kernel must be the first element in the list and the bias the second,
not the other way around. For all tensors in the list after the first tensor, the
lengths of each of their axes must be identical to the length of some axis of the
first tensor.
sampling_instructions (list): A list that contains the sampling instructions for each
dimension of the first tensor. If the first tensor has `n` dimensions, then this
must be a list of length `n`. That means, sampling instructions for every dimension
of the first tensor must still be given even if not all dimensions should be changed.
The elements of this list can be either lists of integers or integers. If the sampling
instruction for a given dimension is a list of integers, then these integers represent
the indices of the elements of that dimension that will be sub-sampled. If the sampling
instruction for a given dimension is an integer, then that number of elements will be
sampled along said dimension. If the integer is greater than the number of elements
of the input tensors in that dimension, that dimension will be up-sampled. If the integer
is smaller than the number of elements of the input tensors in that dimension, that
dimension will be sub-sampled. If the integer is equal to the number of elements
of the input tensors in that dimension, that dimension will remain the same.
axes (list, optional): Only relevant if `weights_list` contains more than one tensor.
This list contains a list for each additional tensor in `weights_list` beyond the first.
Each of these lists contains integers that determine to which axes of the first tensor
the axes of the respective tensor correspond. For example, let the first tensor be a
4D tensor and the second tensor in the list be a 2D tensor. If the first element of
`axes` is the list `[2,3]`, then that means that the two axes of the second tensor
correspond to the last two axes of the first tensor, in the same order. The point of
this list is for the program to know, if a given dimension of the first tensor is to
be sub- or up-sampled, which dimensions of the other tensors in the list must be
sub- or up-sampled accordingly.
init (list, optional): Only relevant for up-sampling. Must be `None` or a list of strings
that determines for each tensor in `weights_list` how the newly inserted values should
be initialized. The possible values are 'gaussian' for initialization from a normal
distribution with the selected mean and standard deviation (see the following two arguments),
or 'zeros' for zero-initialization. If `None`, all initializations default to
'gaussian'.
mean (float, optional): Only relevant for up-sampling. The mean of the values that will
be inserted into the tensors at random in the case of up-sampling.
stddev (float, optional): Only relevant for up-sampling. The standard deviation of the
values that will be inserted into the tensors at random in the case of up-sampling.
Returns:
A list containing the sampled tensors in the same order in which they were given.
'''
first_tensor = weights_list[0]
if (not isinstance(sampling_instructions, (list, tuple))) or (len(sampling_instructions) != first_tensor.ndim):
raise ValueError(
"The sampling instructions must be a list whose length is the number of dimensions of the first tensor in `weights_list`.")
if (not init is None) and len(init) != len(weights_list):
raise ValueError(
"`init` must either be `None` or a list of strings that has the same length as `weights_list`.")
up_sample = [] # Store the dimensions along which we need to up-sample.
out_shape = [] # Store the shape of the output tensor here.
# Store two stages of the new (sub-sampled and/or up-sampled) weights tensors in the following two lists.
subsampled_weights_list = [] # Tensors after sub-sampling, but before up-sampling (if any).
upsampled_weights_list = [] # Sub-sampled tensors after up-sampling (if any), i.e. final output tensors.
# Create the slicing arrays from the sampling instructions.
sampling_slices = []
for i, sampling_inst in enumerate(sampling_instructions):
if isinstance(sampling_inst, (list, tuple)):
amax = np.amax(np.array(sampling_inst))
if amax >= first_tensor.shape[i]:
raise ValueError(
"The sample instructions for dimension {} contain index {}, which is greater than the length of that dimension.".format(
i, amax))
sampling_slices.append(np.array(sampling_inst))
out_shape.append(len(sampling_inst))
elif isinstance(sampling_inst, int):
out_shape.append(sampling_inst)
if sampling_inst == first_tensor.shape[i]:
# Nothing to sample here, we're keeping the original number of elements along this axis.
sampling_slice = np.arange(sampling_inst)
"""
Tests to make sure deepchem models can overfit on tiny datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
import scipy.io
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from flaky import flaky
class TestOverfit(test_util.TensorFlowTestCase):
"""
Test that models can overfit simple datasets.
"""
def setUp(self):
super(TestOverfit, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_sklearn_regression_overfit(self):
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit(self):
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit(self):
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_regression_overfit(self):
"""Test that TensorFlow models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_regression_overfit(self):
"""Test that TensorGraph models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorGraphMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_classification_overfit(self):
"""Test that tensorflow models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tg_classification_overfit(self):
"""Test that TensorGraph models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer,
learning_rate=0.0003,
beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_fittransform_regression_overfit(self):
"""Test that TensorFlow FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tg_fittransform_regression_overfit(self):
"""Test that TensorGraph FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorGraphMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_skewed_classification_overfit(self):
"""Test tensorflow models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tg_skewed_classification_overfit(self):
"""Test TensorGraph models can overfit 0/1 datasets with few actives."""
#n_samples = 100
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorGraphMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
weight_init_stddevs=[.1],
batch_size=n_samples)
model.set_optimizer(
dc.models.tensorgraph.tensor_graph.TFWrapper(
tf.train.AdamOptimizer, learning_rate=0.003, beta1=0.9,
beta2=0.999))
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tf_skewed_missing_classification_overfit(self):
"""TF, skewed data, few actives
Test tensorflow models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[1.],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
def test_tg_skewed_missing_classification_overfit(self):
"""TG, skewed data, few actives
Test TensorGraph models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
#!/usr/bin/python
import argparse
import numpy as np
import arrow
import PIL
from tensorrtserver.api import ServerStatusContext, ProtocolType, InferContext
import tensorrtserver.api.model_config_pb2 as model_config
from bistiming import Stopwatch
from eyewitness.detection_utils import DetectionResult
from eyewitness.image_id import ImageId
from eyewitness.config import BoundedBoxObject
from eyewitness.object_detector import ObjectDetector
from eyewitness.image_utils import ImageHandler, Image, resize_and_stack_image_objs
from data_processing import (PostprocessYOLO, ALL_CATEGORIES, CATEGORY_NUM)
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-a', '--is_async', action="store_true", required=False, default=False,
help='Use asynchronous inference API')
parser.add_argument('--streaming', action="store_true", required=False, default=False,
help='Use streaming inference API. ' +
'The flag is only available with gRPC protocol.')
parser.add_argument('-m', '--model-name', type=str, required=True,
help='Name of model')
parser.add_argument('-x', '--model-version', type=int, required=False,
help='Version of model. Default is to use latest version.')
parser.add_argument('-b', '--batch-size', type=int, required=False, default=1,
help='Batch size. Default is 1.')
parser.add_argument('-c', '--classes', type=int, required=False, default=1,
help='Number of class results to report. Default is 1.')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False, default='HTTP',
help='Protocol (HTTP/gRPC) used to ' +
'communicate with inference service. Default is HTTP.')
parser.add_argument('image_filename', type=str, nargs='?', default=None,
help='Input image / Input folder.')
def model_dtype_to_np(model_dtype):
if model_dtype == model_config.TYPE_BOOL:
return np.bool
elif model_dtype == model_config.TYPE_INT8:
return np.int8
elif model_dtype == model_config.TYPE_INT16:
return np.int16
elif model_dtype == model_config.TYPE_INT32:
return np.int32
elif model_dtype == model_config.TYPE_INT64:
return np.int64
elif model_dtype == model_config.TYPE_UINT8:
return np.uint8
elif model_dtype == model_config.TYPE_UINT16:
return np.uint16
elif model_dtype == model_config.TYPE_FP16:
return np.float16
elif model_dtype == model_config.TYPE_FP32:
return np.float32
elif model_dtype == model_config.TYPE_FP64:
return np.float64
elif model_dtype == model_config.TYPE_STRING:
return np.dtype(object)
return None
def parse_model(url, protocol, model_name, batch_size, verbose=False):
"""
Check the configuration of a model to make sure it meets the
requirements for an image classification network (as expected by
this client)
"""
ctx = ServerStatusContext(url, protocol, model_name, verbose)
server_status = ctx.get_server_status()
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
if len(config.input) != 1:
raise Exception("expecting 1 input, got {}".format(len(config.input)))
input = config.input[0]
for output in config.output:
if output.data_type != model_config.TYPE_FP32:
raise Exception("expecting output datatype to be TYPE_FP32, model '" +
model_name + "' output type is " +
model_config.DataType.Name(output.data_type))
output_names = [output.name for output in config.output]
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# image instance is inferred at a time).
max_batch_size = config.max_batch_size
if max_batch_size == 0:
if batch_size != 1:
raise Exception("batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception("expecting batch size <= {} for model {}".format(
max_batch_size, model_name))
# Model input must have 3 dims, either CHW or HWC
if len(input.dims) != 3:
raise Exception(
"expecting input to have 3 dimensions, model '{}' input has {}".format(
model_name, len(input.dims)))
# Variable-size dimensions are not currently supported.
for dim in input.dims:
if dim == -1:
raise Exception("variable-size dimension in model input not supported")
if ((input.format != model_config.ModelInput.FORMAT_NCHW) and
(input.format != model_config.ModelInput.FORMAT_NHWC)):
raise Exception(
"unexpected input format "
+ model_config.ModelInput.Format.Name(input.format)
+ ", expecting "
+ model_config.ModelInput.Format.Name(model_config.ModelInput.FORMAT_NCHW)
+ " or "
+ model_config.ModelInput.Format.Name(model_config.ModelInput.FORMAT_NHWC))
if input.format == model_config.ModelInput.FORMAT_NHWC:
h = input.dims[0]
w = input.dims[1]
c = input.dims[2]
else:
c = input.dims[0]
h = input.dims[1]
w = input.dims[2]
return (input.name, output_names, c, h, w, input.format, model_dtype_to_np(input.data_type))
def preprocess(img, format, dtype, c, h, w):
"""
Pre-process an image to meet the size, type and format
requirements specified by the parameters.
"""
# np.set_printoptions(threshold='nan')
if c == 1:
sample_img = img.convert('L')
else:
sample_img = img.convert('RGB')
resized_img = sample_img.resize((w, h), PIL.Image.BILINEAR)
resized = np.array(resized_img)
if resized.ndim == 2:
resized = resized[:, :, np.newaxis]
typed = resized.astype(dtype)
scaled = typed / 256
# Swap to CHW if necessary
if format == model_config.ModelInput.FORMAT_NCHW:
ordered = np.transpose(scaled, (2, 0, 1))
else:
ordered = scaled
# Channels are in RGB order. Currently model configuration data
# doesn't provide any information as to other channel orderings
# (like BGR) so we just assume RGB.
return ordered
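# Minimal usage sketch (file name and input geometry are assumptions for
# illustration): preprocess() resizes a PIL image to the model input size,
# scales pixels to roughly [0, 1) and reorders axes to CHW for FORMAT_NCHW.
def example_preprocess(path='dog.jpg', c=3, h=416, w=416):
    img = PIL.Image.open(path)
    return preprocess(img, model_config.ModelInput.FORMAT_NCHW, np.float32, c, h, w)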
class YoloV3DetectorTensorRTClient(ObjectDetector):
def __init__(self, model_setting, threshold=0.14):
# get the model setting
# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
protocol = ProtocolType.from_str(model_setting.protocol)
if model_setting.streaming and protocol != ProtocolType.GRPC:
raise Exception("Streaming is only allowed with gRPC protocol")
self.input_name, self.output_names, c, h, w, format, dtype = parse_model(
model_setting.url, protocol, model_setting.model_name,
model_setting.batch_size, model_setting.verbose)
self.ctx = InferContext(model_setting.url, protocol, model_setting.model_name,
model_setting.model_version, model_setting.verbose, 0,
model_setting.streaming)
self.image_shape = (h, w)
layer_output = CATEGORY_NUM * 3 + 15
self.output_shapes = [
(1, layer_output, *(int(i / 32) for i in self.image_shape)),
(1, layer_output, *(int(i / 16) for i in self.image_shape)),
(1, layer_output, *(int(i / 8) for i in self.image_shape))
]
# self.engine_file = engine_file
self.threshold = threshold
postprocessor_args = {
# A list of 3 three-dimensional tuples for the YOLO masks
"yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],
# A list of 9 two-dimensional tuples for the YOLO anchors
"yolo_anchors": [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
(59, 119), (116, 90), (156, 198), (373, 326)],
# Threshold for object coverage, float value between 0 and 1
"obj_threshold": self.threshold,
# Threshold for non-max suppression algorithm, float value between 0 and 1
"nms_threshold": 0.5,
"yolo_input_resolution": self.image_shape}
self.postprocessor = PostprocessYOLO(**postprocessor_args)
def detect(self, image_obj) -> DetectionResult:
image_raw_width = image_obj.pil_image_obj.width
image_raw_height = image_obj.pil_image_obj.height
image_frame, scale_ratio = self.preprocess(image_obj.pil_image_obj)
input_batch = [image_frame]
output_dict = {
output_name: InferContext.ResultFormat.RAW
for output_name in self.output_names
}
# Send request
response = self.ctx.run(
{self.input_name: input_batch}, output_dict, model_setting.batch_size)
trt_outputs = [response[output][0] for output in sorted(response.keys())]
# Before doing post-processing,
# we need to reshape the outputs as the common.do_inference will give us flat arrays.
trt_outputs = [output.reshape(shape)
for output, shape in zip(trt_outputs, self.output_shapes)]
# Run the post-processing algorithms on the TensorRT outputs and get the bounding box
# details of detected objects
boxes, classes, scores = self.postprocessor.process(
trt_outputs, tuple(int(i / scale_ratio) for i in self.image_shape))
detected_objects = []
if all(i.shape[0] for i in [boxes, scores, classes]):
for bbox, score, label_class in zip(boxes, scores, classes):
label = ALL_CATEGORIES[label_class]
x_coord, y_coord, width, height = bbox
x1 = max(0, np.floor(x_coord + 0.5).astype(int))
y1 = max(0, np.floor(y_coord + 0.5).astype(int))
x2 = min(image_raw_width, np.floor(x_coord + width + 0.5).astype(int))
y2 = min(image_raw_height, np.floor(y_coord + height + 0.5).astype(int))
# handle the edge case of padding space
x1 = min(image_raw_width, x1)
x2 = min(image_raw_width, x2)
if x1 == x2:
continue
y1 = min(image_raw_height, y1)
y2 = min(image_raw_height, y2)
if y1 == y2:
continue
detected_objects.append(BoundedBoxObject(x1, y1, x2, y2, label, score, ''))
image_dict = {
'image_id': image_obj.image_id,
'detected_objects': detected_objects,
}
detection_result = DetectionResult(image_dict)
return detection_result
def preprocess(self, pil_image_obj):
"""
since the tensorRT engine with a fixed input shape, and we don't want to resize the
original image directly, thus we perform a way like padding and resize the original image
to align the long side to the tensorrt input
Parameters
----------
pil_image_obj: PIL.image.object
Returns
-------
image: np.array
np.array with shape: NCHW, value between 0~1
image_resized_shape: (Int, Int)
resized image size, (height, weight)
"""
original_image_size = (pil_image_obj.width, pil_image_obj.height)
width_scale_weight = original_image_size[0] / self.image_shape[0]
height_scale_weight = original_image_size[1] / self.image_shape[1]
scale_ratio = min(width_scale_weight, height_scale_weight)
image_resized_shape = tuple(int(i * scale_ratio) for i in original_image_size)
output_img = np.zeros((3, *self.image_shape))
processed_image = resize_and_stack_image_objs(
image_resized_shape, [pil_image_obj]) # NHWC
processed_image = np.transpose(processed_image, [0, 3, 1, 2])
"""Resynthesis of signals described as sinusoid tracks."""
import numpy as np
def synthtrax(F, M, SR, SUBF=128, DUR=0):
"""
% X = synthtrax(F, M, SR, SUBF, DUR) Reconstruct a sound from track rep'n.
% Each row of F and M contains a series of frequency and magnitude
% samples for a particular track. These will be remodulated and
% overlaid into the output sound X which will run at sample rate SR,
% although the columns in F and M are subsampled from that rate by
% a factor SUBF (default 128). If DUR is nonzero, X will be padded or
% truncated to correspond to just this much time.
% <EMAIL> 1994aug20, 1996aug22
"""
rows, cols = F.shape
opsamps = int(np.round(DUR * SR))
if not DUR:
opsamps = cols * SUBF
X = np.zeros(opsamps)
for row in range(rows):
mm = M[row]
ff = F[row]
# First, find onsets - points where mm goes from zero (or NaN) to nzero
# Before that, even, set all nan values of mm to zero
nzv = np.nonzero(mm)[0]
firstcol = np.min(nzv)
lastcol = np.max(nzv)
# for speed, chop off regions of initial and final zero magnitude -
# but want to include one zero from each end if they are there
zz = np.arange(np.maximum(0, firstcol-1), np.minimum(cols, lastcol+1))
nzcols = zz.shape[0]
if nzcols > 0:
mm = mm[zz]
ff = ff[zz]
mz = mm == 0
# Copy frequency values to one point past each end of nonzero stretches.
onsets = np.nonzero(np.logical_and(mz > 0, np.hstack(
[1, mz[:-1]]) == 0))[0]
ff[onsets - 1] = ff[onsets]
offsets = np.nonzero(np.logical_and(mz[:-1] > 0, mz[1:] == 0))[0]
ff[offsets + 1] = ff[offsets]
# Do interpolation.
ff = np.interp(np.arange(ff.shape[0] * SUBF)/float(SUBF),
np.arange(ff.shape[0]), ff)
mm = np.interp(np.arange(mm.shape[0] * SUBF)/float(SUBF),
np.arange(mm.shape[0]), mm)
import numpy as np
from sklearn.naive_bayes import GaussianNB
from scipy.special import logsumexp
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import GroupShuffleSplit
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
class_set = 9
class ObservationsConditionsClassifier():
""" Container class for several NBGassuian classifiers
"""
def __init__(self, features, discriminant_model, n_angle_bins):
self.n_angle_bins = n_angle_bins
self.features = features
self.classifiers = [
ClassifierComposition(self.features, discriminant_model=discriminant_model) for _ in range(self.n_angle_bins)
]
def fit(self, df):
angle_point_estimates = np.vstack(df['angle'])
from PyQt5 import QtWidgets, uic
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap
import numpy as np
import sys
import os
from os import path
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import skimage.io
# create our own histogram function
def get_histogram(image, bins):
# array with size of bins, set to zeros
histogram = np.zeros(bins)
# loop through pixels and sum up counts of pixels
for pixel in image:
histogram[pixel] += 1
# return our final result
return histogram
# create our cumulative sum function
def cumsum(a):
a = iter(a)
b = [next(a)]
for i in a:
b.append(b[-1] + i)
return np.array(b)
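# --- Added sketch (not part of the original file) ---
# One way get_histogram and cumsum can be combined for grayscale histogram
# equalization: normalize the cumulative histogram to [0, 255] and use it as
# a lookup table. The random image below is only for illustration.
def _equalize_gray_example():
    img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
    flat = img.flatten()
    hist = get_histogram(flat, 256)
    cdf = cumsum(hist)
    # rescale the CDF so the darkest value maps to 0 and the brightest to 255
    cdf_scaled = (cdf - cdf.min()) * 255 / (cdf.max() - cdf.min())
    lut = cdf_scaled.astype('uint8')
    return lut[flat].reshape(img.shape)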
def get_histogram_rgb(image, bins):
# array with size of bins, set to zeros
b = image[:,:,0].flatten()
g = image[:,:,1].flatten()
r = image[:,:,2].flatten()
histogram_r = np.zeros(bins)
histogram_g = np.zeros(bins)
histogram_b = np.zeros(bins)
# loop through pixels and sum up counts of pixels
for i in r:
histogram_r[i] += 1
for i in g:
histogram_g[i] += 1
for i in b:
histogram_b[i] += 1
# return our final result
return (histogram_r,histogram_g,histogram_b)
# function for color image equalization
def histogram_equalization_rgb(img_in):
# segregate color streams
b, g, r = cv2.split(img_in)
h_b, bin_b = np.histogram(b.flatten(), 256, [0, 256])
h_g, bin_g = np.histogram(g.flatten(), 256, [0, 256])
h_r, bin_r = np.histogram(r.flatten(), 256, [0, 256])
# calculate cdf
cdf_b = np.cumsum(h_b)
cdf_g = np.cumsum(h_g)
cdf_r = np.cumsum(h_r)
# mask all pixels with value=0 and replace it with mean of the pixel values
cdf_m_b = np.ma.masked_equal(cdf_b, 0)
cdf_m_b = (cdf_m_b - cdf_m_b.min()) * 255 / (cdf_m_b.max() - cdf_m_b.min())
cdf_final_b = np.ma.filled(cdf_m_b, 0).astype('uint8')
cdf_m_g = np.ma.masked_equal(cdf_g, 0)
cdf_m_g = (cdf_m_g - cdf_m_g.min()) * 255 / (cdf_m_g.max() - cdf_m_g.min())
cdf_final_g = np.ma.filled(cdf_m_g, 0).astype('uint8')
cdf_m_r = np.ma.masked_equal(cdf_r, 0)
cdf_m_r = (cdf_m_r - cdf_m_r.min()) * 255 / (cdf_m_r.max() - cdf_m_r.min())
cdf_final_r = np.ma.filled(cdf_m_r, 0).astype('uint8')
# merge the images in the three channels
img_b = cdf_final_b[b]
img_g = cdf_final_g[g]
img_r = cdf_final_r[r]
img_out = cv2.merge((img_b, img_g, img_r))
# validation
equ_b = cv2.equalizeHist(b)
equ_g = cv2.equalizeHist(g)
equ_r = cv2.equalizeHist(r)
equ = cv2.merge((equ_b, equ_g, equ_r))
# print(equ)
plt.figure(figsize=(20,20))
plt.imshow(equ)
plt.axis('off')
plt.savefig('./Output images/output.jpg', bbox_inches='tight',pad_inches = 0)
return img_out
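# --- Added usage sketch (not part of the original file) ---
# Minimal, hedged example of calling histogram_equalization_rgb on a BGR
# image loaded with OpenCV; 'sample.jpg' is an assumed placeholder path.
def _equalize_file_example(path='sample.jpg'):
    bgr = cv2.imread(path)
    if bgr is None:
        return None
    return histogram_equalization_rgb(bgr)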
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
uic.loadUi('filter.ui', self)
self.actionadd_image.triggered.connect(self.openFileNameDialog)
self.btn1.clicked.connect(self.filter)
self.pushButton.clicked.connect(self.histogram)
if path.exists("Output images") == False:
os.mkdir("./Output images")
self.show()
def openFileNameDialog(self):
path = QFileDialog.getOpenFileName(self, 'Open a file', '', 'Image(*.jpg *.png)')
if path != ('', ''):
self.path = path[0]
self.name = os.path.basename(self.path)
pixmap = QPixmap(self.path)
self.filter_input.setPixmap(pixmap)
self.filter_input.setScaledContents(True)
self.input_equalize.setPixmap(pixmap)
self.input_equalize.setScaledContents(True)
print(self.path)
print(self.name)
self.filter_filtered.clear()
self.output_equalize.clear()
self.input_histogram.clear()
self.output_histogram.clear()
def filter(self):
img = cv2.imread(self.path)
if self.median.isChecked():
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
median = cv2.medianBlur(rgb_img,9)
im = Image.fromarray(median)
im.save("./Output images/median_filtered.jpg")
pixmap = QPixmap("./Output images/median_filtered.jpg")
self.filter_filtered.setPixmap(pixmap)
self.filter_filtered.setScaledContents(True)
########################################################################################################
elif self.laplacian.isChecked():
ddepth = cv2.CV_16S
kernel_size = 9
imageName = self.path
imgz = cv2.imread(cv2.samples.findFile(imageName), cv2.IMREAD_COLOR) # Load an image
smoothed_img = cv2.GaussianBlur(imgz, (3, 3), 0)
RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray_img = cv2.cvtColor(smoothed_img, cv2.COLOR_BGR2GRAY)
laplaced_img = cv2.Laplacian(gray_img, ddepth, ksize=kernel_size)
# fig = plt.figure(figsize=(12, 12))
# ax1 = fig.add_subplot(2,2,1)
# ax1.imshow(laplaced_img, cmap='gray')
plt.imshow(laplaced_img, cmap='gray')
plt.axis('off')
plt.savefig('./Output images/laplacian_filtered.jpg', bbox_inches='tight',pad_inches = 0)
pixmap = QPixmap("./Output images/laplacian_filtered.jpg")
self.filter_filtered.setPixmap(pixmap)
self.filter_filtered.setScaledContents(True)
plt.clf()
###################################################################################
elif self.lowpass.isChecked():
# do dft saving as complex output
dft = np.fft.fft2(img, axes=(0,1))
# apply shift of origin to center of image
dft_shift = np.fft.fftshift(dft)
# generate spectrum from magnitude image (for viewing only)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
# create circle mask
radius = 32
mask = np.zeros_like(img)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv2.circle(mask, (cx,cy), radius, (255,255,255), -1)[0]
# blur the mask
mask2 = cv2.GaussianBlur(mask, (19,19), 0)
# apply mask to dft_shift
dft_shift_masked = np.multiply(dft_shift,mask) / 255
dft_shift_masked2 = np.multiply(dft_shift,mask2) / 255
# shift origin from center to upper left corner
back_ishift = np.fft.ifftshift(dft_shift)
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
# do idft saving as complex output
img_back = np.fft.ifft2(back_ishift, axes=(0,1))
img_filtered = np.fft.ifft2(back_ishift_masked, axes=(0,1))
img_filtered2 = np.fft.ifft2(back_ishift_masked2, axes=(0,1))
# combine complex real and imaginary components to form (the magnitude for) the original image again
img_back = np.abs(img_back).clip(0,255).astype(np.uint8)
img_filtered = np.abs(img_filtered).clip(0,255).astype(np.uint8)
img_filtered2 = np.abs(img_filtered2).clip(0,255).astype(np.uint8)
# write result to disk
cv2.imwrite("./Output images/Lowpass_filtered.jpg", img_filtered2)
pixmap = QPixmap("./Output images/Lowpass_filtered.jpg")
self.filter_filtered.setPixmap(pixmap)
self.filter_filtered.setScaledContents(True)
######################################################################################################
elif self.highpass.isChecked():
# do dft saving as complex output
dft = np.fft.fft2(img, axes=(0,1))
# apply shift of origin to center of image
dft_shift = np.fft.fftshift(dft)
# generate spectrum from magnitude image (for viewing only)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
# create white circle mask on black background and invert so black circle on white background
# as highpass filter
radius = 32
mask = np.zeros_like(img, dtype=np.float32)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv2.circle(mask, (cx,cy), radius, (1,1,1), -1)[0]
mask = 1 - mask
# high boost filter (sharpening) = 1 + fraction of high pass filter
mask = 1 + 0.5*mask
# blur the mask
mask2 = cv2.GaussianBlur(mask, (19,19), 0)
# apply mask to dft_shift
dft_shift_masked = np.multiply(dft_shift,mask)
dft_shift_masked2 = np.multiply(dft_shift,mask2)
# shift origin from center to upper left corner
back_ishift = np.fft.ifftshift(dft_shift)
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
# do idft saving as complex output
            img_back = np.fft.ifft2(back_ishift, axes=(0,1))
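# --- Added standalone sketch (not part of the original application) ---
# The lowpass/highpass branches above follow the classic FFT filtering recipe:
# FFT -> shift the origin to the centre -> multiply by a circular mask ->
# shift back -> inverse FFT -> take the magnitude. A compact lowpass version
# on a synthetic grayscale image (sizes and radius here are illustrative):
def _fft_lowpass_example(radius=16):
    gray = np.random.rand(128, 128)
    dft = np.fft.fftshift(np.fft.fft2(gray))
    yy, xx = np.ogrid[:gray.shape[0], :gray.shape[1]]
    cy, cx = gray.shape[0] // 2, gray.shape[1] // 2
    mask = ((yy - cy) ** 2 + (xx - cx) ** 2) <= radius ** 2
    filtered = np.fft.ifft2(np.fft.ifftshift(dft * mask))
    return np.abs(filtered)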
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 14:28:56 2019
@author: balam
"""
from queue import PriorityQueue
import numpy as np
from ObstacleSpace import genObstacleSpace
import MapDiaplay as md
def actions(currentNode, currentCost):
newNodes = []
newNodesFinal = []
# vertical and horizontal nodes
for i in [(0,1),(0,-1),(1,0),(-1,0)]:
newNode = tuple(np.subtract(currentNode, i))
if not(newNode[0]<0 or newNode[1]<0 or newNode[0]>=mapX or newNode[1]>=mapY):
newNodes.append([currentCost + 1.0 + CtoG(goalNode,newNode),currentNode,newNode,True,currentCost + 1.0])
# corss nodes
for i in [(-1,-1),(-1,+1),(1,-1),(1,1)]:
newNode = tuple(np.subtract(currentNode, i))
if not(newNode[0]<0 or newNode[1]<0 or newNode[0]>=mapX or newNode[1]>=mapY):
newNodes.append([currentCost + 1.414 + CtoG(goalNode,newNode),currentNode,newNode,True,currentCost + 1.414])
for node in newNodes:
# update cost and parent if cost is less for already visited nodes
if node[2] in visitedNodes:
if nodes[np.ravel_multi_index(node[2],mapSize)][4] > node[4]:
nodes[np.ravel_multi_index(node[2],mapSize)][4] = node[4]
nodes[np.ravel_multi_index(node[2],mapSize)][1] = node[1]
# remove if in obstacle or visited nodes
if not(node[2] in obstacles.union(visitedNodes)):
newNodesFinal.append(node)
# update nodes map list
for node in newNodesFinal:
nodes[np.ravel_multi_index(node[2],mapSize)] = node
return newNodesFinal
def CtoG(fromNode, toNode):
    return np.sqrt(np.square(fromNode[0]-toNode[0]) + np.square(fromNode[1]-toNode[1]))
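# --- Added example (not part of the original script) ---
# Hedged illustration of the cost bookkeeping used in actions()/CtoG():
# a straight move costs 1.0, a diagonal move costs 1.414, and the priority
# of a neighbour is g (cost so far) + h (Euclidean cost-to-go to the goal).
# All coordinates below are made up for illustration.
def _astar_priority_example():
    goal = (9, 9)
    cost_so_far = 6.0
    straight_neighbour = (4, 6)
    diagonal_neighbour = (5, 6)
    h = lambda n: np.sqrt(np.square(goal[0] - n[0]) + np.square(goal[1] - n[1]))
    return (cost_so_far + 1.0 + h(straight_neighbour),
            cost_so_far + 1.414 + h(diagonal_neighbour))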
"""
Impulse reponse-related code
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import scipy.linalg as L
from scipy import stats
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
#from statsmodels.tsa.api import VAR
from statsmodels.compat.python import range
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
mat = np.array
class BaseIRAnalysis(object):
"""
Base class for plotting and computing IRF-related statistics, want to be
able to handle known and estimated processes
"""
def __init__(self, model, P=None, periods=10, order=None, svar=False):
self.model = model
self.periods = periods
self.neqs, self.lags, self.T = model.neqs, model.k_ar, model.nobs
self.order = order
if P is None:
sigma = model.sigma_u
# TODO, may be difficult at the moment
# if order is not None:
# indexer = [model.get_eq_index(name) for name in order]
# sigma = sigma[:, indexer][indexer, :]
# if sigma.shape != model.sigma_u.shape:
# raise ValueError('variable order is wrong length')
P = la.cholesky(sigma)
self.P = P
self.svar = svar
self.irfs = model.ma_rep(periods)
if svar:
self.svar_irfs = model.svar_ma_rep(periods, P=P)
else:
self.orth_irfs = model.orth_ma_rep(periods)
self.cum_effects = self.irfs.cumsum(axis=0)
if svar:
self.svar_cum_effects = self.svar_irfs.cumsum(axis=0)
else:
self.orth_cum_effects = self.orth_irfs.cumsum(axis=0)
self.lr_effects = model.long_run_effects()
if svar:
self.svar_lr_effects = np.dot(model.long_run_effects(), P)
else:
self.orth_lr_effects = np.dot(model.long_run_effects(), P)
# auxiliary stuff
self._A = util.comp_matrix(model.coefs)
def cov(self, *args, **kwargs):
raise NotImplementedError
def cum_effect_cov(self, *args, **kwargs):
raise NotImplementedError
def plot(self, orth=False, impulse=None, response=None,
signif=0.05, plot_params=None, subplot_params=None,
plot_stderr=True, stderr_type='asym', repl=1000,
seed=None, component=None):
"""
Plot impulse responses
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
impulse : string or int
variable providing the impulse
response : string or int
variable affected by the impulse
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
subplot_params : dict
            To pass to subplot plotting functions. Example: if fonts are too big,
pass {'fontsize' : 8} or some number to your taste.
plot_params : dict
plot_stderr: bool, default True
Plot standard impulse response error bands
stderr_type: string
'asym': default, computes asymptotic standard errors
'mc': monte carlo standard errors (use rpl)
repl: int, default 1000
Number of replications for Monte Carlo and Sims-Zha standard errors
seed: int
np.random.seed for Monte Carlo replications
component: array or vector of principal component indices
"""
periods = self.periods
model = self.model
svar = self.svar
if orth and svar:
raise ValueError("For SVAR system, set orth=False")
if orth:
title = 'Impulse responses (orthogonalized)'
irfs = self.orth_irfs
elif svar:
title = 'Impulse responses (structural)'
irfs = self.svar_irfs
else:
title = 'Impulse responses'
irfs = self.irfs
if plot_stderr == False:
stderr = None
elif stderr_type not in ['asym', 'mc', 'sz1', 'sz2','sz3']:
raise ValueError("Error type must be either 'asym', 'mc','sz1','sz2', or 'sz3'")
else:
if stderr_type == 'asym':
stderr = self.cov(orth=orth)
if stderr_type == 'mc':
stderr = self.errband_mc(orth=orth, svar=svar,
repl=repl, signif=signif,
seed=seed)
if stderr_type == 'sz1':
stderr = self.err_band_sz1(orth=orth, svar=svar,
repl=repl, signif=signif,
seed=seed,
component=component)
if stderr_type == 'sz2':
stderr = self.err_band_sz2(orth=orth, svar=svar,
repl=repl, signif=signif,
seed=seed,
component=component)
if stderr_type == 'sz3':
stderr = self.err_band_sz3(orth=orth, svar=svar,
repl=repl, signif=signif,
seed=seed,
component=component)
plotting.irf_grid_plot(irfs, stderr, impulse, response,
self.model.names, title, signif=signif,
subplot_params=subplot_params,
plot_params=plot_params, stderr_type=stderr_type)
def plot_cum_effects(self, orth=False, impulse=None, response=None,
signif=0.05, plot_params=None,
subplot_params=None, plot_stderr=True,
stderr_type='asym', repl=1000, seed=None):
"""
Plot cumulative impulse response functions
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
impulse : string or int
variable providing the impulse
response : string or int
variable affected by the impulse
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
subplot_params : dict
            To pass to subplot plotting functions. Example: if fonts are too big,
pass {'fontsize' : 8} or some number to your taste.
plot_params : dict
plot_stderr: bool, default True
Plot standard impulse response error bands
stderr_type: string
'asym': default, computes asymptotic standard errors
'mc': monte carlo standard errors (use rpl)
repl: int, default 1000
Number of replications for monte carlo standard errors
seed: int
np.random.seed for Monte Carlo replications
"""
if orth:
            title = 'Cumulative responses (orthogonalized)'
cum_effects = self.orth_cum_effects
lr_effects = self.orth_lr_effects
else:
title = 'Cumulative responses'
cum_effects = self.cum_effects
lr_effects = self.lr_effects
if stderr_type not in ['asym', 'mc']:
raise TypeError
else:
if stderr_type == 'asym':
stderr = self.cum_effect_cov(orth=orth)
if stderr_type == 'mc':
stderr = self.cum_errband_mc(orth=orth, repl=repl,
signif=signif, seed=seed)
if not plot_stderr:
stderr = None
plotting.irf_grid_plot(cum_effects, stderr, impulse, response,
self.model.names, title, signif=signif,
hlines=lr_effects, subplot_params=subplot_params,
plot_params=plot_params, stderr_type=stderr_type)
class IRAnalysis(BaseIRAnalysis):
"""
Impulse response analysis class. Computes impulse responses, asymptotic
standard errors, and produces relevant plots
Parameters
----------
model : VAR instance
Notes
-----
Using Lutkepohl (2005) notation
"""
def __init__(self, model, P=None, periods=10, order=None, svar=False):
BaseIRAnalysis.__init__(self, model, P=P, periods=periods,
order=order, svar=svar)
self.cov_a = model._cov_alpha
self.cov_sig = model._cov_sigma
# memoize dict for G matrix function
self._g_memo = {}
def cov(self, orth=False):
"""
Compute asymptotic standard errors for impulse response coefficients
Notes
-----
Lutkepohl eq 3.7.5
Returns
-------
"""
if orth:
return self._orth_cov()
covs = self._empty_covm(self.periods + 1)
covs[0] = np.zeros((self.neqs ** 2, self.neqs ** 2))
for i in range(1, self.periods + 1):
Gi = self.G[i - 1]
covs[i] = chain_dot(Gi, self.cov_a, Gi.T)
return covs
def errband_mc(self, orth=False, svar=False, repl=1000,
signif=0.05, seed=None, burn=100):
"""
IRF Monte Carlo integrated error bands
"""
model = self.model
periods = self.periods
if svar == True:
return model.sirf_errband_mc(orth=orth, repl=repl, T=periods,
signif=signif, seed=seed,
burn=burn, cum=False)
else:
return model.irf_errband_mc(orth=orth, repl=repl, T=periods,
signif=signif, seed=seed,
burn=burn, cum=False)
def err_band_sz1(self, orth=False, svar=False, repl=1000,
signif=0.05, seed=None, burn=100, component=None):
"""
IRF Sims-Zha error band method 1. Assumes symmetric error bands around
mean.
Parameters
----------
orth : bool, default False
Compute orthogonalized impulse responses
repl : int, default 1000
Number of MC replications
signif : float (0 < signif < 1)
Significance level for error bars, defaults to 95% CI
seed : int, default None
np.random seed
burn : int, default 100
Number of initial simulated obs to discard
component : neqs x neqs array, default to largest for each
Index of column of eigenvector/value to use for each error band
Note: period of impulse (t=0) is not included when computing
principle component
References
----------
Sims, <NAME>., and <NAME>. 1999. "Error Bands for Impulse
Response". Econometrica 67: 1113-1155.
"""
model = self.model
periods = self.periods
if orth:
irfs = self.orth_irfs
elif svar:
irfs = self.svar_irfs
else:
irfs = self.irfs
neqs = self.neqs
irf_resim = model.irf_resim(orth=orth, repl=repl, T=periods, seed=seed,
burn=100)
q = util.norm_signif_level(signif)
W, eigva, k =self._eigval_decomp_SZ(irf_resim)
        if component is not None:
if np.shape(component) != (neqs,neqs):
raise ValueError("Component array must be " + str(neqs) + " x " + str(neqs))
if np.argmax(component) >= neqs*periods:
                raise ValueError("At least one of the components does not exist")
else:
k = component
        # here take the kth column of W, which we determine by finding the largest eigenvalue of the covariance matrix
lower = np.copy(irfs)
        upper = np.copy(irfs)
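# --- Added sketch (not part of statsmodels; simplified alternative) ---
# For orientation only: a plain Monte Carlo percentile band over resimulated
# IRFs. The Sims-Zha bands above instead project the draws onto the principal
# components of their covariance; this simpler sketch just takes pointwise
# quantiles across the replication axis (axis 0 of irf_resim).
def _percentile_irf_band(irf_resim, signif=0.05):
    lower = np.percentile(irf_resim, 100 * signif / 2, axis=0)
    upper = np.percentile(irf_resim, 100 * (1 - signif / 2), axis=0)
    return lower, upper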
import multiprocessing as mp
from copy import copy
import numpy as np
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Callback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
def __init__(
self, update_function, plot_type=PlotType.PLOT, axes_idx=None, legend=(), combine_to=None, color=None, ylim=None, bounds=None,
):
"""
Initializes the plot.
:param update_function: Function to plot.
:param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
:param axes_idx: Index of the axis to be mapped. (integer)
:param legend: Legend of the graphs. (?)
:param combine_to: Plot in which to add the graph. ??
:param color: Color of the graphs. (?)
"""
self.function = update_function
self.type = plot_type
if axes_idx is None:
self.phase_mappings = None # Will be set later
elif isinstance(axes_idx, (tuple, list)):
self.phase_mappings = Mapping(axes_idx)
elif isinstance(axes_idx, Mapping):
self.phase_mappings = axes_idx
else:
raise RuntimeError("phase_mapping must be a list or a Mapping")
self.legend = legend
self.combine_to = combine_to
self.color = color
self.ylim = ylim
self.bounds = bounds
class PlotOcp:
def __init__(self, ocp, automatically_organize=True, adapt_graph_size_to_bounds=False):
"""Prepares the figure"""
for i in range(1, ocp.nb_phases):
if ocp.nlp[0]["nbQ"] != ocp.nlp[i]["nbQ"]:
raise RuntimeError("Graphs with nbQ different at each phase is not implemented yet")
self.ocp = ocp
self.plot_options = {
"general_options": {"use_tight_layout": False},
"non_integrated_plots": {"linestyle": "-.", "markersize": 3},
"integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
"bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
"grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
"vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
}
self.ydata = []
self.ns = 0
self.t = []
self.t_integrated = []
if isinstance(self.ocp.initial_phase_time, (int, float)):
self.tf = [self.ocp.initial_phase_time]
else:
self.tf = list(self.ocp.initial_phase_time)
self.t_idx_to_optimize = []
for i, nlp in enumerate(self.ocp.nlp):
if isinstance(nlp["tf"], self.ocp.CX):
self.t_idx_to_optimize.append(i)
self.__update_time_vector()
self.axes = {}
self.plots = []
self.plots_vertical_lines = []
self.plots_bounds = []
self.all_figures = []
self.automatically_organize = automatically_organize
self._organize_windows(len(self.ocp.nlp[0]["var_states"]) + len(self.ocp.nlp[0]["var_controls"]))
self.plot_func = {}
self.variable_sizes = []
self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
self.__create_plots()
horz = 0
vert = 1 if len(self.all_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
for i, fig in enumerate(self.all_figures):
if self.automatically_organize:
try:
fig.canvas.manager.window.move(
int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
)
vert += 1
if vert >= self.nb_vertical_windows:
horz += 1
vert = 0
except AttributeError:
pass
fig.canvas.draw()
if self.plot_options["general_options"]["use_tight_layout"]:
fig.tight_layout()
def __update_time_vector(self):
"""Sets x-axis array"""
self.t = []
self.t_integrated = []
last_t = 0
for phase_idx, nlp in enumerate(self.ocp.nlp):
nb_int_steps = nlp["nb_integration_steps"]
dt_ns = self.tf[phase_idx] / nlp["ns"]
time_phase_integrated = []
last_t_int = copy(last_t)
for _ in range(nlp["ns"]):
time_phase_integrated.append(np.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
last_t_int += dt_ns
self.t_integrated.append(time_phase_integrated)
self.ns += nlp["ns"] + 1
time_phase = np.linspace(last_t, last_t + self.tf[phase_idx], nlp["ns"] + 1)
last_t += self.tf[phase_idx]
self.t.append(time_phase)
def __create_plots(self):
"""Actually plots"""
variable_sizes = []
for i, nlp in enumerate(self.ocp.nlp):
variable_sizes.append({})
if "plot" in nlp:
for key in nlp["plot"]:
if isinstance(nlp["plot"][key], tuple):
nlp["plot"][key] = nlp["plot"][key][0]
if nlp["plot"][key].phase_mappings is None:
size = (
nlp["plot"][key]
.function(np.zeros((nlp["nx"], 1)), np.zeros((nlp["nu"], 1)), np.zeros((nlp["np"], 1)))
.shape[0]
)
nlp["plot"][key].phase_mappings = Mapping(range(size))
else:
size = len(nlp["plot"][key].phase_mappings.map_idx)
if key not in variable_sizes[i]:
variable_sizes[i][key] = size
else:
variable_sizes[i][key] = max(variable_sizes[i][key], size)
self.variable_sizes = variable_sizes
if not variable_sizes:
# No graph was setup in problem_type
return
self.plot_func = {}
for i, nlp in enumerate(self.ocp.nlp):
for variable in self.variable_sizes[i]:
nb = max(nlp["plot"][variable].phase_mappings.map_idx) + 1
nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
if nlp["plot"][variable].combine_to:
self.axes[variable] = self.axes[nlp["plot"][variable].combine_to]
axes = self.axes[variable][1]
elif i > 0 and variable in self.axes:
axes = self.axes[variable][1]
else:
axes = self.__add_new_axis(variable, nb, nb_rows, nb_cols)
self.axes[variable] = [nlp["plot"][variable], axes]
t = self.t[i]
if variable not in self.plot_func:
self.plot_func[variable] = [None] * self.ocp.nb_phases
self.plot_func[variable][i] = nlp["plot"][variable]
mapping = self.plot_func[variable][i].phase_mappings.map_idx
for ctr, k in enumerate(mapping):
ax = axes[k]
if k < len(self.plot_func[variable][i].legend):
axes[k].set_title(self.plot_func[variable][i].legend[k])
ax.grid(**self.plot_options["grid"])
ax.set_xlim(0, self.t[-1][-1])
if nlp["plot"][variable].ylim:
ax.set_ylim(nlp["plot"][variable].ylim)
elif self.adapt_graph_size_to_bounds and nlp["plot"][variable].bounds:
if nlp["plot"][variable].bounds.type != InterpolationType.CUSTOM:
y_min = nlp["plot"][variable].bounds.min[ctr].min()
y_max = nlp["plot"][variable].bounds.max[ctr].max()
else:
nlp["plot"][variable].bounds.check_and_adjust_dimensions(len(mapping), nlp["ns"])
y_min = min([nlp["plot"][variable].bounds.min.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_max = max([nlp["plot"][variable].bounds.max.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_range, _ = self.__compute_ylim(y_min, y_max, 1.25)
ax.set_ylim(y_range)
zero = np.zeros((t.shape[0], 1))
plot_type = self.plot_func[variable][i].type
if plot_type == PlotType.PLOT:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
self.plots.append(
[plot_type, i, ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0]]
)
elif plot_type == PlotType.INTEGRATED:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
plots_integrated = []
nb_int_steps = nlp["nb_integration_steps"]
for cmp in range(nlp["ns"]):
plots_integrated.append(
ax.plot(
self.t_integrated[i][cmp],
np.zeros(nb_int_steps + 1),
color=color,
**self.plot_options["integrated_plots"],
)[0]
)
self.plots.append([plot_type, i, plots_integrated])
elif plot_type == PlotType.STEP:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
self.plots.append([plot_type, i, ax.step(t, zero, where="post", color=color, zorder=0)[0]])
else:
raise RuntimeError(f"{plot_type} is not implemented yet")
for j, ax in enumerate(axes):
intersections_time = self.find_phases_intersections()
for time in intersections_time:
self.plots_vertical_lines.append(ax.axvline(time, **self.plot_options["vertical_lines"]))
if self.axes[variable][0].bounds:
if self.axes[variable][0].bounds.type == InterpolationType.EACH_FRAME:
ns = self.axes[variable][0].bounds.min.shape[1] - 1
else:
ns = nlp["ns"]
self.axes[variable][0].bounds.check_and_adjust_dimensions(
nb_elements=len(mapping), nb_shooting=ns
)
bounds_min = np.array(
[self.axes[variable][0].bounds.min.evaluate_at(k)[j] for k in range(ns + 1)]
)
bounds_max = np.array(
[self.axes[variable][0].bounds.max.evaluate_at(k)[j] for k in range(ns + 1)]
)
if bounds_min.shape[0] == nlp["ns"]:
bounds_min = np.concatenate((bounds_min, [bounds_min[-1]]))
bounds_max = np.concatenate((bounds_max, [bounds_max[-1]]))
self.plots_bounds.append(
[ax.step(self.t[i], bounds_min, where='post', **self.plot_options["bounds"]), i]
)
self.plots_bounds.append(
[ax.step(self.t[i], bounds_max, where='post', **self.plot_options["bounds"]), i]
)
def __add_new_axis(self, variable, nb, nb_rows, nb_cols):
"""
Sets the axis of the plots.
:param variable: Variable to plot (integer)
:param nb: Number of the figure. ?? (integer)
:param nb_rows: Number of rows of plots in subplots. (integer)
:param nb_cols: Number of columns of plots in subplots. (integer)
:return: axes: Axes of the plots. (instance of subplot class)
"""
if self.automatically_organize:
self.all_figures.append(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
else:
self.all_figures.append(plt.figure(variable))
axes = self.all_figures[-1].subplots(nb_rows, nb_cols)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
for i in range(nb, len(axes)):
axes[i].remove()
axes = axes[:nb]
idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
if idx_center >= len(axes):
idx_center = len(axes) - 1
axes[idx_center].set_xlabel("time (s)")
self.all_figures[-1].tight_layout()
return axes
def _organize_windows(self, nb_windows):
"""
        Organizes the figure aesthetically.
:param nb_windows: Number of variables to plot. (integer)
"""
self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
if self.automatically_organize:
height = tkinter.Tk().winfo_screenheight()
width = tkinter.Tk().winfo_screenwidth()
self.top_margin = height / 15
self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
self.width_step = width / self.nb_vertical_windows
else:
self.top_margin = None
self.height_step = None
self.width_step = None
def find_phases_intersections(self):
"""Finds the intersection between phases"""
return list(accumulate(self.tf))[:-1]
@staticmethod
def show():
plt.show()
def update_data(self, V):
"""Update of the variable V to plot (dependent axis)"""
self.ydata = []
data_states, data_controls, data_param = Data.get_data(
self.ocp, V, get_parameters=True, integrate=True, concatenate=False
)
data_param_in_dyn = np.array([data_param[key] for key in data_param if key != "time"]).squeeze()
for _ in self.ocp.nlp:
if self.t_idx_to_optimize:
for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
self.tf[i_in_tf] = data_param["time"][i_in_time]
self.__update_xdata()
data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, concatenate=False)
for i, nlp in enumerate(self.ocp.nlp):
step_size = nlp["nb_integration_steps"] + 1
nb_elements = nlp["ns"] * step_size + 1
state = np.ndarray((0, nb_elements))
for s in nlp["var_states"]:
if isinstance(data_states_per_phase[s], (list, tuple)):
                state = np.concatenate((state, data_states_per_phase[s][i]))
from collections import defaultdict
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import f1_score
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def split_labeled(data):
is_labeled = (data['label'] != -1)
return data[is_labeled], data[~is_labeled]
# def split_dataset(raw_dataset_path, new_dataset_path):
# #     mainly for convenience during EDA
# item_cols = [f'i{i}' for i in range(1, 72+1)]
# user_cols = [f'u{i}' for i in range(1, 80+1)]
# try:
# with open(raw_dataset_path, 'r', encoding='utf-8') as rf:
# with open(new_dataset_path, 'w+', encoding='utf-8') as wf:
# if "train" in raw_dataset_path:
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")},label"""
# else: # "predict"
# header = f"""uuid,visit_time,user_id,item_id,{str(item_cols+user_cols)[2:-2].replace("'", "").replace(" ","")}"""
# wf.write(header+'\n')
# for line in rf:
# if "features" in line:
# continue
# line = str(line[:].split(" ")).replace("'", "")[1:-3]
# wf.write(line+'\n')
# except FileNotFoundError:
#         print(f'{raw_dataset_path} does not exist!')
# def read_split_data(path, nrows=1000000):
# df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True, nrows=nrows)
# data = pd.concat([chunk for chunk in df_chunk])
# data = reduce_mem_usage(data)
# return data
def read_data(path='/tcdata/train0.csv', nrows=1000000):
if "train" in path:
df_chunk = pd.read_csv(path, chunksize=1e6, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features", "label"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
elif "predict" in path:
df_chunk = pd.read_csv(path, chunksize=5e5, iterator=True,
names=["uuid", "visit_time", "user_id", "item_id", "features"], nrows=nrows)
data = pd.concat([chunk for chunk in df_chunk])
data = reduce_mem_usage(data)
else: # "truth"
data = pd.read_csv(path, names=["uuid", "label"], nrows=nrows)
return data
def label_user_item_via_blacklist(data):
data_labeled, data_no_labeled = split_labeled(data)
data_spam = data_labeled[data_labeled.label == 1]
data_norm = data_labeled[data_labeled.label == 0]
try:
user_spam_dict = load_obj("user_black_dict")
item_spam_dict = load_obj("item_black_dict")
        print("Updating the user and item blacklists")
except:
user_spam_dict = defaultdict(int)
item_spam_dict = defaultdict(int)
        print("Creating new user and item blacklists")
for _, row in data_spam[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
        user_spam_dict[u] += 1  # record occurrence count
        item_spam_dict[i] += 1  # record occurrence count
save_obj(user_spam_dict, "user_black_dict")
save_obj(item_spam_dict, "item_black_dict")
    # 1. label=1 gives definitive user and item blacklists.
    # 2. With label=0 and the user blacklist: if the current user is malicious, the current item must be normal, so add the item to the item whitelist.
    #    With label=0 and the item blacklist: if the current item is malicious, the current user must be normal, so add the user to the user whitelist.
    # 3. With the user whitelist and label=0: if the current user is normal, the current item is normal or potentially malicious.
    #    With the item whitelist and label=0: if the current item is normal, the current user is normal or potentially malicious.
    # 4. With label=-1 and the finished black/white lists, decide the labels of users and items.
    # Step 3 can be skipped.
try:
user_norm_dict = load_obj("user_white_dict")
item_norm_dict = load_obj("item_white_dict")
        print("Updating the user and item whitelists")
except:
user_norm_dict = defaultdict(int)
item_norm_dict = defaultdict(int)
        print("Creating new user and item whitelists")
for _, row in data_norm[['user_id', 'item_id']].iterrows():
u, i = row['user_id'], row['item_id']
        if i in item_spam_dict.keys():  # if the current item is malicious
            user_norm_dict[u] = 0  # then this user is normal; add it to the whitelist
        # else: # the current item may be normal or potentially malicious
        if u in user_spam_dict.keys():  # if the current user is malicious
            item_norm_dict[i] = 0  # then this item is normal; add it to the whitelist
        # else: # the current user may be normal or potentially malicious
        # user_unknown_dict[u] = 0  # potentially malicious
save_obj(user_norm_dict, "user_white_dict")
save_obj(item_norm_dict, "item_white_dict")
    print("Label unknown samples using the blacklists and whitelists")
def black_white_dict(ui, black_dict, white_dict):
if ui in black_dict.keys():
return 1
elif ui in white_dict.keys():
return 0
else:
return -1
data_no_labeled['user_label'] = data_no_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_norm_dict))
data_no_labeled['item_label'] = data_no_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_norm_dict))
def ui_label2label(u, i):
if u == 1 and i == 1:
return 1
elif ((u == 1 and i == 0) or (u == 0 and i == 1) or (u == 0 and i == 0)):
return 0
else:
return -1
data_no_labeled['label'] = list(map(lambda u, i: ui_label2label(
u, i), data_no_labeled['user_label'], data_no_labeled['item_label']))
data_labeled['user_label'] = data_labeled['user_id'].apply(
lambda u: black_white_dict(u, user_spam_dict, user_norm_dict))
data_labeled['item_label'] = data_labeled['item_id'].apply(
lambda i: black_white_dict(i, item_spam_dict, item_norm_dict))
data = pd.concat([data_no_labeled, data_labeled], axis=0)
return data
def label_data_via_blacklist(data):
data_labeled, data_no_labeled = split_labeled(data)
data_spam = data_labeled[data_labeled.label == 1]
data_norm = data_labeled[data_labeled.label == 0]
try:
ui_spam_dict = load_obj("user_item_black_dict")
        print("Updating the user-item blacklist")
except:
ui_spam_dict = defaultdict(int)
        print("Creating a new user-item blacklist")
for _, row in data_spam[['user_id', 'item_id']].iterrows():
ui = (row['user_id'], row['item_id'])
        ui_spam_dict[ui] += 1  # record occurrence count
save_obj(ui_spam_dict, "user_item_black_dict")
try:
ui_norm_dict = load_obj("user_item_white_dict")
        print("Updating the user-item whitelist")
except:
ui_norm_dict = defaultdict(int)
        print("Creating a new user-item whitelist")
for idx, row in data_norm[['user_id', 'item_id']].iterrows():
ui = (row['user_id'], row['item_id'])
ui_norm_dict[ui] = 0
save_obj(ui_norm_dict, "user_item_white_dict")
def black_white_list(ui, ui_spam_dict, ui_norm_dict):
if ui in ui_spam_dict.keys():
return 1
elif ui in ui_norm_dict.keys():
return 0
else:
return -1
    print("Assign pseudo labels based on the <user_id, item_id> black/white lists")
data_no_labeled['label'] = list(map(lambda u, i: black_white_list(
(u, i), ui_spam_dict, ui_norm_dict), data_no_labeled['user_id'], data_no_labeled['item_id']))
# data_pseudo = data_no_labeled[data_no_labeled.label != -1]
# data_labeled = pd.concat([data_pseudo, data_labeled], axis=0)
data = pd.concat([data_no_labeled, data_labeled], axis=0)
return data
def rand_mask(x, p=0.1):
    # keep the id columns; randomly mask out the remaining features with probability p
ids_mask = [True, True]
ids_mask.extend(np.random.rand(152) > p)
return x * np.array(ids_mask)
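# --- Added usage sketch (not part of the original utils) ---
# rand_mask expects a 154-dimensional row (2 id columns + 152 features) and
# zeroes each feature independently with probability p while keeping the ids.
def _rand_mask_example():
    row = np.ones(154)
    masked = rand_mask(row, p=0.3)
    # the two leading id columns always survive
    assert masked[0] == 1 and masked[1] == 1
    return masked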
def evaluate_score(res_csv_path, truth_csv_path):
# "/root/tianchi_entry/result.csv"
df_pred = pd.read_csv(res_csv_path, names=[
'uuid', 'time_in', 'time_out', 'pred'])
df_truth = pd.read_csv(truth_csv_path, names=['uuid', 'label'])
time_diff = (df_pred['time_out'] - df_pred['time_in'])
time_mask = time_diff <= 500
f1 = f1_score(df_truth['label'][time_mask], df_pred['pred'][time_mask])
ratio = time_mask.mean()
print(f'avg time: {time_diff.mean()}')
print(f'f1 score: {f1}')
print(f'ratio : {ratio}')
print(f'score : {f1 * ratio}')
def find_best_threshold(y_true, y_pred, l=0.1, r=0.6, p=0.01):
thresholds = np.arange(l, r, p)
    print(f"Searching for the best F1 threshold in [{thresholds[0]},{thresholds[-1]}] with step {p}", end=">>")
fscore = np.zeros(shape=(len(thresholds)))
for index, elem in enumerate(thresholds):
thr2sub = np.vectorize(lambda x: 1 if x > elem else 0)
y_preds = thr2sub(y_pred)
fscore[index] = f1_score(y_true, y_preds)
    index = np.argmax(fscore)
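# --- Added standalone sketch (not part of the original utils) ---
# The same threshold-sweep idea as find_best_threshold, on tiny synthetic
# data: scan candidate cut-offs, binarize the scores, and keep the threshold
# with the highest F1. All values below are illustrative only.
def _threshold_sweep_example():
    y_true = np.array([0, 0, 1, 1, 1, 0])
    y_pred = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2])
    grid = np.arange(0.1, 0.6, 0.05)
    scores = [f1_score(y_true, (y_pred > t).astype(int)) for t in grid]
    best = grid[int(np.argmax(scores))]
    return best, max(scores)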
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import csv
import numpy as np
from bingo.symbolic_regression.agraph.generator import AGraphGenerator
from bingo.symbolic_regression.agraph.component_generator \
import ComponentGenerator
from bingo.symbolic_regression.implicit_regression \
import ImplicitRegression, ImplicitTrainingData, _calculate_partials
from bingo.symbolic_regression.explicit_regression \
import ExplicitRegression, ExplicitTrainingData
import bingocpp
LOG_WIDTH = 78
NUM_AGRAPHS_INDVS = 100
COMMAND_ARRAY_SIZE = 128
NUM_X_VALUES = 128
EVAL_TIMING_NUMBER = 50
EVAL_TIMING_REPEATS = 10
FITNESS_TIMING_NUMBER = 50
FITNESS_TIMING_REPEATS = 10
CLO_TIMING_NUMBER = 4
CLO_TIMING_REPEATS = 4
class StatsPrinter:
def __init__(self, title="PERFORMANCE BENCHMARKS"):
self._header_format_string = \
"{:<26} {:>10} +- {:<10} {:^10} {:^10}"
self._format_string = \
"{:<26} {:>10.4f} +- {:<10.4f} {:^10.4f} {:^10.4f}"
diff = LOG_WIDTH - len(title) - 10
self._output = [
"-"*int(diff/2)+":::: {} ::::".format(title) + "-"*int((diff + 1)/2),
self._header_format_string.format("NAME", "MEAN",
"STD", "MIN", "MAX"),
"-"*LOG_WIDTH]
def add_stats(self, name, times, number=1, unit_mult=1):
std_time = np.std(times) / number * unit_mult
mean_time = np.mean(times) / number * unit_mult
max_time = np.max(times) / number * unit_mult
        min_time = np.min(times) / number * unit_mult
from utils import detector_utils as detector_utils
from libs.pconv_layer import PConv2D
import cv2
import tensorflow as tf
import datetime
import argparse
import numpy as np
import keras
thresh = 0.9
moving_num = 3
m_input_size = 256
detection_graph, sess = detector_utils.load_inference_graph()
print("model loading...")
model_hand = keras.models.load_model('model/model_hand.h5', compile=False)
_ = model_hand.predict(np.zeros((1,96,96,3)))
model_partial = keras.models.load_model('model/model_partial.h5', compile=False, custom_objects={'PConv2D': PConv2D})
_ = model_partial.predict([np.zeros((1,m_input_size,m_input_size,3)), np.zeros((1,m_input_size,m_input_size,3))])
flag = False
start_flag = False
status = "none"
matrix = []
predict_num = 0
result = np.zeros((1,3))
from .builder import DATASETS
from .coco import CocoDataset
import numpy as np
from mmdet.utils import get_vocabulary
@DATASETS.register_module()
class CocoTextDataset(CocoDataset):
CLASSES = ('text', )
def __init__(self, ann_file,pipeline,max_seq_len=25, **kwargs):
super(CocoTextDataset, self).__init__(ann_file, pipeline, **kwargs)
self.voc, self.char2id, _ = get_vocabulary("ALLCASES_SYMBOLS")
self.max_seq_len = max_seq_len
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation with transcription.
Args:
ann_info (list[dict]): Annotation info of an image .
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map, text_labels. "masks" are raw annotations and not
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
gt_texts = []
gt_text_masks = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann['segmentation'])
word = ann['transcription']
word_label = []
for char in word:
if char in self.char2id:
word_label.append(self.char2id[char])
else:
word_label.append(self.char2id['UNK'])
seq_label = np.full(self.max_seq_len, self.char2id['PAD'], dtype=np.int)
seq_mask = np.full(self.max_seq_len, 0, dtype=np.int)
if len(word_label) > (self.max_seq_len - 1):
word_label = word_label[:(self.max_seq_len - 1)]
word_label = word_label + [self.char2id['EOS']]
seq_label[:len(word_label)] = np.array(word_label)
word_len = len(word_label)
seq_mask[:word_len] = 1
gt_texts.append(seq_label)
gt_text_masks.append(seq_mask)
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
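# --- Added standalone sketch (not part of the original dataset class) ---
# The transcription encoding above maps each character through char2id,
# truncates to max_seq_len - 1, appends EOS, pads with PAD, and builds a
# 0/1 mask over the valid positions. A toy version with a fake vocabulary
# (the real vocabulary comes from get_vocabulary("ALLCASES_SYMBOLS")):
def _encode_word_example(word="Hi", max_seq_len=8):
    char2id = {'PAD': 0, 'EOS': 1, 'UNK': 2, 'H': 3, 'i': 4}
    word_label = [char2id.get(c, char2id['UNK']) for c in word]
    word_label = word_label[:max_seq_len - 1] + [char2id['EOS']]
    seq_label = np.full(max_seq_len, char2id['PAD'], dtype=np.int64)
    seq_mask = np.zeros(max_seq_len, dtype=np.int64)
    seq_label[:len(word_label)] = np.array(word_label)
    seq_mask[:len(word_label)] = 1
    return seq_label, seq_mask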
import numpy as np
from .orcadaq import OrcaDecoder, get_ccc, get_readout_info, get_auxhw_info
from .fcdaq import FlashCamEventDecoder
class ORCAFlashCamListenerConfigDecoder(OrcaDecoder):
'''
Decoder for FlashCam listener config written by ORCA
'''
def __init__(self, *args, **kwargs):
self.decoder_name = 'ORFlashCamListenerConfigDecoder'
self.orca_class_name = 'ORFlashCamListenerModel'
# up through ch_inputnum, these are in order of the fcio data format
# for similicity. append any additional values after this.
self.decoded_values = {
'readout_id': { 'dtype': 'uint16', },
'listener_id': { 'dtype': 'uint16', },
'telid': { 'dtype': 'int32', },
'nadcs': { 'dtype': 'int32', },
'ntriggers': { 'dtype': 'int32', },
'nsamples': { 'dtype': 'int32', },
'adcbits': { 'dtype': 'int32', },
'sumlength': { 'dtype': 'int32', },
'blprecision': { 'dtype': 'int32', },
'mastercards': { 'dtype': 'int32', },
'triggercards': { 'dtype': 'int32', },
'adccards': { 'dtype': 'int32', },
'gps': { 'dtype': 'int32', },
'ch_boardid': { 'dtype': 'uint16',
'datatype':
'array_of_equalsized_arrays<1,1>{real}',
'length': 2400, },
'ch_inputnum': { 'dtype': 'uint16',
'datatype':
'array_of_equalsized_arrays<1,1>{real}',
'length': 2400, },
}
        super().__init__(*args, **kwargs)
def get_decoded_values(self, channel=None):
return self.decoded_values
def max_n_rows_per_packet(self):
return 1
def decode_packet(self, packet, lh5_tables,
packet_id, header_dict, verbose=False):
data = np.frombuffer(packet, dtype=np.int32)
tbl = lh5_tables
ii = tbl.loc
tbl['readout_id'].nda[ii] = (data[0] & 0xffff0000) >> 16
tbl['listener_id'].nda[ii] = data[0] & 0x0000ffff
for i,k in enumerate(self.decoded_values):
if i < 2: continue
tbl[k].nda[ii] = data[i-1]
if k == 'gps': break
data = np.frombuffer(packet, dtype=np.uint32)
data = data[list(self.decoded_values.keys()).index('ch_boardid')-1:]
for i in range(len(data)):
tbl['ch_boardid'].nda[ii][i] = (data[i] & 0xffff0000) >> 16
tbl['ch_inputnum'].nda[ii][i] = data[i] & 0x0000ffff
tbl.push_row()
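# --- Added illustration (not part of the original decoder) ---
# decode_packet above splits each 32-bit word into two 16-bit fields
# (high half = readout id / board id, low half = listener id / input number).
# A self-contained example of that unpacking on a made-up word:
def _split_uint32_example(word=np.uint32(0x00AB00CD)):
    high = (int(word) & 0xffff0000) >> 16   # 0x00AB
    low = int(word) & 0x0000ffff            # 0x00CD
    return high, low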
class ORCAFlashCamListenerStatusDecoder(OrcaDecoder):
'''
Decoder for FlashCam status packets written by ORCA
Some of the card level status data contains an array of values
(temperatures for instance) for each card. Since lh5 currently only
supports a 1d vector of 1d vectors, this (card,value) data has to be
flattened before populating the lh5 table.
'''
def __init__(self, *args, **kwargs):
self.decoder_name = 'ORFlashCamListenerStatusDecoder'
self.orca_class_name = 'ORFlashCamListenerModel'
self.nOtherErrors = np.uint32(5)
self.nEnvMonitors = np.uint32(16)
self.nCardTemps = np.uint32(5)
self.nCardVoltages = np.uint32(6)
self.nADCTemps = np.uint32(2)
self.nCTILinks = np.uint32(4)
self.nCards = np.uint32(1)
self.decoded_values = {
'readout_id': { 'dtype': 'uint16', },
'listener_id': { 'dtype': 'uint16', },
'cards': { 'dtype': 'int32', },
'status': { 'dtype': 'int32', },
'statustime': { 'dtype': 'float64', 'units': 's', },
'cputime': { 'dtype': 'float64', 'units': 's', },
'startoffset': { 'dtype': 'float64', 'units': 's', },
'card_fcio_id': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_status': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_event_number': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_pps_count': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_tick_count': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_max_ticks': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_total_errors': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_env_errors': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_cti_errors': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_link_errors': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards, },
'card_other_errors': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nOtherErrors, },
'card_temp': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nCardTemps,
'units': 'mC', },
'card_voltage': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nCardVoltages,
'units': 'mV', },
'card_current': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards,
'units': 'mA', },
'card_humidity': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards,
'units': 'o/oo', },
'card_adc_temp': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nADCTemps,
'units': 'mC', },
'card_cti_link': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nCTILinks, },
'card_card_link_state': {
'dtype': 'uint32',
'datatype': 'array<1>{array<1>{real}}',
'length_guess': self.nCards * self.nCards, },
}
# arrays to temporarily store card-level decoded data
self.cdata = {}
self.resize_card_data(ncards=self.nCards)
        super().__init__(*args, **kwargs)
def resize_card_data(self, ncards):
try: ncards = np.uint32(ncards)
except ValueError: return
if ncards == 0: return
for key in self.decoded_values:
# ignore keys that aren't card level
if key.find('card_') != 0: continue
try:
# skip keys for things that aren't arrays with a length_guess
if self.decoded_values[key]['datatype'].find('array') != 0:
continue
length = self.decoded_values[key]['length_guess']
try:
# resize if ncards differs from the old shape
oldshape = self.cdata[key].shape
if oldshape[0] == ncards: continue
if key.find('card_card_') == 0:
self.cdata[key].resize((ncards,ncards,) + oldshape[2:])
else:
self.cdata[key].resize((ncards,) + oldshape[1:])
except KeyError:
# if the key didn't exist set the ndarray for this key
if ((length == ncards or (length % ncards) != 0) and
key.find('card_card_') == -1):
self.cdata[key] = np.ndarray(shape=(length),
dtype=np.uint32)
else:
nval = np.uint32(length / ncards)
self.cdata[key] = np.ndarray(shape=(ncards, nval),
dtype=np.uint32)
except KeyError: continue
# set nCards to allow for not calling this function during decoding
self.nCards = ncards
def get_decoded_values(self, channel=None):
return self.decoded_values
def max_n_rows_per_packet(self):
return 1
def decode_packet(self, packet, lh5_tables,
packet_id, header_dict, verbose=False):
data = np.frombuffer(packet, dtype=np.uint32)
tbl = lh5_tables
ii = tbl.loc
# populate the packet header information
tbl['readout_id'].nda[ii] = (data[0] & 0xffff0000) >> 16
tbl['listener_id'].nda[ii] = data[0] & 0x0000ffff
tbl['status'].nda[ii] = np.int32(data[1])
tbl['statustime'].nda[ii] = np.float64(data[2] + data[3] / 1.0e6)
tbl['cputime'].nda[ii] = np.float64(data[4] + data[5] / 1.0e6)
tbl['startoffset'].nda[ii] = np.float64(data[7] + data[8] / 1.0e6)
tbl['cards'].nda[ii] = np.int32(data[12])
# resize the card level data if necessary
if data[12] != self.nCards:
            print('ORFlashCamListenerStatusDecoder: resizing card arrays '
'from', self.nCards, ' cards to', data[12])
self.resize_card_data(ncards=data[12])
# set the local card level data
        for i in range(np.int(data[12])):
"""
Tools for making FSPS templates
"""
import os
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.cosmology import WMAP9
FLAM_CGS = u.erg/u.second/u.cm**2/u.Angstrom
LINE_CGS = 1.e-17*u.erg/u.second/u.cm**2
try:
from dust_attenuation.baseclasses import BaseAttAvModel
except:
BaseAttAvModel = object
from astropy.modeling import Parameter
import astropy.units as u
try:
from fsps import StellarPopulation
except:
# Broken, but imports
StellarPopulation = object
from . import utils
from . import templates
DEFAULT_LABEL = 'fsps_tau{tau:3.1f}_logz{logzsol:4.2f}_tage{tage:4.2f}_av{Av:4.2f}'
WG00_DEFAULTS = dict(geometry='shell', dust_type='mw',
dust_distribution='homogeneous')
class Zafar15(BaseAttAvModel):
"""
Quasar extinction curve from Zafar et al. (2015)
https://ui.adsabs.harvard.edu/abs/2015A%26A...584A.100Z/abstract
"""
name = 'Zafar+15'
#bump_ampl = 1.
Rv = 2.21 # err 0.22
@staticmethod
def Alam(mu, Rv):
"""
klam, eq. 1
"""
x = 1/mu
# My fit
coeffs = np.array([0.05694421, 0.57778243, -0.12417444])
Alam = np.polyval(coeffs, x)*2.21/Rv
# Only above x > 5.90
fuv = x > 5.90
if fuv.sum() > 0:
Afuv = 1/Rv*(-4.678+2.355*x + 0.622*(x-5.90)**2) + 1.
Alam[fuv] = Afuv[fuv]
return Alam
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.micron
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
alam = self.Alam(mu, self.Rv) #*self.Rv
# Rv = Av/EBV
# EBV=Av/Rv
# Ax = Alam/Av
#
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return np.maximum(alam*Av, 0.)
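# --- Added illustration (not part of grizli) ---
# Zafar15.Alam above evaluates a quadratic fit in x = 1/lambda (lambda in
# microns) and rescales it by 2.21/Rv, with a separate far-UV branch for
# x > 5.9. A standalone evaluation of the polynomial branch at a few
# illustrative wavelengths (with the default Rv = 2.21 the rescaling is 1):
def _zafar_klam_example():
    coeffs = np.array([0.05694421, 0.57778243, -0.12417444])  # same fit as in Zafar15.Alam
    lam_um = np.array([0.3, 0.55, 1.0])
    x = 1.0 / lam_um
    return np.polyval(coeffs, x)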
class ExtinctionModel(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
#from dust_extinction.averages import G03_SMCBar
#SMCBar = G03_SMCBar()
curve_type = 'smc'
init_curve = None
#@property
def _curve_model(self):
if self.init_curve == self.curve_type:
return 0
if self.curve_type.upper() == 'SMC':
from dust_extinction.averages import G03_SMCBar as curve
elif self.curve_type.upper() == 'LMC':
from dust_extinction.averages import G03_LMCAvg as curve
elif self.curve_type.upper() in ['MW','F99']:
from dust_extinction.parameter_averages import F99 as curve
else:
raise ValueError(f'curve_type {self.curve_type} not recognized')
self.curve = curve()
self.init_curve = self.curve_type
def evaluate(self, x, Av):
self._curve_model()
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
if self.curve_type.upper() in ['MW','F99']:
curve = self.curve
klam = curve.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron),
Rv=curve.Rv)
else:
klam = self.curve.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class SMC(BaseAttAvModel):
"""
Modify `dust_extinction.averages.G03_SMCBar` to work as Att
"""
from dust_extinction.averages import G03_SMCBar
SMCBar = G03_SMCBar()
def evaluate(self, x, Av):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
xinv = 1./xin.to(u.micron)
klam = self.SMCBar.evaluate(1/np.clip(xinv,
0.301/u.micron, 9.99/u.micron))
return klam*Av
class Reddy15(BaseAttAvModel):
"""
Attenuation curve from Reddy et al. (2015)
With optional UV bump
https://ui.adsabs.harvard.edu/abs/2015ApJ...806..259R/abstract
"""
name = 'Reddy+15'
#bump_ampl = 1.
bump_ampl = Parameter(description="Amplitude of UV bump",
default=2., min=0., max=10.)
bump_gamma = 0.04
bump_x0 = 0.2175
Rv = 2.505
@staticmethod
def _left(mu):
"""
klam, mu < 0.6 micron
"""
return -5.726 + 4.004/mu - 0.525/mu**2 + 0.029/mu**3 + 2.505
@staticmethod
def _right(mu):
"""
klam, mu > 0.6 micron
"""
return -2.672 - 0.010/mu + 1.532/mu**2 - 0.412/mu**3 + 2.505
@property
def koffset(self):
"""
Force smooth transition at 0.6 micron
"""
return self._left(0.6) - self._right(0.6)
def evaluate(self, x, Av, bump_ampl):
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = np.atleast_1d(x)
mu = xin.to(u.micron).value
left = mu < 0.6
klam = mu*0.
# Reddy Eq. 8
kleft = self._left(mu)
kright = self._right(mu)
klam[left] = self._left(mu[left])
klam[~left] = self._right(mu[~left]) + self.koffset
# Rv = Av/EBV
# EBV=Av/Rv
# klam = Alam/EBV
# Alam = klam*EBV = klam*Av/Rv
return np.maximum((klam + self.uv_bump(mu, bump_ampl))*Av/self.Rv, 0.)
def uv_bump(self, mu, bump_ampl):
"""
Drude profile for computing the UV bump.
Parameters
----------
x: np array (float)
expects wavelengths in [micron]
x0: float
Central wavelength of the UV bump (in microns).
gamma: float
Width (FWHM) of the UV bump (in microns).
ampl: float
Amplitude of the UV bump.
Returns
-------
np array (float)
lorentzian-like Drude profile
Raises
------
ValueError
Input x values outside of defined range
"""
return bump_ampl * (mu**2 * self.bump_gamma**2 /
((mu**2 - self.bump_x0**2)**2 +
mu**2 * self.bump_gamma**2))
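# --- Added illustration (not part of grizli) ---
# The UV bump above is a Drude (Lorentzian-like) profile centred at
# x0 = 0.2175 micron with width gamma = 0.04 micron; at the centre it
# evaluates to exactly the bump amplitude. Quick standalone check with
# illustrative wavelengths:
def _drude_peak_example(ampl=2.0, x0=0.2175, gamma=0.04):
    mu = np.array([0.15, x0, 0.30])
    drude = ampl * (mu**2 * gamma**2 / ((mu**2 - x0**2)**2 + mu**2 * gamma**2))
    return drude   # the middle entry equals ampl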
class KC13(BaseAttAvModel):
"""
Kriek & Conroy (2013) attenuation model, extends Noll 2009 with UV bump
amplitude correlated with the slope, delta.
Slightly different from KC13 since the N09 model uses Leitherer (2002)
below 1500 Angstroms.
"""
name = 'Kriek+Conroy2013'
delta = Parameter(description="delta: slope of the power law",
default=0., min=-3., max=3.)
#extra_bump = 1.
extra_params = {'extra_bump':1.}
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.9e-4, 2.e8]
averages.x_range_C00 = [0.9e-4, 2.e8]
averages.x_range_L02 = [0.9e-4, 0.18]
self.N09 = shapes.N09()
def evaluate(self, x, Av, delta):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
#Av = np.polyval(self.coeffs['Av'], tau_V)
x0 = 0.2175
gamma = 0.0350
ampl = (0.85 - 1.9*delta)*self.extra_params['extra_bump']
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = x
if dust_attenuation.__version__ >= '0.0.dev131':
return self.N09.evaluate(xin, x0, gamma, ampl, delta, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, delta)
class ParameterizedWG00(BaseAttAvModel):
coeffs = {'Av': np.array([-0.001, 0.026, 0.643, -0.016]),
'x0': np.array([ 3.067e-19, -7.401e-18, 6.421e-17, -2.370e-16,
3.132e-16, 2.175e-01]),
'gamma': np.array([ 2.101e-06, -4.135e-05, 2.719e-04,
-7.178e-04, 3.376e-04, 4.270e-02]),
'ampl': np.array([-1.906e-03, 4.374e-02, -3.501e-01,
1.228e+00, -2.151e+00, 8.880e+00]),
'slope': np.array([-4.084e-05, 9.984e-04, -8.893e-03,
3.670e-02, -7.325e-02, 5.891e-02])}
# Turn off bump
include_bump = 0.25
wg00_coeffs = {'geometry': 'shell',
'dust_type': 'mw',
'dust_distribution': 'homogeneous'}
name = 'ParameterizedWG00'
# def __init__(self, Av=1.0, **kwargs):
# """
# Version of the N09 curves fit to the WG00 curves up to tauV=10
# """
# from dust_attenuation import averages, shapes, radiative_transfer
#
# # Allow extrapolation
# shapes.x_range_N09 = [0.01, 1000]
# averages.x_range_C00 = [0.01, 1000]
# averages.x_range_L02 = [0.01, 0.18]
#
# self.N09 = shapes.N09()
def _init_N09(self):
from dust_attenuation import averages, shapes, radiative_transfer
# Allow extrapolation
shapes.x_range_N09 = [0.009, 2.e8]
averages.x_range_C00 = [0.009, 2.e8]
averages.x_range_L02 = [0.009, 0.18]
self.N09 = shapes.N09()
def get_tau(self, Av):
"""
Get the WG00 tau_V for a given Av
"""
tau_grid = np.arange(0, 10, 0.01)
av_grid = np.polyval(self.coeffs['Av'], tau_grid)
return np.interp(Av, av_grid, tau_grid, left=0., right=tau_grid[-1])
def evaluate(self, x, Av):
import dust_attenuation
if not hasattr(self, 'N09'):
self._init_N09()
tau_V = self.get_tau(Av)
#Av = np.polyval(self.coeffs['Av'], tau_V)
x0 = np.polyval(self.coeffs['x0'], tau_V)
gamma = np.polyval(self.coeffs['gamma'], tau_V)
if self.include_bump:
ampl = np.polyval(self.coeffs['ampl'], tau_V)*self.include_bump
else:
ampl = 0.
slope = np.polyval(self.coeffs['slope'], tau_V)
if not hasattr(x, 'unit'):
xin = np.atleast_1d(x)*u.Angstrom
else:
xin = x
if dust_attenuation.__version__ >= '0.0.dev131':
return self.N09.evaluate(xin, x0, gamma, ampl, slope, Av)
else:
return self.N09.evaluate(xin, Av, x0, gamma, ampl, slope)
def fsps_line_info(wlimits=None):
"""
Read FSPS line list
"""
try:
info_file = os.path.join(os.getenv('SPS_HOME'), 'data/emlines_info.dat')
with open(info_file, 'r') as f:
lines = f.readlines()
except:
return [], []
waves = np.array([float(l.split(',')[0]) for l in lines])
names = np.array([l.strip().split(',')[1].replace(' ','') for l in lines])
if wlimits is not None:
clip = (waves > wlimits[0]) & (waves < wlimits[1])
waves = waves[clip]
names = names[clip]
return waves, names
DEFAULT_LINES = fsps_line_info(wlimits=[1200, 1.9e4])[0]
BOUNDS = {}
BOUNDS['tage'] = [0.03, 12, 0.05]
BOUNDS['tau'] = [0.03, 2, 0.05]
BOUNDS['zred'] = [0.0, 13, 1.e-4]
BOUNDS['Av'] = [0.0, 15, 0.05]
BOUNDS['gas_logu'] = [-4, 0, 0.05]
BOUNDS['gas_logz'] = [-2, 0.3, 0.05]
BOUNDS['logzsol'] = [-2, 0.3, 0.05]
BOUNDS['sigma_smooth'] = [100, 500, 0.05]
def wuyts_line_Av(Acont):
"""
Wuyts prescription for extra extinction towards nebular emission
"""
return Acont + 0.9*Acont - 0.15*Acont**2
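# --- Added illustration (not part of grizli) ---
# wuyts_line_Av maps continuum attenuation to the extra attenuation towards
# nebular lines, A_line = A_cont + 0.9*A_cont - 0.15*A_cont**2; for example
# A_cont = 0.5 -> 0.9125 and A_cont = 1.0 -> 1.75.
def _wuyts_line_av_example():
    return {a: wuyts_line_Av(a) for a in (0.5, 1.0, 2.0)}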
class ExtendedFsps(StellarPopulation):
"""
Extended functionality for the `~fsps.StellarPopulation` object
"""
lognorm_center = 0.
lognorm_logwidth = 0.05
is_lognorm_sfh = False
lognorm_fburst = -30
cosmology = WMAP9
scale_lyman_series = 0.1
scale_lines = OrderedDict()
line_av_func = None
#_meta_bands = ['v']
@property
def izmet(self):
"""
Get zmet index for nearest ``self.zlegend`` value to ``loggzsol``.
"""
NZ = len(self.zlegend)
logzsol = self.params['logzsol']
zi = np.interp(logzsol, np.log10(self.zlegend/0.019), np.arange(NZ))
return np.clip(np.cast[int](np.round(zi)), 0, NZ-1)
@property
def fsps_ages(self):
"""
(linear) ages of the FSPS SSP age grid, Gyr
"""
if hasattr(self, '_fsps_ages'):
return self._fsps_ages
_ = self.get_spectrum()
fsps_ages = 10**(self.log_age-9)
self._fsps_ages = fsps_ages
return fsps_ages
def set_lognormal_sfh(self, min_sigma=3, verbose=False, **kwargs):
"""
Set lognormal tabular SFH
"""
try:
from grizli.utils_c.interp import interp_conserve_c as interp_func
except:
interp_func = utils.interp_conserve
if 'lognorm_center' in kwargs:
self.lognorm_center = kwargs['lognorm_center']
if 'lognorm_logwidth' in kwargs:
self.lognorm_logwidth = kwargs['lognorm_logwidth']
if self.is_lognorm_sfh:
self.params['sfh'] = 3
if verbose:
msg = 'lognormal SFH ({0}, {1}) [sfh3={2}]'
print(msg.format(self.lognorm_center, self.lognorm_logwidth,
self.is_lognorm_sfh))
xages = np.logspace(np.log10(self.fsps_ages[0]),
np.log10(self.fsps_ages[-1]), 2048)
mu = self.lognorm_center#*np.log(10)
# sfh = 1./t*exp(-(log(t)-mu)**2/2/sig**2)
logn_sfh = 10**(-(np.log10(xages)-mu)**2/2/self.lognorm_logwidth**2)
logn_sfh *= 1./xages
# Normalize
logn_sfh *= 1.e-9/(self.lognorm_logwidth*np.sqrt(2*np.pi*np.log(10)))
self.set_tabular_sfh(xages, logn_sfh)
self._lognorm_sfh = (xages, logn_sfh)
def lognormal_integral(self, tage=0.1, **kwargs):
"""
Integral of lognormal SFH up to t=tage
"""
from scipy.special import erfc
mu = self.lognorm_center*np.log(10)
sig = self.lognorm_logwidth*np.sqrt(np.log(10))
cdf = 0.5*erfc(-(np.log(tage)-mu)/sig/np.sqrt(2))
return cdf
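# Added sanity check (illustrative): at tage = 10**lognorm_center Gyr we have
# log(tage) == mu, erfc(0) = 1 and the CDF above equals 0.5, i.e. half of the
# total star formation has occurred by the median time of the lognormal SFH.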
def _set_extend_attrs(self, line_sigma=50, lya_sigma=200, **kwargs):
"""
Set attributes on the `~fsps.StellarPopulation` object used by `narrow_emission_lines`.
line_sigma : line width (sigma = FWHM/2.35), km/s.
lya_sigma : line width for Lyman-alpha, km/s.
Sets the `emline_dlam` and `emline_sigma` attributes.
"""
# Line widths, native FSPS and new
wave, line = self.get_spectrum(tage=1., peraa=True)
dlam = np.diff(wave)
self.emline_dlam = [np.interp(w, wave[1:], dlam)
for w in self.emline_wavelengths] # Angstrom
self.emline_sigma = [line_sigma for w in self.emline_wavelengths] #kms
# Separate Ly-alpha
lya_ix = np.argmin(np.abs(self.emline_wavelengths - 1216.8))
self.emline_sigma[lya_ix] = lya_sigma
# Line EWs computed in `narrow_emission_lines`
self.emline_eqw = [-1e10 for w in self.emline_wavelengths]
# Emission line names
waves, names = fsps_line_info()
if np.allclose(self.emline_wavelengths, waves, 0.5):
self.emline_names = names
else:
self.emline_names = ['?'] * len(self.emline_wavelengths)
for w, n in zip(waves, names):
dl = np.abs(self.emline_wavelengths - w)
if dl.min() < 0.5:
self.emline_names[np.argmin(dl)] = n
for l in self.emline_names:
self.scale_lines[l] = 1.
# Precomputed arrays for WG00 reddening defined between 0.1..3 um
self.wg00lim = (self.wavelengths > 1000) & (self.wavelengths < 3.e4)
self.wg00red = (self.wavelengths > 1000)*1.
self.exec_params = None
self.narrow = None
def narrow_emission_lines(self, tage=0.1, emwave=DEFAULT_LINES, line_sigma=100, oversample=5, clip_sigma=10, verbose=False, get_eqw=True, scale_lyman_series=None, scale_lines={}, force_recompute=False, use_sigma_smooth=True, lorentz=False, **kwargs):
"""
Replace broad FSPS lines with specified line widths
tage : age in Gyr of FSPS model
FSPS sigma: line width in A in FSPS models
emwave : (approx) wavelength of line to replace
line_sigma : line width in km/s of new line
oversample : factor by which to sample the Gaussian profiles
clip_sigma : sigmas from line center to use for the line
scale_lyman_series : scaling to apply to Lyman-series emission lines
scale_lines : scaling to apply to other emission lines, by name
Returns: `dict` with keys
wave_full, flux_full, line_full = wave and flux with fine lines
wave, flux_line, flux_clean = original model + removed lines
ymin, ymax = range of new line useful for plotting
"""
if not hasattr(self, 'emline_dlam'):
self._set_extend_attrs(line_sigma=line_sigma, **kwargs)
self.params['add_neb_emission'] = True
if scale_lyman_series is None:
scale_lyman_series = self.scale_lyman_series
else:
self.scale_lyman_series = scale_lyman_series
if scale_lines is None:
scale_lines = self.scale_lines
else:
for k in scale_lines:
if k in self.scale_lines:
self.scale_lines[k] = scale_lines[k]
else:
print(f'Line "{k}" not found in `self.scale_lines`')
# Avoid recomputing if all parameters are the same (i.e., change Av)
call_params = np.hstack([self.param_floats(params=None), emwave,
list(self.scale_lines.values()),
[tage, oversample, clip_sigma, scale_lyman_series]])
try:
is_close = np.allclose(call_params, self.exec_params)
except:
is_close = False
if is_close & (not force_recompute):
if verbose:
print('use stored')
return self.narrow
self.exec_params = call_params
wave, line = self.get_spectrum(tage=tage, peraa=True)
line_ix = [np.argmin(np.abs(self.emline_wavelengths - w))
for w in emwave]
line_lum = [self.emline_luminosity[i] for i in line_ix]
line_wave = [self.emline_wavelengths[i] for i in line_ix]
fsps_sigma = [np.sqrt((2*self.emline_dlam[i])**2 +
(self.params['sigma_smooth']/3.e5*self.emline_wavelengths[i])**2)
for i in line_ix]
if line_sigma < 0:
lines_sigma = [-line_sigma for ix in line_ix]
elif (self.params['sigma_smooth'] > 0) & (use_sigma_smooth):
lines_sigma = [self.params['sigma_smooth'] for ix in line_ix]
else:
lines_sigma = [self.emline_sigma[ix] for ix in line_ix]
line_dlam = [sig/3.e5*lwave
for sig, lwave in zip(lines_sigma, line_wave)]
clean = line*1
wlimits = [np.min(emwave), np.max(emwave)]
wlimits = [2./3*wlimits[0], 4.3*wlimits[1]]
wfine = utils.log_zgrid(wlimits, np.min(lines_sigma)/oversample/3.e5)
qfine = wfine < 0
if verbose:
msg = 'Matched line: {0} [{1}], lum={2}'
for i, ix in enumerate(line_ix):
print(msg.format(line_wave[i], ix, line_lum[i]))
#########
# Remove lines from FSPS
# line width seems to be 2*dlam at the line wavelength
for i, ix in enumerate(line_ix):
gauss = 1/np.sqrt(2*np.pi*fsps_sigma[i]**2)
gauss *= np.exp(-(wave - line_wave[i])**2/2/fsps_sigma[i]**2)
clean -= gauss*line_lum[i]
# indices of fine array where new lines defined
qfine |= np.abs(wfine - line_wave[i]) < clip_sigma*line_dlam[i]
# Linear interpolate cleaned spectrum on fine grid
iclean = np.interp(wfine[qfine], wave, clean)
# Append original and fine sampled arrays
wfull = np.append(wave, wfine[qfine])
cfull = np.append(clean, iclean)
so = np.argsort(wfull)
wfull, uniq = np.unique(wfull, return_index=True)
import ast
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import wilcoxon
from matplotlib.ticker import FormatStrFormatter
import matplotlib
from tabulate import tabulate
text_dir = 'data/qa_example/'
counterfactual_dir = 'counterfactuals/qa_example/model_dist_1layer/'
probe_type = 'model_dist'
test_layers = [i for i in range(1, 25)]
layer_offset = test_layers[0]
# For a single sentence, plot the distribution over start probabilities for the original and updated embeddings
# as well as just the deltas.
def plot_sentence_probs(sentence_idx):
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(10, 10))
fig.suptitle("Absolute and Relative Start Token Probabilities")
x_axis = [i + 1 for i in range(len(original_start_probs[sentence_idx]))]
# Plot the absolute probability values
ax1.set_title("Start probabilities for layer " + str(layer) + " and sentence " + str(sentence_idx))
ax1.set_xlabel('Token idx')
ax1.errorbar(x_axis, original_start_probs[sentence_idx], linestyle='--', color='green', marker='s', label='Original')
ax1.errorbar(x_axis, nn1_parse_updated_start_probs[sentence_idx], color='red', marker='s', label='Conj. Parse')
ax1.errorbar(x_axis, nn2_parse_updated_start_probs[sentence_idx], color='blue', marker='s', label='NN2 Parse')
ax1.legend(loc="upper left")
ax2.set_title("Changes in start probabilities for layer " + str(layer) + " and sentence " + str(sentence_idx))
ax2.set_xlabel('Token idx')
nn1_delta = [nn1_parse_updated_start_probs[sentence_idx][i] - original_start_probs[sentence_idx][i] for i in range(len(original_start_probs[sentence_idx]))]
nn2_delta = [nn2_parse_updated_start_probs[sentence_idx][i] - original_start_probs[sentence_idx][i] for i in range(len(original_start_probs[sentence_idx]))]
ax2.errorbar(x_axis, nn1_delta, color='red', marker='s', label='Conj. Parse')
ax2.errorbar(x_axis, nn2_delta, color='blue', marker='s', label='NN2 Parse')
ax2.legend(loc='upper left')
plt.show()
# Read in the other question info as well
corpus_types = []
answer_lengths = []
start_likelihoods = []
contexts = []
questions = []
answers = []
with open(text_dir + 'setup.txt', 'r') as setup_file:
for line_idx, line in enumerate(setup_file):
split_line = line.split('\t')
corpus_types.append(split_line[0])
answer_lengths.append(int(split_line[1]))
start_likelihoods.append(float(split_line[2]))
contexts.append(split_line[3])
questions.append(split_line[4])
answers.append(split_line[5])
# Read in the token id data. We care about probability changes at specific locations, which were stored way back
# when the corpus was generated in token_idxs.txt.
# We care about 4 locations (determiner and noun) x (location 1 and location 2)
det1_token_idxs = []
nn1_token_idxs = []
det2_token_idxs = []
nn2_token_idxs = []
all_token_idxs = [det1_token_idxs, nn1_token_idxs, det2_token_idxs, nn2_token_idxs]
with open(text_dir + 'token_idxs.txt', 'r') as token_file:
for line_idx, line in enumerate(token_file):
if line_idx % 2 == 0:
continue # Have twice as many token lines as needed because the sentences were duplicated.
split_line = line.split('\t')
det1_token_idxs.append(int(split_line[0]))
nn1_token_idxs.append(int(split_line[1]))
det2_token_idxs.append(int(split_line[2]))
nn2_token_idxs.append(int(split_line[3]))
all_layers_original_starts = []
all_layers_nn1_parse_starts = []
all_layers_nn2_parse_starts = []
for layer in test_layers:
# Read in how the probabilities got updated.
original_start_probs = []
nn1_parse_updated_start_probs = []
nn2_parse_updated_start_probs = []
with open(counterfactual_dir + probe_type + str(layer) + '/updated_probs.txt', 'r') as results_file:
for line_idx, line in enumerate(results_file):
split_line = line.split('\t')
if line_idx == 0: # The first line has some cruft based on how files are generated.
continue
if line_idx % 2 == 1:
original_start_probs.append([ast.literal_eval(data)[0] for data in split_line])
nn1_parse_updated_start_probs.append([ast.literal_eval(data)[2] for data in split_line])
else:
nn2_parse_updated_start_probs.append([ast.literal_eval(data)[2] for data in split_line])
# Now we have the data, so if you want to plot probabilities for a single sentence, you can.
# Plot stuff for just a single sentence.
# for i in range(1):
# plot_sentence_probs(i)
# Dump the layer-specific data into an aggregator.
all_layers_original_starts.append(original_start_probs)
all_layers_nn1_parse_starts.append(nn1_parse_updated_start_probs)
all_layers_nn2_parse_starts.append(nn2_parse_updated_start_probs)
def get_token_idx_start_update(token_idxs):
nn1_updates = []
nn1_updates_std = []
nn2_updates = []
nn2_updates_std = []
nn1_all = []
nn2_all = []
for layer in test_layers:
layer_specific_nn1_updates = []
layer_specific_nn2_updates = []
for sentence_idx, token_idx in enumerate(token_idxs):
if token_idx == -1:
print("Invalid token, skipping")
layer_specific_nn1_updates.append(0)
layer_specific_nn2_updates.append(0)
continue
original_prob = all_layers_original_starts[layer - layer_offset][sentence_idx][token_idx]
nn1_parse_prob = all_layers_nn1_parse_starts[layer - layer_offset][sentence_idx][token_idx]
nn2_parse_prob = all_layers_nn2_parse_starts[layer - layer_offset][sentence_idx][token_idx]
layer_specific_nn1_updates.append(nn1_parse_prob - original_prob)
layer_specific_nn2_updates.append(nn2_parse_prob - original_prob)
nn1_updates.append(np.mean(layer_specific_nn1_updates))
nn1_updates_std.append(np.std(layer_specific_nn1_updates))
nn2_updates.append(np.mean(layer_specific_nn2_updates))
nn2_updates_std.append(np.std(layer_specific_nn2_updates))
nn1_all.append(layer_specific_nn1_updates)
nn2_all.append(layer_specific_nn2_updates)
return nn1_updates, nn1_updates_std, nn2_updates, nn2_updates_std, nn1_all, nn2_all
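# Added note: for each layer, get_token_idx_start_update returns the mean and
# standard deviation of the change in start probability at the given token
# positions under the two counterfactual parses (nn1 / nn2), together with the
# raw per-sentence deltas (nn1_all, nn2_all).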
def plot_start_updates():
x_axis = [i for i in test_layers]
fig, axes = plt.subplots(nrows=4, figsize=(10, 20))
for i in range(4):
tokens = all_token_idxs[i]
_, _, _, _, nn1_all, nn2_all =\
get_token_idx_start_update(tokens)
# Now do the plotting
ax = axes[i]
ax.set_title("Start prob deltas for token " + str(i))
ax.set_xlabel('Layer idx')
ax.errorbar(x_axis, np.mean(nn1_all, axis=1), color='red', marker='s', label='NP1 Parse')
ax.errorbar(x_axis, np.mean(nn2_all, axis=1), color='blue', marker='s', label='NP2 Parse')
ax.axhline()
ax.legend(loc='upper left')
plt.savefig(counterfactual_dir + probe_type + '_token_updates.png')
plt.show()
# Plot aggregate data.
plot_start_updates()
_, _, _, _, p1_tok0, p2_tok0 = get_token_idx_start_update(all_token_idxs[0])
_, _, _, _, p1_tok1, p2_tok1 = get_token_idx_start_update(all_token_idxs[1])
_, _, _, _, p1_tok2, p2_tok2 = get_token_idx_start_update(all_token_idxs[2])
_, _, _, _, p1_tok3, p2_tok3 = get_token_idx_start_update(all_token_idxs[3])
def calculate_stats(p1_tokens, p2_tokens, string_label):
p1 = np.asarray(p1_tokens[0])
for p1_idx, p1_tokens_entry in enumerate(p1_tokens):
if p1_idx == 0:
continue
p1 = p1 + np.asarray(p1_tokens_entry)
p2 = np.asarray(p2_tokens[0])
for p2_idx, p2_tokens_entry in enumerate(p2_tokens):
if p2_idx == 0:
continue
p2 = p2 + np.asarray(p2_tokens_entry)
for layer in range(p1.shape[0]):
stat, p = wilcoxon(p1[layer], p2[layer], alternative='greater')
_, less_p = wilcoxon(p1[layer], p2[layer], alternative='less')
if p < 0.01:
print("Sig. greater:\t", string_label, "for layer", layer + layer_offset)
continue
if less_p < 0.01:
print("Sig. less:\t", string_label, "for layer", layer + layer_offset)
continue
print("Not significant for layer", layer + layer_offset)
print()
calculate_stats((p1_tok0, p1_tok1), (p2_tok0, p2_tok1), "NP1")
calculate_stats((p1_tok2, p1_tok3), (p2_tok2, p2_tok3), "NP2")
parse1_np1_delta = np.asarray(p1_tok0)
from __future__ import print_function
import ast
import baker
import logging
import math
import numpy as np
from sklearn.preprocessing import MaxAbsScaler
from tqdm import tqdm
import core
from core.cascade import load_data, load_data_file, load_costs_data, load_model, save_model, group_counts, group_offsets
from core.metrics import test_all, test_ndcg
def _predict(cascade, x, qid, return_stages=False):
"""Run prediciton"""
preds, indexes = _init_predict(x)
if return_stages:
stagewise_results = []
for stage in cascade:
result = _partial_predict(stage, preds, indexes, x, qid)
stagewise_results.append(result)
preds, indexes = result
return stagewise_results
else:
for stage in cascade:
preds, indexes = _partial_predict(stage, preds, indexes, x, qid)
return preds, indexes
def _init_predict(x):
"""Initialze the predictions and indexes"""
preds = np.full(x.shape[0], -1, dtype=float)
indexes = np.arange(x.shape[0], dtype=int)
return preds, indexes
def _partial_predict(stage, preds, indexes, x, qid):
"""Run partial prediction by executing one cascade stage"""
prune, model = stage
if prune:
new_indexes = []
for a, b in group_offsets(qid[indexes]):
idx = indexes[a:b]
ranked_idx = idx[np.argsort(preds[idx])[::-1]]
cutoff = int(math.ceil(prune['beta'] * (b - a))) # prevent generating empty ranked lists
if cutoff == 0:
print(ranked_idx, prune['beta'], b - a)
new_indexes.extend(ranked_idx[:cutoff])
new_indexes = np.array(sorted(new_indexes))
else:
new_indexes = indexes.copy()
new_preds = preds.copy()
new_scores = np.dot(x[new_indexes], model)
new_preds[new_indexes] = new_preds[new_indexes] + new_scores # to work around the numpy qfunc 'add' bug
return new_preds, new_indexes
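# Added usage sketch (illustrative only; `weak1`, `weak2` and `toy_cascade` are
# hypothetical names, not part of this module). A cascade is a list of
# (prune, model) stages: `prune` is falsy to score every candidate, or
# {'beta': f} to keep the top fraction f of each query's current ranking;
# `model` is a dense weight vector over the feature columns of x.
#
# weak1 = np.zeros(x.shape[1]); weak1[3] = 1.0  # stage 1: score by feature 3
# weak2 = np.zeros(x.shape[1]); weak2[7] = 1.0  # stage 2: add feature 7
# toy_cascade = [(None, weak1), ({'beta': 0.5}, weak2)]
# preds, indexes = _predict(toy_cascade, x, qid)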
def predict(cascade, test_data, costs, output_trec_run=None, output_eval=None):
"""Run prediction using the cascade."""
x, y, qid, docno = test_data
x = x.toarray()
# NOTE: the cost-aware evaluation protocol is implemented differently here.
# `extracted_count` is currently stagewise and does not keep track of
# previously extracted features. So to compute the total cascade cost, we
# need to add all the stagewise costs together.
cost_spent_weighted = 0
stagewise_results = _predict(cascade, x, qid, return_stages=True)
for i, ((prune, model), (preds, indexes)) in enumerate(zip(cascade, stagewise_results)):
test_metrics = test_all(preds, y, qid, 1)
print('stage %i: '
'test ERR@5/10/20 %0.4f/%0.4f/%0.4f, '
'test NDCG@5/10/20 %0.4f/%0.4f/%0.4f, '
'test P@5/10/20 %0.4f/%0.4f/%0.4f' %
(i,
test_metrics['err@5'], test_metrics['err@10'], test_metrics['err@20'],
test_metrics['ndcg@5'], test_metrics['ndcg@10'], test_metrics['ndcg@20'],
test_metrics['p@5'], test_metrics['p@10'], test_metrics['p@20']))
n_used_features = len(np.flatnonzero(model))
n_active_docs = len(indexes)
extracted_count = (model != 0).astype(float) * len(indexes)
# NOTE: note the +=
cost_spent_weighted += np.sum(costs * extracted_count)
print(' weighted L1 %f, cascade features %i, num docs %i, cascade cost %0.2f' %
(np.nan,
n_used_features,
n_active_docs,
cost_spent_weighted / float(x.shape[0])))
if output_trec_run:
with open(output_trec_run, 'wb') as output:
core.cascade.print_trec_run(output, stagewise_results[-1][0], y, qid, docno)
logging.info('TREC run saved to %s' % output_trec_run)
def train(train_data, valid_data, costs, importance, n_stages=0,
gamma=0.1, beta_values=[1.0], use_query_features=False):
"""Learn one ranker with SGD and L1 regularization.
Args:
n_stages: number of rankers in the cascade
strategies: a dict of callback functions
"""
x_train, y_train, qid_train, _ = train_data
x_train = x_train.toarray()
# FIXME: validation data manually turned off
# for weird reasons, validation based early stopping doesn't work well
valid_data = None
if valid_data:
x_valid, y_valid, qid_valid, _ = valid_data
x_valid = x_valid.toarray()
n_queries = np.unique(qid_train).shape[0]
n_features = x_train.shape[1]
n_stages = n_stages or n_features # n_stages = n_features if set to None
weights = np.ones(n_queries, dtype=float) / n_queries
C_cascade = np.zeros(n_queries, dtype=float)
cascade = []
# NOTE: gamma is normalized by the maximum cost times the number of docs
max_cost = max(np.max(costs), 1)
C_normalizer = float(max_cost) * x_train.shape[0]
best_perf_train, best_perf_valid = -np.inf, -np.inf
best_cascade = None
# The cascade doesn't like query features...
features = []
if use_query_features:
for j, _ in enumerate(costs):
features.append(j)
else:
for j, _ in enumerate(costs):
for a, b in group_offsets(qid_train):
if (x_train[a:b, j] != x_train[a, j]).any():
features.append(j)
break
used_fids = []
preds, indexes = _init_predict(x_train)
for _ in range(n_stages):
best_weighted_perf = -np.inf
best_stage = None
for k in tqdm(features, 'scan through features'):
if k in used_fids:
continue
weak_ranker = np.zeros(n_features, dtype=float)
weak_ranker[k] = 1
# for beta in np.linspace(0, 1, 4)[1:]:
for beta in beta_values:
prune = {'beta': beta}
new_preds, new_indexes = _partial_predict((prune, weak_ranker),
preds, indexes, x_train, qid_train)
# Eq. (6) in Wang et al. (2011)
E = np.array(test_ndcg(new_preds, y_train, qid_train, average=False))
C = costs[k] * group_counts(qid_train[new_indexes]) / C_normalizer
try:
term1 = np.sum(weights * E / (1 - gamma * C))
import numpy as np
from scipy.optimize import curve_fit
from scipy.optimize import fsolve, brentq
from scipy.interpolate import interp1d
import scipy.integrate
import sys
import os
import velociraptor_python_tools as vpt
from scipy.spatial import cKDTree
import h5py
import re
from constants import *
from snapshot import *
import copy
import itertools
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
def getHaloCoord(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = np.zeros(3)
if (('Xcminpot' not in catalog.keys())):# or
# (np.abs(catalog['Xcminpot'][halo])>0.1) or
# (np.abs(catalog['Ycminpot'][halo])>0.1) or
# (np.abs(catalog['Zcminpot'][halo])>0.1)):
return getHaloCoordCOM(catalog, halo, z=z, snapshottype=snapshottype, physical=physical)
if physical:
coords[0] = (catalog['Xcminpot'][halo])
coords[1] = (catalog['Ycminpot'][halo])
coords[2] = (catalog['Zcminpot'][halo])
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = (catalog['Xcminpot'][halo])*h*(1+z)
coords[1] = (catalog['Ycminpot'][halo])*h*(1+z)
coords[2] = (catalog['Zcminpot'][halo])*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = (catalog['Xcminpot'][halo])*(1+z)
coords[1] = (catalog['Ycminpot'][halo])*(1+z)
coords[2] = (catalog['Zcminpot'][halo])*(1+z)
else:
print('Snapshottype not set')
return coords
def getHaloRadius(catalog, halo, z=0, rtype='R_200crit', snapshottype='GADGET', physical=False): #Mpc/h
if physical:
return catalog[rtype][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
return catalog[rtype][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
return catalog[rtype][halo]*(1+z)
def getHaloCoordCOM(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h
coords = np.zeros(3)
if physical:
coords[0] = catalog['Xc'][halo]
coords[1] = catalog['Yc'][halo]
coords[2] = catalog['Zc'][halo]
elif snapshottype in ['GADGET', 'Gadget', 'gadget']:
coords[0] = catalog['Xc'][halo]*h*(1+z)
coords[1] = catalog['Yc'][halo]*h*(1+z)
coords[2] = catalog['Zc'][halo]*h*(1+z)
elif snapshottype in ['SWIFT', 'Swift', 'swift']:
coords[0] = catalog['Xc'][halo]*(1+z)
coords[1] = catalog['Yc'][halo]*(1+z)
coords[2] = catalog['Zc'][halo]*(1+z)
return coords
def readHaloFile(halofile):
atime,tree,numhalos,halodata,cosmodata,unitdata = vpt.ReadUnifiedTreeandHaloCatalog(halofile, desiredfields=[], icombinedfile=1,iverbose=0)
return atime,tree,numhalos,halodata,cosmodata,unitdata
def findSurroundingHaloProperties(hp, halolist, d_snap, boxsize=32.):
coords = hp['Coord']
halotree = cKDTree(coords, boxsize=boxsize)
for k in halolist:
if hp['R200'][k] == -1:
continue
halostring = hp['HaloIndex'][k]
length_of_neighbours = len(np.array(halotree.query_ball_point([hp['Coord'][k]], r=hp['R200'][k]*5)[0]))
distance, indices = halotree.query([hp['Coord'][k]], k=length_of_neighbours)
indices = np.array(indices[0])[1:]
distance = np.array(distance[0])[1:]
hp['Neighbours'][halostring] = hp['HaloIndex'][indices]
hp['Neighbour_distance'][halostring] = distance
hp['Neighbour_Velrad'][halostring] = np.zeros(len(distance))
j=0
for i in indices:
partindices = hp['Partindices'][hp['HaloIndex'][i]]
hp['Neighbour_Velrad'][halostring][j] = np.sum(d_snap['File'].get_radialvelocity(hp['Coord'][k], indices=partindices))/len(partindices)
j+=1
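# Added sketch of the periodic neighbour search used above and in
# fixSatelliteProblems (values are made up; the `boxsize` keyword needs a
# scipy version that supports periodic cKDTree queries):
#
# coords = np.random.uniform(0., 32., size=(100, 3))  # fake positions, Mpc/h
# tree = cKDTree(coords, boxsize=32.)
# neigh = tree.query_ball_point(coords[0], r=1.0)  # indices within 1 Mpc/h,
#                                                  # with periodic wrapping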
def fixSatelliteProblems(hp, TEMPORALHALOIDVAL=1000000000000, boxsize=32):
welke = np.where(hp['Coord'][:, 0] >= 0)[0]
halotree = cKDTree(hp['Coord'][welke], boxsize=boxsize)
toolarge = welke[np.where(hp['R200'][welke] > hp['R200'][np.argmax(hp['n_part'])]*1.2)[0]]
#print(i, toolarge)
if len(toolarge) != 0:
for tl in toolarge:
hp['M200'][tl] = -1
hp['R200'][tl] = -1
hp['hostHaloIndex'][hp['HaloIndex'][tl]==hp['hostHaloIndex']] = -2
for halo in welke:#range(len(hp['M200'])):
if hp['M200'][halo] == -1:
continue
buren = np.array(halotree.query_ball_point(hp['Coord'][halo], r = 2*hp['R200'][halo]))
if len(buren) <= 1:
continue
buren = buren[hp['R200'][buren] != -1]
if len(buren) == 0:
continue
i_largest = np.argmax(hp['n_part'][buren])
index_largest = buren[i_largest]
buren = np.delete(buren,i_largest)
coords = hp['Coord'][buren] - hp['Coord'][index_largest]
coords = np.where(np.abs(coords) > 0.5*boxsize, coords - coords/np.abs(coords)*boxsize, coords)
rad = np.sqrt(np.sum(coords*coords, axis=1))
burentemp = np.where(hp['R200'][buren]-rad+hp['R200'][index_largest] > 0)[0]
if len(burentemp) == 0:
continue
buren = buren[burentemp]
hp['hostHaloIndex'][buren] = index_largest
hp['M200'][buren] = -1
hp['R200'][buren] = -1
def findSubHaloFraction(hp, catalog):
if len(hp['hostHaloIndex']) < 10:
hp['Msub'] = np.zeros(len(hp['M200']))
return 0
i_hostH = np.where(hp['hostHaloIndex'] > -1)[0]
hp['Msub'] = np.zeros(len(hp['M200']))
for i in i_hostH:
isattemp = np.where(hp['HaloID'][i] == catalog['ID'])[0]
hp['Msub'][hp['hostHaloIndex'][i]] += catalog['Mass_FOF'][isattemp]
def buildHaloDictionary(Hydro=None, partType=None, multiple=None):
if ('DM' in partType) or ('H' in partType) or ('S' in partType):
return buildHaloDictionary_nieuw(partType=partType, multiple=multiple)
haloproperties = {}
if partType is None:
if Hydro is None:
sys.exit("buildHaloDictionary should have an entry for either Hydro or partType")
if partType is not None:
if partType in [0, 2, 3, 4, 5]:
sys.exit("Bestaat nog niet voor partType = %i" %partType)
elif partType == 7:
Hydro = True
elif partType == 8:
Hydro = True
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable'])
if Hydro:
haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if partType == 8:
haloarray.extend(['lambdaS', 'DensityS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in haloarray:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
else:
haloproperties[key] = np.zeros(0)
return haloproperties
def allocateSizes(key, lengte):
if key in ['R200', 'M200', 'redshift', 'lambda', 'Vmax', 'Rmax', 'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'lambdaDM', 'lambdaH',
'DMFraction', 'HFraction', 'lambdaS', 'SFraction']:
return np.ones(lengte[0])*-1
if key in ['HaloIndex', 'HaloID', 'snapshot', 'Npart', 'NpartDM', 'NpartH','NpartS',
'n_part', 'MaxRadIndex', 'hostHaloIndex', 'Tail', 'Head',
'RootHead', 'RootTail']:
return np.ones(lengte[0]).astype(int)*-1
elif key in ['Coord', 'Vel']:
return np.ones((lengte[0], 3))*-1
elif key in ['Density', 'AngularMomentum', 'Velrad', 'Mass_profile',
'DensityDM', 'DensityH', 'DMFraction_profile', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH', 'lambdaS', 'DensityS',
'SFraction_profile', 'MassS_profile','VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']:
return np.zeros((lengte[0], lengte[1]))
elif key in ['Npart_profile', 'NpartDM_profile', 'NpartH_profile', 'NpartS_profile']:
return np.zeros((lengte[0], lengte[1])).astype(int)
def buildHaloDictionary_nieuw(partType=None, multiple=None):
haloproperties = {}
if partType is None:
sys.exit("buildHaloDictionary should have an entry for partType")
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'Tail', 'Head', 'Vmax_part', 'Rmax_part',
'Vmax_interp', 'Rmax_interp', 'RootHead', 'RootTail'])
if 'H' in partType:
haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'NpartDM_profile','NpartH', 'NpartDM',
'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile',
'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH'])
if 'S' in partType:
haloarray.extend(['lambdaS', 'DensityS', 'NpartS',
'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
for key in haloarray:
if (multiple is not None) and (key=='Partindices'):
haloproperties[key] = {}
elif multiple is not None:
haloproperties[key] = allocateSizes(key, multiple)
else:
haloproperties[key] = None
return haloproperties
def quantity_keys():
return (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Npart', 'NpartDM',
'NpartH', 'NpartS', 'Vel', 'n_part', 'Tail', 'Head', 'RootHead', 'RootTail',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'lambdaDM', 'lambdaH',
'lambdaS', 'DMFraction', 'HFraction', 'SFraction',
'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp'])
def profile_keys():
return (['HaloIndex', 'HaloID', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'MassTable',
'Mass_profile', 'MaxRadIndex', 'Density', 'DensityDM', 'DensityH', 'NpartH_profile', 'DMFraction_profile',
'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature',
'AngularMomentumDM', 'AngularMomentumH', 'NpartS_profile', 'SFraction_profile', 'MassS_profile',
'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS'])
def convertVel_keys():
return (['HaloIndex', 'HaloID', 'Npart', 'NpartDM', 'NpartH', 'NpartS', 'n_part', 'Vel', 'Coord', 'R200', 'M200',
'Tail', 'Head', 'RootHead', 'RootTail', 'redshift', 'snapshot', 'hostHaloIndex'])
def findHaloPropertiesInSnap_nieuw(catalog, d_snap, Nhalo=100, halolist=None,
startHalo=0, d_radius=None, d_partType = None, d_runparams=None,
partdata = None, TEMPORALHALOIDVAL=1000000000000, boxsize=None, debug=False):
#Keeping all VELOCIraptor haloes, but saving 'wrong' haloes as HaloIndex = -1
if d_runparams['VELconvert'] == False:
boxsize = d_snap['File'].boxsize
partType = d_partType['particle_type']
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, d_snap['snapshot']))
if 'profile' in d_radius.keys():
ylen = len(d_radius['profile'])
else:
ylen = 0
haloproperties = buildHaloDictionary(partType=partType, multiple=[Nhalo, ylen])
if len(catalog['Mass_200crit']) == 0:
return haloproperties
# if (d_runparams['VELconvert'] == False):
# sortorder = np.argsort(catalog['Mass_tot'][:])[::-1]
# sortorderinvert = np.argsort(sortorder)
# for key in catalog.keys():
# catalog[key][:] = catalog[key][sortorder]
# else:
#sortorder = np.arange(len(catalog['Mass_tot'])).astype(int)
# if partdata is not None:
# for key in partdata.keys():
# partdata[key][:] = partdata[key][sortorder]
if halolist is None:
haloindices = np.arange(startHalo, startHalo+Nhalo).astype(int)
use_existing_r200 = False
else:
haloindices = (halolist%TEMPORALHALOIDVAL - 1).astype(int)
use_existing_r200 = False
halo_i = -1
for halo in haloindices:
halo_i += 1
#if halolist is not None:
# print('Computing properties for halo %i'%halo)
if halo%10000==0:
print('Computing properties for halo %i-%i' %(halo, halo+10000))
if halo > len(catalog['Xc'])-1:
print("Nhalo > N(velociraptor haloes)")
break
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'],
physical=d_runparams['Physical'])
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo_i, z=d_snap['redshift'],
rtype = d_radius['Rchoice'], snapshottype=d_runparams['SnapshotType'],
physical=d_runparams['Physical'])
satellite = False
#Trusting VELOCIraptor not to falsely identify haloes as satellites
if (halolist is None) and (catalog['hostHaloID'][halo_i] != -1):
satellite = True
hostHaloIDtemp = np.where(catalog['hostHaloID'][halo_i]==catalog['ID'])[0]
if len(hostHaloIDtemp) == 0:
hostHaloIDtemp = -2
else:
hostHaloIDtemp = hostHaloIDtemp[0]
else:
hostHaloIDtemp = -1
#All happens here
if debug:
start_time = time.time()
print('M200: ', catalog['Mass_200crit'][halo_i])
print('R200: ', catalog['R_200crit'][halo_i])
print('ID: ', catalog['ID'][halo_i])
if d_runparams['VELconvert']:
if d_runparams['ParticleDataType'] != 'None':
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType, particledata=partdata['Particle_Types'], d_partType=d_partType)
else:
halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'],
partType=partType)
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
elif d_runparams['ParticleDataType'] == 'None':
#print("Halo", halo)
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug)
else:
#print("Halo", halo,len(partdata['Particle_IDs'][sortorder[halo]]))
halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius,
partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200,
profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug,
particledata=partdata['Particle_IDs'][halo_i])
if halopropertiestemp is None:
if debug:
print("De halo is leeg???")
continue
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
start_time = time.time()
if d_runparams['TreeData']:
halopropertiestemp['Tail'] = catalog['Tail'][halo_i]-1
halopropertiestemp['Head'] = catalog['Head'][halo_i]-1
halopropertiestemp['RootTail'] = catalog['RootTail'][halo_i]-1
halopropertiestemp['RootHead'] = catalog['RootHead'][halo_i]-1
if d_runparams['VELconvert'] == False:
if halopropertiestemp is None:
halopropertiestemp = buildHaloDictionary(partType=partType)
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['HaloIndex'] = -1
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
halopropertiestemp['Coord'] = coords
else:
if satellite:
halopropertiestemp['Npart'] = catalog['npart'][halo_i]
halopropertiestemp['n_part'] = catalog['npart'][halo_i]
halopropertiestemp['HaloID'] = catalog['ID'][halo_i]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoordCOM(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'], physical=d_runparams['Physical'])
rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/
halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
doorgaan = False
if (d_runparams['Profiles'] == True) and (key in profile_keys()):
doorgaan = True
if (d_runparams['Quantities'] == True) and (key in quantity_keys()):
doorgaan = True
if (d_runparams['VELconvert'] == True) and (key in convertVel_keys()):
doorgaan = True
if doorgaan == False:
continue
if key in ['Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
if (halopropertiestemp['HaloIndex'] == -1) and (key != 'HaloID'):
continue
if halopropertiestemp[key] is None:
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
else:
haloproperties[key][halo] = halopropertiestemp[key]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
if 'profile' in d_radius.keys():
haloproperties['Radius'] = d_radius['profile']
haloproperties['redshift'] = np.array([d_snap['redshift']])
haloproperties['snapshot'] = np.array([d_snap['snapshot']])
j = 0
if d_runparams['VELconvert'] == False:
haloproperties['MassTable'] = d_snap['File'].mass
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0:
waar = np.where(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
if d_runparams['TreeData']:
haloproperties['Tail'] = haloproperties['Tail'].astype(int)
haloproperties['Head'] = haloproperties['Head'].astype(int)
haloproperties['RootTail'] = haloproperties['RootTail'].astype(int)
haloproperties['RootHead'] = haloproperties['RootHead'].astype(int)
if (len(haloproperties['Coord']) > 0) and (halolist is None):
if d_runparams['Quantities'] or d_runparams['VELconvert']:
print("Reassigning satellite haloes")
fixSatelliteProblems(haloproperties, boxsize=boxsize)
return haloproperties
def findHaloPropertiesInSnap(catalog, snappath, snapshot, partType=8, Nhalo=100,
startHalo=0, softeningLength=0.002, Radius=1., partlim=200, sortorder=None,
boxsize=32, TEMPORALHALOIDVAL=1000000000000, particledata=None, mass=False):
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot))
haloproperties = buildHaloDictionary(partType=partType, multiple=True)
if len(catalog['Mass_tot']) == 0:
return haloproperties
if sortorder is None:
sortorder = np.argsort(catalog['Mass_tot'][:])[::-1]
sortorderinvert = np.argsort(sortorder)
else:
sortorderinvert = np.argsort(sortorder)
d_snap = {}
d_snap['snapshot'] = snapshot
limiet = 0
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=partType, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
for key in catalog.keys():
catalog[key][:] = catalog[key][sortorder]
for halo in range(startHalo, startHalo+Nhalo):
#start_time = time.time()
#print(halo)
#print(catalog['npart'][halo])
if halo%1000==0:
print('Computing properties for halo %i-%i' %(halo, halo+1000))
if halo > len(catalog['Xc'])-1:
print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet))
print("Coordinates: ", coords)
break
if limiet > 500: #Only computing sats
if catalog['hostHaloID'][halo] == -1:
continue
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
coords = coords%boxsize
radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift)
satellite = False
if (catalog['npart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0):
startHalo += 1
# haloproperties['TreeBool'][halo] = 0
continue
#Checking for dissapeared host haloes
if (catalog['hostHaloID'][halo] != -1) and len(haloproperties['HaloID'])>1:
haloindextemp = np.where((haloproperties['HaloID']%TEMPORALHALOIDVAL)==catalog['hostHaloID'][halo]%TEMPORALHALOIDVAL)[0]
if len(haloindextemp) == 0:
hostHaloIDtemp = -1
if catalog['npart'][halo] < partlim/2.:
hostHaloIDtemp = -2
satellite = True
else:
afstandtemp = (haloproperties['Coord'][haloindextemp[0]]-coords)
afstandtemp = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
afstandtemp = (np.sum(afstandtemp*afstandtemp))**0.5
if afstandtemp < haloproperties['R200'][haloindextemp[0]]: # and catalog['npart'][halo] > 50:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = haloindextemp[0]
satellite = True
else:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = -1
else:
hostHaloIDtemp = -1
#All happens here
halopropertiestemp = findHaloProperties(d_snap, halo, coords, Radius, partType=partType,
rad=radhier, mass=mass, satellite=satellite, partlim=partlim)
#print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
if halopropertiestemp is None:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
if satellite == False and halopropertiestemp['Npart'] < partlim:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
limiet = 0
if satellite:
halopropertiestemp['Npart'] = catalog['npart'][halo]
#start_time = time.time()
halopropertiestemp['n_part'] = catalog['npart'][halo]
halopropertiestemp['HaloID'] = catalog['ID'][halo]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
if key in ['TreeBool', 'Tail', 'Head', 'Radius', 'MassTable', 'snapshot', 'redshift']:
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
elif key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif halo == startHalo:
haloproperties[key] = [halopropertiestemp[key]]
else:
haloproperties[key] = np.concatenate((haloproperties[key], [halopropertiestemp[key]]))
#print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
haloproperties['Radius'] = Radius
haloproperties['redshift'] = np.array([d_snap['File'].redshift])
haloproperties['snapshot'] = np.array([d_snap['snapshot']])
haloproperties['MassTable'] = d_snap['File'].mass
j = 0
for i in d_snap['File'].readParticles:
if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0:
waar = np.where(d_snap['File'].partTypeArray == i)[0][0]
haloproperties['MassTable'][i] = d_snap['File'].masses[waar]
j += 1
findSubHaloFraction(haloproperties, catalog)
print("Reassigning satellite haloes")
if len(haloproperties['Coord']) > 0:
if 'DMFraction' in haloproperties.keys():
Hydro = True
else:
Hydro = False
fixSatelliteProblems(haloproperties, Hydro = Hydro)
#print("Computing subhalo fraction")
print(haloproperties.keys())
return haloproperties
def findHaloProperties(d_snap, halo, Coord, fixedRadius, r200fac = 8, partType=None, rad=None, satellite=False,
partlim=200, profiles=False, quantities=True, particledata=None, debug=False, use_existing_r200=False):
haloproperties = buildHaloDictionary(partType=partType)
if isinstance(fixedRadius, dict):
if 'profile' in fixedRadius.keys():
radprofile = fixedRadius['profile']
radfrac = fixedRadius['Rfrac']
else:
radfrac = fixedRadius['Rfrac']
else:
radprofile = fixedRadius
radfrac = r200fac
snap = d_snap['File']
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = halo#catalog['ID'][halo]
snap.debug = debug
coord = Coord
if debug:
start_time = time.time()
if rad is None:
rad = fixedRadius[-1]
snap.get_temphalo(coord, rad, r200fac=radfrac, fixedRadius=radprofile, satellite=satellite,
particledata=particledata, partlim=partlim, initialise_profiles=profiles, use_existing_r200=use_existing_r200)
if len(snap.temphalo['indices']) < partlim or len(snap.temphalo['indices'])<=1:
if debug:
print('Halo has %i particles, and is thus too small' %len(snap.temphalo['indices']))
return None
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo initiated', snap.temphalo['R200'])
if profiles:
if debug:
start_time = time.time()
snap.get_temphalo_profiles()
snap.get_specific_angular_momentum_radius(coord, radius=snap.temphalo['Radius'])
haloproperties['AngularMomentum'] = snap.temphalo['AngularMomentum']
haloproperties['Density'] = snap.temphalo['profile_density']
haloproperties['Velrad'] = snap.temphalo['profile_vrad']
haloproperties['Npart_profile'] = snap.temphalo['profile_npart']
haloproperties['Mass_profile'] = snap.temphalo['profile_mass']
haloproperties['MaxRadIndex'] = snap.temphalo['MaxRadIndex']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'halo profiles calculated')
haloproperties['Coord'] = snap.temphalo['Coord']
#Virial radius and mass
R200 = snap.temphalo['R200']
haloproperties['M200']= snap.temphalo['M200']
haloproperties['R200'] = R200
#Assigning halo properties
if quantities:
if debug:
start_time = time.time()
if (satellite == False) or (particledata is not None):
snap.get_spin_parameter()
haloproperties['lambda'] = snap.temphalo['lambda']
haloproperties['lambda'] = snap.temphalo['lambda']
snap.get_Vmax_Rmax()
haloproperties['Vmax_part'] = snap.temphalo['Vmax_part']
haloproperties['Rmax_part'] = snap.temphalo['Rmax_part']
haloproperties['Vmax_interp'] = snap.temphalo['Vmax_interp']
haloproperties['Rmax_interp'] = snap.temphalo['Rmax_interp']
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'lambda calculated')
haloproperties['Vel'] = snap.temphalo['Vel']
haloproperties['Partindices'] = snap.temphalo['indices']
haloproperties['Npart'] = len(haloproperties['Partindices'])
# if satellite == False:
# haloproperties['Virial_ratio'] = snap.get_virial_ratio(1000)
# else:
# haloproperties['Virial_ratio'] = -1
if debug:
start_time = time.time()
if len(snap.readParticles) > 1:
nietnulhier=np.where(haloproperties['Mass_profile']!=0)
for i_pT in range(len(snap.readParticles)):
if quantities:
if (satellite == False) or (particledata is not None):
haloproperties['lambda'+snap.namePrefix[i_pT]] = snap.temphalo['lambda'+snap.namePrefix[i_pT]]
else:
haloproperties['lambda'+snap.namePrefix[i_pT]] = -1
haloproperties['Npart'+snap.namePrefix[i_pT]] = snap.temphalo['Npart'+snap.namePrefix[i_pT]]
haloproperties[snap.namePrefix[i_pT]+'Fraction'] = snap.temphalo[snap.namePrefix[i_pT]+'Fraction']
if profiles:
haloproperties['AngularMomentum'+snap.namePrefix[i_pT]] = snap.temphalo['AngularMomentum'+snap.namePrefix[i_pT]]
haloproperties['Density'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'density']
haloproperties['Npart'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'npart']
haloproperties['Velrad'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'vrad']
haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'mass']
if snap.readParticles[i_pT] == 0:
haloproperties['Temperature'] = snap.temphalo['profile_temperature']
elif snap.readParticles[i_pT] == 5:
haloproperties['AgeS'] = snap.temphalo['profile_Sage']
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'] = np.zeros_like(haloproperties['Mass_profile'])
haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'][nietnulhier] = haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'][nietnulhier]/haloproperties['Mass_profile'][nietnulhier]
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'particle types done')
if particledata is not None:
if debug:
start_time = time.time()
snap.delete_used_indices(snap.temphalo['indices'])
if debug:
print("--- %s seconds ---" % (time.time() - start_time), 'Deleted particles')
return haloproperties
def copyVELOCIraptor(catalog, halo, Coord, redshift, d_partType=None, partType=None, particledata=None):
c = constant(redshift=redshift)
c.change_constants(redshift)
comoving_rhocrit200 = deltaVir*c.rhocrit_Ms_Mpci3*h/(h*(1+redshift))**3
haloproperties = buildHaloDictionary(partType=partType)
haloproperties['HaloIndex'] = halo
haloproperties['HaloID'] = catalog['ID'][halo]
haloproperties['n_part'] = catalog['npart'][halo]
haloproperties['Coord'] = Coord
#Virial radius and mass
haloproperties['M200'] = catalog['Mass_200crit'][halo]*h
haloproperties['R200'] = (haloproperties['M200']*1.e10/(comoving_rhocrit200 * 4./3. * np.pi))**(1./3.)
#Assigning halo properties
haloproperties['Vel'] = np.array([catalog['VXc'][halo], catalog['VYc'][halo], catalog['VZc'][halo]])*(1+redshift)
haloproperties['Npart'] = catalog['npart'][halo]
if (particledata is not None) and (len(d_partType['particle_type']) > 1):
allpart = len(particledata[halo])
for i_pT in range(len(d_partType['particle_type'])):
if allpart == 0:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = 0
else:
haloproperties['Npart'+d_partType['particle_type'][i_pT]] = len(np.where(particledata[halo] == d_partType['particle_number'][i_pT])[0])
#print(d_partType['particle_type'][i_pT], d_partType['particle_number'][i_pT], haloproperties['Npart'+d_partType['particle_type'][i_pT]])
return haloproperties
def everythingOutside(haloproperties, d_snap):
allpin = np.zeros(0)
iets=0
allpinBool = np.array([True]*np.sum(d_snap['File'].npart))
for i in haloproperties['HaloIndex']:
allpinBool[haloproperties['Partindices'][i]] = False
outsideIndices = np.where(allpinBool)
# tools to ease plotting
# first, adjust params in matplotlib
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['axes.linewidth'] = 0.1
matplotlib.rcParams['xtick.labelsize'] = 4
matplotlib.rcParams['xtick.major.width'] = 0.1
matplotlib.rcParams['xtick.major.size'] = 1
matplotlib.rcParams['ytick.labelsize'] = 4
matplotlib.rcParams['ytick.major.width'] = 0.1
matplotlib.rcParams['ytick.major.size'] = 1
# imports
import matplotlib.pyplot as plt
import os
import logging
import numpy as np
import matplotlib as mpl
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch, Rectangle
from matplotlib.font_manager import FontProperties
from matplotlib import gridspec
from matplotlib.ticker import FormatStrFormatter
from tronn.util.h5_utils import AttrKeys
from tronn.util.utils import DataKeys
# heavily guided by aparent (https://github.com/johli/aparent) visualization code
FONTPROP = FontProperties(family="Arial", weight="bold")
FONTPROP = FontProperties(family="DejaVu Sans", weight="bold")
LETTERS = {
"T" : TextPath((-0.305, 0), "T", size=1, prop=FONTPROP),
"G" : TextPath((-0.384, 0), "G", size=1, prop=FONTPROP),
"A" : TextPath((-0.35, 0), "A", size=1, prop=FONTPROP),
"C" : TextPath((-0.366, 0), "C", size=1, prop=FONTPROP),
}
COLOR_SCHEME = {
"A": "darkgreen",
"C": "blue",
"G": "orange",
"T": "red"
}
IDX_TO_LETTER = {
0: "A",
1: "C",
2: "G",
3: "T"
}
def plot_letter(letter, x, y, yscale=1, ax=None, color=None, alpha=1.0):
"""plot letters at appropriate positions
"""
globscale = 1.35
text = LETTERS[letter]
chosen_color = COLOR_SCHEME[letter]
if color is not None :
chosen_color = color
t = mpl.transforms.Affine2D().scale(1*globscale, yscale*globscale) + \
mpl.transforms.Affine2D().translate(x,y) + ax.transData
p = PathPatch(text, lw=0, fc=chosen_color, alpha=alpha, transform=t)
if ax != None:
ax.add_artist(p)
return p
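# Added usage note (illustrative): plot_letter draws a single scaled base onto
# an existing axes, e.g.
# fig, ax = plt.subplots()
# plot_letter("A", x=0.5, y=0.0, yscale=1.5, ax=ax)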
def plot_pwm(
array,
plot_file):
"""plot pwm
"""
# figure out widths and heights (matches plot weights below)
desired_width = 6 * (array.shape[0] / 160.)
desired_width = 6 * (array.shape[0] / 140.) # NOTE: manually chosen to match importance scores len 120bp
width_to_height_factor = 8 #6
width_height_ratio = array.shape[0] / float(array.shape[1])
desired_height = desired_width * width_to_height_factor / width_height_ratio / 10.
# set up fig
figsize=(desired_width, desired_height)
f = plt.figure(figsize=figsize)
# convert to entropy
entropy = np.zeros(array.shape)
entropy[array > 0] = array[array > 0] * -np.log2(array[array > 0])
entropy = np.sum(entropy, axis=1)
conservation = 2 - entropy
# set up plot area
height_base = 0.0
logo_height = 1.0
logo_ax = plt.gca()
# go through each position and bp
for j in range(array.shape[0]) :
sort_index = np.argsort(array[j, :])
for ii in range(0, 4) :
i = sort_index[ii]
nt_prob = array[j, i] * conservation[j]
nt = ''
if i == 0 :
nt = 'A'
elif i == 1 :
nt = 'C'
elif i == 2 :
nt = 'G'
elif i == 3 :
nt = 'T'
if ii == 0 :
plot_letter(nt, j + 0.5, height_base, nt_prob * logo_height, logo_ax, color=None)
else :
prev_prob = np.sum(array[j, sort_index[:ii]] * conservation[j] + 0.001) * logo_height
plot_letter(nt, j + 0.5, height_base + prev_prob, nt_prob * logo_height, logo_ax, color=None)
plt.xlim((0, array.shape[0]))
plt.ylim((0, 2))
plt.xticks([], [])
plt.yticks([], [])
plt.axis('off')
logo_ax.axhline(y=0.0 + height_base, color='black', linestyle='-', linewidth=2/10.)
plt.savefig(plot_file, transparent=True)
return
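# Added usage sketch (illustrative; assumes a (length, 4) probability matrix in
# A/C/G/T column order with rows summing to 1):
#
# rng = np.random.RandomState(0)
# pwm = rng.dirichlet(np.ones(4), size=12)  # 12 positions x 4 bases
# plot_pwm(pwm, "example_pwm.pdf")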
def plot_weights(
array,
ax,
height_min=-1,
height_max=1,
x_lab=False,
y_lab=False,
sig_array=None):
"""plot weights
"""
# array is (seqlen, 4)
height_base = 0.0
# for each position
# TODO include option to plot base pairs in gray
for pos_idx in range(array.shape[0]):
letter_idx = np.argmax(np.abs(array[pos_idx]))
#!/usr/bin/env python
"""Carry out standard MBAR analysis on 1D REMC simulation output.
The exchange variable is assumed to be temperature.
"""
import argparse
import numpy as np
from scipy import interpolate
from origamipy import conditions
from origamipy import biases
from origamipy import files
from origamipy import outputs
from origamipy import decorrelate
from origamipy import mbar_wrapper
from origamipy import utility
def main():
args = parse_args()
system_file = files.JSONStructInpFile(args.system_filename)
staple_lengths = utility.calc_staple_lengths(system_file)
staple_types = utility.calc_num_staple_types(system_file)
num_scaffold_domains = utility.calc_num_scaffold_domains(system_file)
inp_filebase = f'{args.outs_dir}/{args.filebase}'
fileformatter = construct_fileformatter()
all_conditions = conditions.construct_remc_conditions(
args.temps, args.staple_m, fileformatter, staple_lengths)
sim_collections = []
for rep in range(args.reps):
rep_sim_collections = outputs.create_sim_collections(
inp_filebase, all_conditions, rep)
sim_collections.append(rep_sim_collections)
decor_outs = decorrelate.DecorrelatedOutputs(
sim_collections, all_conditions=all_conditions,
rep_conditions_equal=True)
decor_outs.read_decors_from_files()
mbarw = mbar_wrapper.MBARWrapper(decor_outs)
mbarw.perform_mbar()
# Calculate expectations and LFEs for simulations temperatures
all_se_tags = decor_outs.all_series_tags
if args.tags is None:
se_tags = all_se_tags
else:
se_tags = args.tags
out_filebase = f'{args.analysis_dir}/{args.filebase}'
mbarw.calc_all_expectations(out_filebase, all_se_tags, all_conditions)
lfes_filebase = f'{out_filebase}_lfes'
mbarw.calc_all_1d_lfes(lfes_filebase, se_tags, all_conditions)
# Estimate melting temperature
guess_temp = estimate_halfway_temp(
mbarw, args.tag, all_conditions, args.assembled_op)
if args.guess_temp is not None:
guess_temp = args.guess_temp
print('Guess temperature: {:.3f} K'.format(
np.around(guess_temp, decimals=3)))
conds = conditions.SimConditions(
{'temp': guess_temp,
'staple_m': args.staple_m,
'bias': biases.NoBias()},
fileformatter, staple_lengths)
bias = biases.NoBias()
melting_temp = est_melting_temp_and_barrier(
mbarw, fileformatter, staple_lengths, conds, bias, guess_temp,
args.staple_m)
conds = conditions.SimConditions(
{'temp': melting_temp,
'staple_m': args.staple_m,
'bias': biases.NoBias()},
fileformatter, staple_lengths)
# Calculate expectations and LFEs for melting temperature
exps_filebase = f'{out_filebase}-melting'
lfes_filebase = f'{out_filebase}_lfes-melting'
mbarw.calc_all_1d_lfes(lfes_filebase, se_tags, [conds])
mbarw.calc_all_expectations(exps_filebase, all_se_tags, [conds])
# Calculate expectations along OP slices
mbarws = []
all_decor_outs = []
sampled_ops = []
for i in range(1, args.assembled_op + 1):
sim_collections = []
for rep in range(args.reps):
rep_sim_collections = outputs.create_sim_collections(
inp_filebase, all_conditions, rep)
sim_collections.append(rep_sim_collections)
decor_outs = decorrelate.DecorrelatedOutputs(
sim_collections, all_conditions=all_conditions,
rep_conditions_equal=True)
decor_outs.read_decors_from_files(data_only=True)
filtered_count = decor_outs.filter_collections(args.tag, i)
if filtered_count == 0:
continue
sampled_ops.append(i)
all_decor_outs.append(decor_outs)
mbarw = mbar_wrapper.MBARWrapper(decor_outs)
mbarw.perform_mbar()
mbarws.append(mbarw)
all_tags = []
for i in range(1, staple_types + 1):
all_tags.append(f'staples{i}')
all_tags.append(f'staplestates{i}')
for i in range(num_scaffold_domains):
all_tags.append(f'domainstate{i}')
aves, stds = calc_reduced_expectations(
conds, mbarws, all_decor_outs, all_tags)
aves = np.concatenate([[sampled_ops], np.array(aves).T])
aves_file = files.TagOutFile(f'{out_filebase}-{args.tag}.aves')
aves_file.write([args.tag] + all_tags, aves.T)
stds = np.concatenate([[sampled_ops], np.array(stds).T])
stds_file = files.TagOutFile(f'{out_filebase}-{args.tag}.stds')
stds_file.write([args.tag] + all_tags, stds.T)
def calc_reduced_expectations(conds, mbarws, all_decor_outs, tags):
all_aves = []
all_stds = []
for mbarw, decor_outs in zip(mbarws, all_decor_outs):
aves = []
stds = []
for tag in tags:
ave, std = mbarw.calc_expectation(tag, conds)
aves.append(ave)
stds.append(std)
all_aves.append(aves)
all_stds.append(stds)
return all_aves, all_stds
def est_melting_temp_and_barrier(
mbarw, fileformatter, staple_lengths, conds, bias, guess_temp,
staple_m):
# try:
# melting_temp = mbarw.estimate_melting_temp(conds, guess_temp)
# except:
melting_temp = mbarw.estimate_melting_temp_endpoints(conds, guess_temp)
conds = conditions.SimConditions(
{'temp': melting_temp,
'staple_m': staple_m,
'bias': bias},
fileformatter, staple_lengths)
    melting_temp_f = '{:.3f}'.format(np.around(melting_temp, decimals=3))
"""Functions for loading learning examples from disk and numpy arrays into tensors.
Augmentations are also called from here.
"""
import re
import cv2
import numpy as np
import augmentation.appearance
import augmentation.background
import augmentation.voc_loader
import boxlib
import cameralib
import improc
import tfu
import util
from options import FLAGS
from tfu import TRAIN
def load_and_transform3d(ex, joint_info, learning_phase, rng):
    # Get the random number generators for the different augmentations to make it reproducible
appearance_rng = util.new_rng(rng)
background_rng = util.new_rng(rng)
geom_rng = util.new_rng(rng)
partial_visi_rng = util.new_rng(rng)
output_side = FLAGS.proc_side
output_imshape = (output_side, output_side)
if 'sailvos' in ex.image_path.lower():
# This is needed in order not to lose precision in later operations.
# Background: In the Sailvos dataset (GTA V), some world coordinates
# are crazy large (several kilometers, i.e. millions of millimeters, which becomes
# hard to process with the limited simultaneous dynamic range of float32).
# They are stored in float64 but the processing is done in float32 here.
ex.world_coords -= ex.camera.t
ex.camera.t[:] = 0
box = ex.bbox
if 'surreal' in ex.image_path.lower():
# Surreal images are flipped wrong in the official dataset release
box = box.copy()
box[0] = 320 - (box[0] + box[2])
# Partial visibility
if 'surreal' in ex.image_path.lower() and 'surmuco' not in FLAGS.dataset:
partial_visi_prob = 0.5
elif 'h36m' in ex.image_path.lower() and 'many' in FLAGS.dataset:
partial_visi_prob = 0.5
else:
partial_visi_prob = FLAGS.partial_visibility_prob
use_partial_visi_aug = (
(learning_phase == TRAIN or FLAGS.test_aug) and
partial_visi_rng.rand() < partial_visi_prob)
if use_partial_visi_aug:
box = util.random_partial_subbox(boxlib.expand_to_square(box), partial_visi_rng)
# Geometric transformation and augmentation
crop_side = np.max(box[2:])
center_point = boxlib.center(box)
if ((learning_phase == TRAIN and FLAGS.geom_aug) or
(learning_phase != TRAIN and FLAGS.test_aug and FLAGS.geom_aug)):
center_point += util.random_uniform_disc(geom_rng) * FLAGS.shift_aug / 100 * crop_side
# The homographic reprojection of a rectangle (bounding box) will not be another rectangle
# Hence, instead we transform the side midpoints of the short sides of the box and
# determine an appropriate zoom factor by taking the projected distance of these two points
# and scaling that to the desired output image side length.
if box[2] < box[3]:
# Tall box: take midpoints of top and bottom sides
delta_y = np.array([0, box[3] / 2])
sidepoints = center_point + np.stack([-delta_y, delta_y])
else:
# Wide box: take midpoints of left and right sides
delta_x = np.array([box[2] / 2, 0])
sidepoints = center_point + np.stack([-delta_x, delta_x])
cam = ex.camera.copy()
cam.turn_towards(target_image_point=center_point)
cam.undistort()
cam.square_pixels()
cam_sidepoints = cameralib.reproject_image_points(sidepoints, ex.camera, cam)
crop_side = np.linalg.norm(cam_sidepoints[0] - cam_sidepoints[1])
cam.zoom(output_side / crop_side)
cam.center_principal_point(output_imshape)
if FLAGS.geom_aug and (learning_phase == TRAIN or FLAGS.test_aug):
s1 = FLAGS.scale_aug_down / 100
s2 = FLAGS.scale_aug_up / 100
zoom = geom_rng.uniform(1 - s1, 1 + s2)
cam.zoom(zoom)
r = np.deg2rad(FLAGS.rot_aug)
cam.rotate(roll=geom_rng.uniform(-r, r))
world_coords = ex.univ_coords if FLAGS.universal_skeleton else ex.world_coords
metric_world_coords = ex.world_coords
if learning_phase == TRAIN and geom_rng.rand() < 0.5:
cam.horizontal_flip()
# Must reorder the joints due to left and right flip
camcoords = cam.world_to_camera(world_coords)[joint_info.mirror_mapping]
metric_world_coords = metric_world_coords[joint_info.mirror_mapping]
else:
camcoords = cam.world_to_camera(world_coords)
imcoords = cam.world_to_image(metric_world_coords)
# Load and reproject image
image_path = util.ensure_absolute_path(ex.image_path)
origsize_im = improc.imread_jpeg(image_path)
if 'surreal' in ex.image_path.lower():
# Surreal images are flipped wrong in the official dataset release
origsize_im = origsize_im[:, ::-1]
interp_str = (FLAGS.image_interpolation_train
if learning_phase == TRAIN else FLAGS.image_interpolation_test)
antialias = (FLAGS.antialias_train if learning_phase == TRAIN else FLAGS.antialias_test)
interp = getattr(cv2, 'INTER_' + interp_str.upper())
im = cameralib.reproject_image(
origsize_im, ex.camera, cam, output_imshape, antialias_factor=antialias, interp=interp)
# Color adjustment
if re.match('.*mupots/TS[1-5]/.+', ex.image_path):
im = improc.adjust_gamma(im, 0.67, inplace=True)
elif '3dhp' in ex.image_path and re.match('.+/(TS[1-4])/', ex.image_path):
im = improc.adjust_gamma(im, 0.67, inplace=True)
im = improc.white_balance(im, 110, 145)
elif 'panoptic' in ex.image_path.lower():
im = improc.white_balance(im, 120, 138)
# Background augmentation
if hasattr(ex, 'mask') and ex.mask is not None:
bg_aug_prob = 0.2 if 'sailvos' in ex.image_path.lower() else FLAGS.background_aug_prob
if (FLAGS.background_aug_prob and (learning_phase == TRAIN or FLAGS.test_aug) and
background_rng.rand() < bg_aug_prob):
fgmask = improc.decode_mask(ex.mask)
if 'surreal' in ex.image_path:
# Surreal images are flipped wrong in the official dataset release
fgmask = fgmask[:, ::-1]
fgmask = cameralib.reproject_image(
fgmask, ex.camera, cam, output_imshape, antialias_factor=antialias, interp=interp)
im = augmentation.background.augment_background(im, fgmask, background_rng)
# Occlusion and color augmentation
im = augmentation.appearance.augment_appearance(
im, learning_phase, FLAGS.occlude_aug_prob, appearance_rng)
im = tfu.nhwc_to_std(im)
im = improc.normalize01(im)
# Joints with NaN coordinates are invalid
is_joint_in_fov = ~np.logical_or(
np.any(imcoords < 0, axis=-1), np.any(imcoords >= FLAGS.proc_side, axis=-1))
joint_validity_mask = ~np.any(np.isnan(camcoords), axis=-1)
rot_to_orig_cam = ex.camera.R @ cam.R.T
rot_to_world = cam.R.T
return dict(
image=im,
intrinsics=np.float32(cam.intrinsic_matrix),
image_path=ex.image_path,
coords3d_true=np.nan_to_num(camcoords).astype(np.float32),
coords2d_true=np.nan_to_num(imcoords).astype(np.float32),
rot_to_orig_cam=rot_to_orig_cam.astype(np.float32),
rot_to_world=rot_to_world.astype(np.float32),
cam_loc=cam.t.astype(np.float32),
joint_validity_mask=joint_validity_mask,
is_joint_in_fov=np.float32(is_joint_in_fov))
def load_and_transform2d(ex, joint_info, learning_phase, rng):
    # Get the random number generators for the different augmentations to make it reproducible
appearance_rng = util.new_rng(rng)
geom_rng = util.new_rng(rng)
partial_visi_rng = util.new_rng(rng)
# Load the image
image_path = util.ensure_absolute_path(ex.image_path)
im_from_file = improc.imread_jpeg(image_path)
# Determine bounding box
bbox = ex.bbox
if learning_phase == TRAIN and partial_visi_rng.rand() < FLAGS.partial_visibility_prob:
bbox = util.random_partial_subbox(boxlib.expand_to_square(bbox), partial_visi_rng)
crop_side = np.max(bbox)
center_point = boxlib.center(bbox)
orig_cam = cameralib.Camera.create2D(im_from_file.shape)
cam = orig_cam.copy()
cam.zoom(FLAGS.proc_side / crop_side)
if FLAGS.geom_aug:
center_point += util.random_uniform_disc(geom_rng) * FLAGS.shift_aug / 100 * crop_side
s1 = FLAGS.scale_aug_down / 100
s2 = FLAGS.scale_aug_up / 100
cam.zoom(geom_rng.uniform(1 - s1, 1 + s2))
r = np.deg2rad(FLAGS.rot_aug)
cam.rotate(roll=geom_rng.uniform(-r, r))
if FLAGS.geom_aug and geom_rng.rand() < 0.5:
cam.horizontal_flip()
# Must also permute the joints to exchange e.g. left wrist and right wrist!
imcoords = ex.coords[joint_info.mirror_mapping]
else:
imcoords = ex.coords
new_center_point = cameralib.reproject_image_points(center_point, orig_cam, cam)
cam.shift_to_center(new_center_point, (FLAGS.proc_side, FLAGS.proc_side))
is_annotation_invalid = (np.nan_to_num(imcoords[:, 1]) > im_from_file.shape[0] * 0.95)
imcoords[is_annotation_invalid] = np.nan
imcoords = cameralib.reproject_image_points(imcoords, orig_cam, cam)
interp_str = (FLAGS.image_interpolation_train
if learning_phase == TRAIN else FLAGS.image_interpolation_test)
antialias = (FLAGS.antialias_train if learning_phase == TRAIN else FLAGS.antialias_test)
interp = getattr(cv2, 'INTER_' + interp_str.upper())
im = cameralib.reproject_image(
im_from_file, orig_cam, cam, (FLAGS.proc_side, FLAGS.proc_side),
antialias_factor=antialias, interp=interp)
im = augmentation.appearance.augment_appearance(
im, learning_phase, FLAGS.occlude_aug_prob_2d, appearance_rng)
im = tfu.nhwc_to_std(im)
im = improc.normalize01(im)
backward_matrix = cameralib.get_affine(cam, orig_cam)
joint_validity_mask = ~np.any(np.isnan(imcoords), axis=1)
with np.errstate(invalid='ignore'):
is_joint_in_fov = ~np.logical_or(np.any(imcoords < 0, axis=-1),
                                         np.any(imcoords >= FLAGS.proc_side, axis=-1))
""" Simple maze environment
"""
import numpy as np
# import cv2 #why is this needed?
from deer.base_classes import Environment
import matplotlib
#matplotlib.use('agg')
matplotlib.use('qt5agg')
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.patches import Circle, Rectangle
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
import copy
class MyEnv(Environment):
VALIDATION_MODE = 0
def __init__(self, rng, **kwargs):
self._mode = -1
self._mode_score = 0.0
self._mode_episode_count = 0
self._size_maze=8
self._higher_dim_obs=kwargs["higher_dim_obs"]
self.create_map()
self.intern_dim=2
def create_map(self):
self._map=np.ones((self._size_maze,self._size_maze))
self._map[-1,:]=0
self._map[0,:]=0
self._map[:,0]=0
self._map[:,-1]=0
self._map[:,self._size_maze//2]=0
self._map[self._size_maze//2,self._size_maze//2]=1
self._pos_agent=[2,2]
self._pos_goal=[self._size_maze-2,self._size_maze-2]
def reset(self, mode):
self.create_map()
self._map[self._size_maze//2,self._size_maze//2]=0
if mode == MyEnv.VALIDATION_MODE:
if self._mode != MyEnv.VALIDATION_MODE:
self._mode = MyEnv.VALIDATION_MODE
self._mode_score = 0.0
self._mode_episode_count = 0
else:
self._mode_episode_count += 1
elif self._mode != -1:
self._mode = -1
# Setting the starting position of the agent
self._pos_agent=[self._size_maze//2,self._size_maze//2]
#print ("new map:")
#print (self._map)
#print ("reset mode")
#print (mode)
return [1 * [self._size_maze * [self._size_maze * [0]]]]
def act(self, action):
"""Applies the agent action [action] on the environment.
Parameters
-----------
action : int
The action selected by the agent to operate on the environment. Should be an identifier
included between 0 included and nActions() excluded.
"""
self._cur_action=action
if(action==0):
if(self._map[self._pos_agent[0]-1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]-1
elif(action==1):
if(self._map[self._pos_agent[0]+1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]+1
elif(action==2):
if(self._map[self._pos_agent[0],self._pos_agent[1]-1]==1):
self._pos_agent[1]=self._pos_agent[1]-1
elif(action==3):
if(self._map[self._pos_agent[0],self._pos_agent[1]+1]==1):
self._pos_agent[1]=self._pos_agent[1]+1
# There is no reward in this simple environment
self.reward = 0
self._mode_score += self.reward
return self.reward
def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
""" Plot of the low-dimensional representation of the environment built by the model
"""
all_possib_inp=[] # Will store all possible inputs (=observation) for the CRAR agent
labels_maze=[]
self.create_map()
for y_a in range(self._size_maze):
for x_a in range(self._size_maze):
state=copy.deepcopy(self._map)
state[self._size_maze//2,self._size_maze//2]=0
if(state[x_a,y_a]==0):
if(self._higher_dim_obs==True):
all_possib_inp.append(self.get_higher_dim_obs([[x_a,y_a]],[self._pos_goal]))
else:
state[x_a,y_a]=0.5
all_possib_inp.append(state)
## labels
#if(y_a<self._size_maze//2):
# labels_maze.append(0.)
#elif(y_a==self._size_maze//2):
# labels_maze.append(1.)
#else:
# labels_maze.append(2.)
#arr=np.array(all_possib_inp)
#if(self._higher_dim_obs==False):
# arr=arr.reshape(arr.shape[0],-1)
#else:
# arr=arr.reshape(arr.shape[0],-1)
#
#np.savetxt('tsne_python/mazesH_X.txt',arr.reshape(arr.shape[0],-1))
#np.savetxt('tsne_python/mazesH_labels.txt',np.array(labels_maze))
all_possib_inp=np.expand_dims(np.array(all_possib_inp,dtype='float'),axis=1)
all_possib_abs_states=learning_algo.encoder.predict(all_possib_inp)
if(all_possib_abs_states.ndim==4):
all_possib_abs_states=np.transpose(all_possib_abs_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
n=1000
historics=[]
for i,observ in enumerate(test_data_set.observations()[0][0:n]):
historics.append(np.expand_dims(observ,axis=0))
historics=np.array(historics)
abs_states=learning_algo.encoder.predict(historics)
if(abs_states.ndim==4):
abs_states=np.transpose(abs_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
actions=test_data_set.actions()[0:n]
if self.inTerminalState() == False:
self._mode_episode_count += 1
print("== Mean score per episode is {} over {} episodes ==".format(self._mode_score / (self._mode_episode_count+0.0001), self._mode_episode_count))
m = cm.ScalarMappable(cmap=cm.jet)
x = np.array(abs_states)[:,0]
y = np.array(abs_states)[:,1]
if(self.intern_dim>2):
z = np.array(abs_states)[:,2]
fig = plt.figure()
if(self.intern_dim==2):
ax = fig.add_subplot(111)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
else:
ax = fig.add_subplot(111,projection='3d')
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_zlabel(r'$X_3$')
# Plot the estimated transitions
for i in range(n-1):
predicted1=learning_algo.transition.predict([abs_states[i:i+1],np.array([[1,0,0,0]])])
predicted2=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,1,0,0]])])
predicted3=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,0,1,0]])])
predicted4=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,0,0,1]])])
if(self.intern_dim==2):
ax.plot(np.concatenate([x[i:i+1],predicted1[0,:1]]), np.concatenate([y[i:i+1],predicted1[0,1:2]]), color="0.9", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted2[0,:1]]), np.concatenate([y[i:i+1],predicted2[0,1:2]]), color="0.65", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted3[0,:1]]), np.concatenate([y[i:i+1],predicted3[0,1:2]]), color="0.4", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted4[0,:1]]), np.concatenate([y[i:i+1],predicted4[0,1:2]]), color="0.15", alpha=0.75)
else:
ax.plot(np.concatenate([x[i:i+1],predicted1[0,:1]]), np.concatenate([y[i:i+1],predicted1[0,1:2]]), np.concatenate([z[i:i+1],predicted1[0,2:3]]), color="0.9", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted2[0,:1]]), np.concatenate([y[i:i+1],predicted2[0,1:2]]), np.concatenate([z[i:i+1],predicted2[0,2:3]]), color="0.65", alpha=0.75)
                ax.plot(np.concatenate([x[i:i+1],predicted3[0,:1]]), np.concatenate([y[i:i+1],predicted3[0,1:2]]), np.concatenate([z[i:i+1],predicted3[0,2:3]]), color="0.4", alpha=0.75)
                ax.plot(np.concatenate([x[i:i+1],predicted4[0,:1]]), np.concatenate([y[i:i+1],predicted4[0,1:2]]), np.concatenate([z[i:i+1],predicted4[0,2:3]]), color="0.15", alpha=0.75)
#!/usr/bin/env python
# coding: utf-8
import numpy as np
# Standardised Mean Squared Error
def smse(mu_star_list, Y_test_list):
error_k = []
for k in range(len(Y_test_list)):
res = mu_star_list[k] - Y_test_list[k]
error = (res**2).mean()
error = error / Y_test_list[k].var()
error_k.append(error)
return np.array(error_k)
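# Illustrative usage (shapes and values are assumptions, not taken from this repo):
# with K test sets, smse([mu_1, ..., mu_K], [Y_1, ..., Y_K]) returns a length-K
# array whose k-th entry is mean((mu_k - Y_k)**2) / var(Y_k), i.e. the MSE
# normalised by the variance of the test targets.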
# snlp
def snlp(var_star_list, Y_train_list, Y_test_list, mu_star_list):
error_k = []
for k in range(len(var_star_list)):
res = mu_star_list[k] - Y_test_list[k]
nlp = 0.5 * (np.log(2 * np.pi * var_star_list[k]) + res**2 / var_star_list[k]).mean()
muY = Y_train_list[k].mean()
varY = Y_train_list[k].var()
        error = nlp - 0.5 * (np.log(2 * np.pi * varY) + (Y_test_list[k] - muY)**2 / varY).mean()
        error_k.append(error)
    return np.array(error_k)
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with action and observation specifications.
These specifications can be nested lists and dicts of `Array` and its
subclass `BoundedArray`.
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type, TypeVar
from absl import flags
from absl import logging
import dm_env
from dm_env import specs
import numpy as np
# Internal profiling
FLAGS = flags.FLAGS
# Defaulting to True, to prefer failing fast and closer to the bug.
flags.DEFINE_boolean('debug_specs', True,
'Debugging switch for checking values match specs.')
flags.DEFINE_integer('max_validations', 1000,
'Stop validating after this many calls.')
_validation_count = 0
ObservationSpec = Mapping[str, specs.Array]
ObservationValue = Mapping[str, np.ndarray]
ScalarOrArray = TypeVar('ScalarOrArray', np.floating, np.ndarray)
def debugging_flag() -> bool:
return FLAGS.debug_specs
class TimeStepSpec(object):
"""Type specification for a TimeStep."""
def __init__(self, observation_spec: ObservationSpec,
reward_spec: specs.Array, discount_spec: specs.Array):
self._observation_spec = observation_spec
self._reward_spec = reward_spec
self._discount_spec = discount_spec
@property
def observation_spec(self) -> Mapping[str, specs.Array]:
return dict(self._observation_spec)
@property
def reward_spec(self) -> specs.Array:
return self._reward_spec
@property
def discount_spec(self) -> specs.Array:
return self._discount_spec
def validate(self, timestep: dm_env.TimeStep):
validate_observation(self.observation_spec, timestep.observation)
validate(self.reward_spec, timestep.reward)
validate(self.discount_spec, timestep.discount)
def minimum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = minimum(self._reward_spec)
discount = minimum(self._discount_spec)
observation = {k: minimum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def maximum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = maximum(self._reward_spec)
discount = maximum(self._discount_spec)
observation = {k: maximum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def replace(self,
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.Array] = None) -> 'TimeStepSpec':
"""Return a new TimeStepSpec with specified fields replaced."""
if observation_spec is None:
observation_spec = self._observation_spec
if reward_spec is None:
reward_spec = self._reward_spec
if discount_spec is None:
discount_spec = self._discount_spec
return TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
def __eq__(self, other):
if not isinstance(other, TimeStepSpec):
return False
# All the properties of the spec must be equal.
if self.reward_spec != other.reward_spec:
return False
if self.discount_spec != other.discount_spec:
return False
if len(self.observation_spec) != len(other.observation_spec):
return False
for key in self.observation_spec:
if (key not in other.observation_spec or
self.observation_spec[key] != other.observation_spec[key]):
return False
return True
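# Illustrative usage of TimeStepSpec (the spec shapes below are made up):
#   ts_spec = TimeStepSpec(
#       observation_spec={'pos': specs.Array(shape=(3,), dtype=np.float32)},
#       reward_spec=specs.Array(shape=(), dtype=np.float32),
#       discount_spec=specs.BoundedArray(
#           shape=(), dtype=np.float32, minimum=0., maximum=1.))
#   ts_spec.validate(timestep)    # raises if the timestep does not conform
#   mid_step = ts_spec.minimum()  # a valid TimeStep filled with minimum values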
def minimum(spec: specs.Array):
if hasattr(spec, 'minimum'):
return clip(np.asarray(spec.minimum, dtype=spec.dtype), spec)
elif np.issubdtype(spec.dtype, np.integer):
return np.full(spec.shape, np.iinfo(spec.dtype).min)
else:
return np.full(spec.shape, np.finfo(spec.dtype).min)
def maximum(spec: specs.Array):
if hasattr(spec, 'maximum'):
return clip(np.asarray(spec.maximum, dtype=spec.dtype), spec)
elif np.issubdtype(spec.dtype, np.integer):
return np.full(spec.shape, np.iinfo(spec.dtype).max)
else:
return np.full(spec.shape, np.finfo(spec.dtype).max)
def zeros(action_spec: specs.Array) -> np.ndarray:
"""Create a zero value for this Spec."""
return np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
def cast(spec: specs.Array, value: ScalarOrArray) -> ScalarOrArray:
"""Cast a value to conform to a spec."""
if np.isscalar(value):
return spec.dtype.type(value)
else:
return value.astype(spec.dtype)
def clip(value: np.ndarray, spec: specs.BoundedArray) -> np.ndarray:
"""Clips the given value according to the spec."""
if value is None:
raise ValueError('no value')
  if np.issubdtype(spec.dtype, np.inexact):
eps = np.finfo(spec.dtype).eps * 5.0
else:
eps = 0
min_bound = np.array(spec.minimum, dtype=spec.dtype)
max_bound = np.array(spec.maximum, dtype=spec.dtype)
return np.clip(value, min_bound + eps, max_bound - eps)
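# Illustrative example (spec and values are assumptions): for a BoundedArray
# with minimum=-1.0 and maximum=1.0 (float32),
#   clip(np.array([1.5, -2.0], dtype=np.float32), spec)
# returns approximately [1.0, -1.0], shifted inwards by a few machine epsilons.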
def shrink_to_fit(
value: np.ndarray,
spec: specs.BoundedArray,
ignore_nan: Optional[bool] = None,
) -> np.ndarray:
"""Scales the value towards zero to fit within spec min and max values.
Clipping is done after scaling to ensure there are no values that are very
slightly (say 10e-8) out of range.
This, by nature, assumes that min <= 0 <= max for the spec.
Args:
value: np.ndarray to scale towards zero.
spec: Specification for value to scale and clip.
ignore_nan: If True, NaN values will not fail validation. If None, this is
determined by the size of `value`, so that large values are not checked.
Returns:
Scaled and clipped value.
Raises:
ValueError: On missing values or high-dimensional values.
"""
if value is None:
raise ValueError('no value')
if spec is None:
raise ValueError('no spec')
if not isinstance(value, np.ndarray):
raise ValueError('value not numpy array ({})'.format(type(value)))
if len(value.shape) > 1:
raise ValueError('2d values not yet handled')
if not isinstance(spec, specs.BoundedArray):
raise ValueError('Cannot scale to spec: {})'.format(spec))
if np.any(spec.minimum > 0) or np.any(spec.maximum < 0):
raise ValueError('Cannot scale to spec, due to bounds: {})'.format(spec))
factor = 1.0
for val, min_val, max_val in zip(value, spec.minimum, spec.maximum):
if val < min_val:
new_factor = min_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
if val > max_val:
new_factor = max_val / val
if new_factor < factor and new_factor > 0:
factor = new_factor
scaled = (value * factor).astype(spec.dtype)
clipped = clip(scaled, spec)
try:
validate(spec, clipped, ignore_nan)
except ValueError:
logging.error('Failed to scale %s to %s. Got: %s', value, spec, clipped)
return clipped
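# Illustrative example (values are assumptions): with spec bounds [-1, 1] in
# each dimension, shrink_to_fit(np.array([2.0, 0.5]), spec) scales the whole
# vector by 0.5 (the factor needed to bring the worst offender inside the
# bounds), giving approximately [1.0, 0.25] before the final clip.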
def merge_specs(spec_list: Sequence[specs.BoundedArray]):
"""Merges a list of BoundedArray into one."""
# Check all specs are flat.
for spec in spec_list:
if len(spec.shape) > 1:
raise ValueError('Not merging multi-dimensional spec: {}'.format(spec))
# Filter out no-op specs with no actuators.
spec_list = [spec for spec in spec_list if spec.shape and spec.shape[0]]
dtype = np.find_common_type([spec.dtype for spec in spec_list], [])
num_actions = 0
name = ''
mins = np.array([], dtype=dtype)
maxs = np.array([], dtype=dtype)
for i, spec in enumerate(spec_list):
num_actions += spec.shape[0]
if name:
name += '\t'
name += spec.name or f'spec_{i}'
mins = np.concatenate([mins, spec.minimum])
maxs = | np.concatenate([maxs, spec.maximum]) | numpy.concatenate |
# %%
#import image_previewer
import glob
import os
from corebreakout import CoreColumn
import pickle
import numpy as np
import matplotlib.pyplot as plt
import colorsys
def slice_depths(top, base, slice_length):
length = base - top
n_slices = int(np.ceil(length / slice_length))
slices = []
for i in range(n_slices):
top_slice = top + i * slice_length
if i == n_slices-1:
base_slice = base
else:
base_slice = top + (i + 1) * slice_length
slices.append((top_slice, base_slice))
return slices
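# Worked example (values are illustrative): slice_depths(0.0, 0.35, 0.1)
# returns [(0.0, 0.1), (0.1, 0.2), (0.2, 0.3), (0.3, 0.35)] -- the last slice
# is shortened so that the slices exactly cover [top, base].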
# def plot_column_slices(column_path, slice_length, figsize = (9, 800)):
# with open(column_path, 'rb') as f:
# col = pickle.load(f)
# column_top, column_base = column_depths_from_path(column_path)
# column_length = column_base - column_top
# if column_length <= slice_length:
# col.slice_depth(top = column_top,
# base = column_base).plot(
# figsize=figsize,
# major_kwargs = {'labelsize' : 10},
# minor_kwargs={'labelsize' : 6})
# else:
# depths = slice_depths(column_top, column_base, slice_length)
# for i in range(len(depths)):
# top_slice, base_slice = depths[i]
# col.slice_depth(top = top_slice,
# base = base_slice).plot(
# figsize=figsize,
# major_kwargs = {'labelsize' : 15},
# minor_kwargs={'labelsize' : 10})
# plt.show()
def img_features(img):
"""retruns mean and std of img per channel ignoring 0 values (background)
Args:
img (np.ndarray): image array
Returns:
avgs list, means list: lists of means and stds
"""
features = []
for ch in range(3):
pixels = img[:,:,ch].flatten()
pixels = pixels[pixels!=0]
if len(pixels) == 0:
avg = np.nan
std = np.nan
else:
avg = np.average(pixels)/255.0
            std = np.std(pixels)/255.0
features.append(avg)
features.append(std)
return features
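# Note: the returned feature vector is [mean_R, std_R, mean_G, std_G, mean_B,
# std_B], with each entry scaled to [0, 1] by dividing by 255; fully black
# (all-zero) channels yield NaN entries, which callers need to handle.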
def column_features(column, slice_length=0.01, color_scheme='rgb'):
    print('Processing column from depth {:.2f} to {:.2f}'.format(column.top, column.base))
    col_features = []
    column_top = column.top
    column_base = column.base
    slices = slice_depths(column_top, column_base, slice_length)
    for i in range(len(slices)):
        top, base = slices[i]
        img = column.slice_depth(top=top, base=base).img
        features = img_features(img)
        if color_scheme == 'hls':
            # convert the per-channel means (R, G, B) to HLS; the stds stay as-is
            features[0], features[2], features[4] = colorsys.rgb_to_hls(
                features[0], features[2], features[4])
        col_features.append(features)
    return np.array(col_features)
directory = 'output\\article'
column_paths = glob.glob(directory + '/*/*.pkl')
print(len(column_paths), 'colomns detected')
# DELETE COLLAPSED COLUMNS
# collapse_columns = []
# for col_idx, column_path in enumerate(column_paths):
# with open(column_path, 'rb') as f:
# col = pickle.load(f)
# if col.add_mode == 'collapse':
# collapse_columns.append(column_path)
# print(len(collapse_columns), 'collapsed columns')
# for column_path in collapse_columns:
# os.remove(column_path)
#%%
step = 0.05 #0.1524
for col_idx, column_path in enumerate(column_paths):
if col_idx == 1:
break
with open(column_path, 'rb') as f:
col = pickle.load(f)
print(col_idx, col, col.add_mode)
img = col.img
img_depths = col.depths
column_top = col.top
column_base = col.base
column_length = column_base - column_top
print('column path:', column_path, 'Column length:', column_length)
features = column_features(col, slice_length=step, color_scheme='rgb')
n_steps = int(np.ceil((column_base-column_top)/step))
depths = np.linspace(column_top, column_base, n_steps)
print('Features shape:',features.shape,'Depth shape:', depths.shape)
# create two columns figure
figure_length = int(column_length)*8
figsize = (10, figure_length)
fig, axs = plt.subplots(1, 2, sharex=False, sharey=False, figsize = figsize)
axs[0].imshow(img)
axs[1].plot(features[:,0], depths, label='red', color='red')
axs[1].plot(features[:,1], depths, label='red_std', color='lightcoral')
axs[1].plot(features[:,2], depths, label='green', color='green')
axs[1].plot(features[:,3], depths, label='green_std', color='lightgreen')
axs[1].plot(features[:,4], depths, label='blue', color='blue')
axs[1].plot(features[:,5], depths, label='blue_std', color='lightblue')
axs[1].set_ylim(column_base, column_top)
plt.grid()
plt.show()
# %%
directory = r'C:\Users\evgen\Documents\coremdlr\Q204_data\train_data_figshare'
wells = [
'204-19-3A',
'204-19-6',
'204-19-7',
'204-20-1Z',
'204-20-1',
'204-20-2',
'204-20-3',
'204-20-6a',
'204-20a-7',
'204-24a-6',
'204-24a-7',
'205-21b-3',
]
labels_files = [os.path.join(directory, well + '_labels.npy') for well in wells]
image_files = [os.path.join(directory, well + '_image.npy') for well in wells]
depth_files = [os.path.join(directory, well + '_depth.npy') for well in wells]
for i in range(len(image_files)):
image = np.load(image_files[i])
labels = np.load(labels_files[i])
depth = np.load(depth_files[i])
print(wells[i], image.shape, labels.shape, depth.shape)
# %%
image = np.load(image_files[0])
labels = np.load(labels_files[0])
print(image.shape, labels.shape)
# print unique labels
unique_labels = np.unique(labels)
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
labels = label_encoder.fit_transform(labels)
print(label_encoder.classes_, label_encoder.transform(label_encoder.classes_))
# %%
# calculate statistics for each z position of image
def statistics(image):
stats = []
for z in range(image.shape[0]):
img_slice = image[z,:,:]
slice_features = []
for ch in range(3):
pixels = img_slice[:,ch].flatten()
pixels = pixels[pixels!=0]
if len(pixels) == 0:
avg = np.nan
std = np.nan
else:
avg = np.average(pixels)/255.0
std = np.std(pixels)/255.0
slice_features.append(avg)
slice_features.append(std)
stats.append(slice_features)
arr = np.array(stats)
return arr
# stats = statistics(image)
# print(stats.shape)
# %%
test_indices = [2,5,8]
train_indices = [0,1,3,4,6,7,9,10,11]
train_labels_files = [labels_files[i] for i in train_indices]
train_images_files = [image_files[i] for i in train_indices]
test_labels_files = [labels_files[i] for i in test_indices]
test_images_files = [image_files[i] for i in test_indices]
X_train = np.vstack([statistics(np.load(f)) for f in train_images_files])
X_test = np.vstack([statistics(np.load(f)) for f in test_images_files])
y_train=np.hstack([label_encoder.transform(np.load(f)) for f in train_labels_files])
y_test = np.hstack([label_encoder.transform(np.load(f)) for f in test_labels_files])
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
#%%
# get nan indices in train
nan_indices_train = np.unique(np.where(np.isnan(X_train))[0])
X_train = np.delete(X_train, nan_indices_train, axis=0)
y_train = | np.delete(y_train, nan_indices_train, axis=0) | numpy.delete |
import numpy as np
import os
from re import search
import src.numerics as num
import src.fpeqs as fpe
from src.optimal_lambda import (
optimal_lambda,
optimal_reg_param_and_huber_parameter,
)
DATA_FOLDER_PATH = "./data" # "/Volumes/LaCie/final_data_hproblem" # # "/Volumes/LaCie/final_data_hproblem" # # #
FOLDER_PATHS = [
"./data/experiments",
"./data/theory",
"./data/bayes_optimal",
"./data/reg_param_optimal",
"./data/reg_param_optimal_experimental",
"./data/reg_and_huber_param_optimal",
"./data/reg_and_huber_param_optimal_experimental",
"./data/others",
]
REG_EXPS = [
"(^exp)",
"(^theory)",
"(BO|Bayes[ ]{0,1}Optimal)",
"((reg[\_\s]{0,1}param|lambda)[\_\s]{0,1}optimal$)",
"((reg[\_\s]{0,1}param|lambda)[\_\s]{0,1}(optimal)[\_\s]{1}(exp))",
"((reg[\_\s]{0,1}param|lambda)[\s]{1}(huber[\_\s]{0,1}param)[\_\s]{0,1}optimal$)",
"((reg[\_\s]{0,1}param|lambda)[\s]{1}(huber[\_\s]{0,1}param)[\_\s]{0,1}optimal)[\_\s]{1}(exp)",
]
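# The order of REG_EXPS matches FOLDER_PATHS (and the *_NAMES lists below):
# index 0 -> experiments, 1 -> theory, 2 -> Bayes optimal, 3 -> optimal
# reg_param, 4 -> optimal reg_param (experimental), 5 -> optimal reg_param and
# huber_param, 6 -> optimal reg_param and huber_param (experimental); anything
# else falls through to the "others" folder.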
LOSS_NAMES = ["L2", "L1", "Huber"]
EXPERIMENTAL_FUNCTIONS = []
SINGLE_NOISE_NAMES = [
"{loss_name} single noise - exp - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - dim {n_features:d} - rep {repetitions:d} - delta {delta} - lambda {reg_param}",
"{loss_name} single noise - theory - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta} - lambda {reg_param}",
"BO single noise - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta}",
"{loss_name} single noise - reg_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta}",
"{loss_name} single noise - reg_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta}",
"Huber single noise - reg_param and huber_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta}",
"Huber single noise - reg_param and huber_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta}",
"{loss_name} single noise - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta {delta} - lambda {reg_param}",
]
DOUBLE_NOISE_NAMES = [
"{loss_name} double noise - eps {percentage} - exp - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - dim {n_features:d} - rep {repetitions:d} - delta [{delta_small} {delta_large}] - lambda {reg_param}",
"{loss_name} double noise - eps {percentage} - theory - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}] - lambda {reg_param}",
"BO double noise - eps {percentage} - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} double noise - eps {percentage} - reg_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} double noise - eps {percentage} - reg_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"Huber double noise - eps {percentage} - reg_param and huber_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"Huber double noise - eps {percentage} - reg_param and huber_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} double noise - eps {percentage} - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}] - lambda {reg_param}",
]
DECORRELATED_NOISE_NAMES = [
"{loss_name} decorrelated noise {beta} - eps {percentage} - exp - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - dim {n_features:d} - rep {repetitions:d} - delta [{delta_small} {delta_large}] - lambda {reg_param}",
"{loss_name} decorrelated noise {beta} - eps {percentage} - theory - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}] - lambda {reg_param}",
"BO decorrelated noise {beta} - eps {percentage} - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} decorrelated noise {beta} - eps {percentage} - reg_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} decorrelated noise {beta} - eps {percentage} - reg_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"Huber decorrelated noise {beta} - eps {percentage} - reg_param and huber_param optimal - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"Huber decorrelated noise {beta} - eps {percentage} - reg_param and huber_param optimal experimental - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}]",
"{loss_name} decorrelated noise {beta} - eps {percentage} - alphas [{alpha_min} {alpha_max} {alpha_pts:d}] - delta [{delta_small} {delta_large}] - lambda {reg_param}",
]
# ------------
def _exp_type_choser(test_string, values=[0, 1, 2, 3, 4, 5, 6, -1]):
for idx, re in enumerate(REG_EXPS):
if search(re, test_string):
return values[idx]
return values[-1]
def _loss_type_chose(test_string, values=[0, 1, 2, 3, 4, 5, 6, -1]):
for idx, re in enumerate(LOSS_NAMES):
if search(re, test_string):
return values[idx]
return values[-1]
def file_name_generator(**kwargs):
experiment_code = _exp_type_choser(kwargs["experiment_type"])
if not (experiment_code == 2 or experiment_code == 5 or experiment_code == 6):
if kwargs["loss_name"] == "Huber":
kwargs["loss_name"] += " " + str(kwargs.get("a", 1.0))
if kwargs.get("beta") is not None:
return DECORRELATED_NOISE_NAMES[experiment_code].format(**kwargs)
else:
if float(kwargs.get("percentage", 0.0)) == 0.0:
return SINGLE_NOISE_NAMES[experiment_code].format(**kwargs)
else:
return DOUBLE_NOISE_NAMES[experiment_code].format(**kwargs)
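# Illustrative usage (argument values are made up): a theory curve with a
# single noise source, e.g.
#   file_name_generator(experiment_type="theory", loss_name="L2",
#                       alpha_min=0.01, alpha_max=100, alpha_pts=36,
#                       delta=0.5, reg_param=0.1)
# fills SINGLE_NOISE_NAMES[1] with those fields; a non-zero `percentage`
# switches to DOUBLE_NOISE_NAMES and a `beta` keyword to DECORRELATED_NOISE_NAMES.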
def create_check_folders():
    if not os.path.exists(DATA_FOLDER_PATH):
        os.makedirs(DATA_FOLDER_PATH)
    for folder_path in FOLDER_PATHS:
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
def check_saved(**kwargs):
create_check_folders()
experiment_code = _exp_type_choser(kwargs["experiment_type"])
folder_path = FOLDER_PATHS[experiment_code]
file_path = os.path.join(folder_path, file_name_generator(**kwargs))
file_exists = os.path.exists(file_path + ".npz")
return file_exists, (file_path + ".npz")
def save_file(**kwargs):
file_path = kwargs.get("file_path")
experiment_code = _exp_type_choser(kwargs["experiment_type"])
if file_path is None:
file_path = os.path.join(
FOLDER_PATHS[experiment_code], file_name_generator(**kwargs)
)
if experiment_code == 0:
np.savez(
file_path,
alphas=kwargs["alphas"],
errors_mean=kwargs["errors_mean"],
errors_std=kwargs["errors_std"],
)
elif experiment_code == 1 or experiment_code == 2:
np.savez(file_path, alphas=kwargs["alphas"], errors=kwargs["errors"])
elif experiment_code == 3:
np.savez(
file_path,
alphas=kwargs["alphas"],
errors=kwargs["errors"],
lambdas=kwargs["lambdas"],
)
elif experiment_code == 4:
np.savez(
file_path,
alphas=kwargs["alphas"],
errors_mean=kwargs["errors_mean"],
errors_std=kwargs["errors_std"],
lambdas=kwargs["lambdas"],
)
elif experiment_code == 5:
np.savez(
file_path,
alphas=kwargs["alphas"],
errors=kwargs["errors"],
lambdas=kwargs["lambdas"],
huber_params=kwargs["huber_params"],
)
elif experiment_code == 6:
np.savez(
file_path,
alphas=kwargs["alphas"],
errors_mean=kwargs["errors_mean"],
errors_std=kwargs["errors_std"],
lambdas=kwargs["lambdas"],
huber_params=kwargs["huber_params"],
)
else:
raise ValueError("experiment_type not recognized.")
def load_file(**kwargs):
file_path = kwargs.get("file_path")
experiment_code = _exp_type_choser(kwargs["experiment_type"])
if file_path is None:
file_path = os.path.join(
FOLDER_PATHS[experiment_code], file_name_generator(**kwargs) + ".npz"
)
saved_data = np.load(file_path)
if experiment_code == 0:
alphas = saved_data["alphas"]
errors_mean = saved_data["errors_mean"]
errors_std = saved_data["errors_std"]
return alphas, errors_mean, errors_std
elif experiment_code == 1 or experiment_code == 2:
alphas = saved_data["alphas"]
errors = saved_data["errors"]
return alphas, errors
elif experiment_code == 3:
alphas = saved_data["alphas"]
errors = saved_data["errors"]
lambdas = saved_data["lambdas"]
return alphas, errors, lambdas
elif experiment_code == 4:
alphas = saved_data["alphas"]
errors_mean = saved_data["errors_mean"]
errors_std = saved_data["errors_std"]
lambdas = saved_data["lambdas"]
return alphas, errors_mean, errors_std, lambdas
elif experiment_code == 5:
alphas = saved_data["alphas"]
errors = saved_data["errors"]
lambdas = saved_data["lambdas"]
huber_params = saved_data["huber_params"]
return alphas, errors, lambdas, huber_params
elif experiment_code == 6:
alphas = saved_data["alphas"]
errors_mean = saved_data["errors_mean"]
errors_std = saved_data["errors_std"]
lambdas = saved_data["lambdas"]
huber_params = saved_data["huber_params"]
return alphas, errors_mean, errors_std, lambdas, huber_params
else:
raise ValueError("experiment_type not recognized.")
# ------------
def experiment_runner(**kwargs):
experiment_code = _exp_type_choser(kwargs["experiment_type"])
if experiment_code == 0:
experimental_points_runner(**kwargs)
elif experiment_code == 1:
theory_curve_runner(**kwargs)
elif experiment_code == 2:
bayes_optimal_runner(**kwargs)
elif experiment_code == 3:
reg_param_optimal_runner(**kwargs)
elif experiment_code == 4:
reg_param_optimal_experiment_runner(**kwargs)
elif experiment_code == 5:
reg_param_and_huber_param_optimal_runner(**kwargs)
elif experiment_code == 6:
reg_param_and_huber_param_experimental_optimal_runner(**kwargs)
else:
raise ValueError("experiment_type not recognized.")
def experimental_points_runner(**kwargs):
_, file_path = check_saved(**kwargs)
double_noise = not float(kwargs.get("percentage", 0.0)) == 0.0
decorrelated_noise = not (kwargs.get("beta", 1.0) == 1.0)
if decorrelated_noise:
measure_fun_kwargs = {
"delta_small": kwargs["delta_small"],
"delta_large": kwargs["delta_large"],
"percentage": kwargs["percentage"],
"beta": kwargs["beta"],
}
error_function = num.measure_gen_decorrelated
else:
if double_noise:
error_function = num.measure_gen_double
measure_fun_kwargs = {
"delta_small": kwargs["delta_small"],
"delta_large": kwargs["delta_large"],
"percentage": kwargs["percentage"],
}
else:
error_function = num.measure_gen_single
measure_fun_kwargs = {"delta": kwargs["delta"]}
if kwargs["loss_name"] == "Huber":
find_coefficients_fun_kwargs = {"a": kwargs["a"]}
else:
find_coefficients_fun_kwargs = {}
alphas, errors_mean, errors_std = num.generate_different_alpha(
error_function,
_loss_type_chose(
kwargs["loss_name"],
values=[
num.find_coefficients_L2,
-1, # num.find_coefficients_L1,
num.find_coefficients_Huber,
-1,
],
),
alpha_1=kwargs["alpha_min"],
alpha_2=kwargs["alpha_max"],
n_features=kwargs["n_features"],
n_alpha_points=kwargs["alpha_pts"],
repetitions=kwargs["repetitions"],
reg_param=kwargs["reg_param"],
measure_fun_kwargs=measure_fun_kwargs,
find_coefficients_fun_kwargs=find_coefficients_fun_kwargs,
)
kwargs.update(
{
"file_path": file_path,
"alphas": alphas,
"errors_mean": errors_mean,
"errors_std": errors_std,
}
)
save_file(**kwargs)
def theory_curve_runner(**kwargs):
_, file_path = check_saved(**kwargs)
double_noise = not float(kwargs.get("percentage", 0.0)) == 0.0
decorrelated_noise = not (kwargs.get("beta", 1.0) == 1.0)
if decorrelated_noise:
var_hat_kwargs = {
"delta_small": kwargs["delta_small"],
"delta_large": kwargs["delta_large"],
"percentage": kwargs["percentage"],
"beta": kwargs["beta"],
}
delta_small = kwargs["delta_small"]
delta_large = kwargs["delta_large"]
while True:
m = 0.89 * np.random.random() + 0.1
q = 0.89 * np.random.random() + 0.1
sigma = 0.89 * np.random.random() + 0.1
if | np.square(m) | numpy.square |
# -------------------------------------------------------------------
import cv2
import numpy as np
import time
from enum import Enum
# =============================================================================
# Ref. design
# https://github.com/Xilinx/Vitis-AI/blob/v1.1/mpsoc/vitis_ai_dnndk_samples/tf_yolov3_voc_py/tf_yolov3_voc.py
# From Vitis-AI Zoo
# 1. data channel order: BGR(0~255)
# 2. resize: 416 * 416(H * W)
# 3. mean_value: 0.0, 0.0, 0.0
# 4. scale: 1 / 255.0
# 5. reisze mode: biliner
# Data from yolov4_leaky_spp_m.prototxt
# and Xilinx yolov4-test.py
yolo_anchors = np.array([(12, 16), (19, 36), (40, 28), (36, 75), (76, 55), (72, 146), (142, 110), (192, 243),(459, 401)], np.float32) / 416
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
# -------------------------------------------------------------------
# YOLOv4 data collected from notebook (dpu_test.ipynb)
#
# inputTensor[0]: name=data_fixed, dims=[1, 416, 416, 3], dtype=xint8
#
# outputTensor[0]: name=layer138-conv_fixed, dims=[1, 52, 52, 255], dtype=xint8
# outputTensor[1]: name=layer149-conv_fixed, dims=[1, 26, 26, 255], dtype=xint8
# outputTensor[2]: name=layer160-conv_fixed, dims=[1, 13, 13, 255], dtype=xint8
# -------------------------------------------------------------------
# Load .xmodel downloaded from Vitis-AI repository
#yolov4_model_path = "models/yolov4_leaky_spp_m/yolov4_leaky_spp_m.xmodel"
yolov4_model_path = "models/yolov4_leaky_spp_m_pruned_0_36/yolov4_leaky_spp_m_pruned_0_36.xmodel"
# =============================================================================
# -------------------------------------------------------------------
def resize_with_padding(image, size):
# resize image with unchanged aspect ratio using padding
ih, iw, _ = image.shape
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw,nh), interpolation=cv2.INTER_LINEAR)
new_image = np.ones((h,w,3), np.uint8) * 128
h_start = (h-nh)//2
w_start = (w-nw)//2
new_image[h_start:h_start+nh, w_start:w_start+nw, :] = image
return new_image
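# Example (illustrative): a 640x480 frame resized to (416, 416) is scaled by
# min(416/640, 416/480) = 0.65 to 416x312, then centred on a 416x416 canvas
# filled with the value 128 (grey letterbox bars above and below).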
# -------------------------------------------------------------------
def preprocess_img(image, size, fixpos):
image = image[...,::-1]
image = resize_with_padding(image, size)
image_data = np.array(image, dtype='float32', order='C')
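    # fixpos is assumed to be the fixed-point position of the DPU input tensor;
    # multiplying by 2**fixpos / 255 maps the 0-255 pixel values onto the
    # quantised input range (equivalent to normalising to [0, 1] and then
    # converting to fixed point).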
fix_scale = 2**fixpos
image_data *= fix_scale/255
image_data = np.expand_dims(image_data, 0)
return image_data
# -------------------------------------------------------------------
def sigmoid(x):
return 1 / (1 + | np.exp(-x) | numpy.exp |
import numpy as np
from autoarray.structures import grids
from autogalaxy.profiles import geometry_profiles
from autogalaxy.profiles import mass_profiles as mp
from autogalaxy import convert
import typing
from scipy.interpolate import griddata
from autogalaxy import exc
class MassSheet(geometry_profiles.SphericalProfile, mp.MassProfile):
def __init__(
self, centre: typing.Tuple[float, float] = (0.0, 0.0), kappa: float = 0.0
):
"""
Represents a mass-sheet
Parameters
----------
centre: (float, float)
The (y,x) arc-second coordinates of the profile centre.
kappa : float
The magnitude of the convergence of the mass-sheet.
"""
super(MassSheet, self).__init__(centre=centre)
self.kappa = kappa
def convergence_func(self, grid_radius):
return 0.0
@grids.grid_like_to_structure
def convergence_from_grid(self, grid):
return | np.full(shape=grid.shape[0], fill_value=self.kappa) | numpy.full |
import torch
import os
from torch.distributions import Normal
import gym
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import cv2
from itertools import permutations
import h5py
from sklearn.feature_selection import mutual_info_regression
import matplotlib.ticker as ticker
from a2c_ppo_acktr.envs import FetchWrapper #TODO remove fetch and add meta-world instead
from a2c_ppo_acktr.utils import load_expert
#TODO remove any 'fetch' related thing from the repo
from a2c_ppo_acktr.utils import generate_latent_codes
class Base:
def __init__(self, args, env, actor_critic, filename, obsfilt, vae_data):
if args.fetch_env:
self.env = FetchWrapper(env)
else:
self.env = env
self.actor_critic = actor_critic
self.args = args
self.obsfilt = obsfilt
self.filename = filename
self.vae_data = vae_data
self.vae_mus = vae_data[0]
assert not args.vanilla, 'Vanilla GAIL benchmarking not implemented'
self.max_episode_steps = self._get_max_episode_steps()
def resolve_latent_code(self, states, actions, i):
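        # For SOG-GAIL, search for the latent code that best explains expert
        # trajectory i; for VAE-GAIL, reuse the stored posterior mean of that
        # trajectory; for InfoGAIL, codes are not tied to trajectories, so a
        # fresh one is sampled.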
def _get_sog(args, actor_critic):
from a2c_ppo_acktr.algo.sog import OneHotSearch, BlockCoordinateSearch
if args.latent_optimizer == 'bcs':
SOG = BlockCoordinateSearch
elif args.latent_optimizer == 'ohs':
SOG = OneHotSearch
else:
raise NotImplementedError
return SOG(actor_critic, args)
device = self.args.device
if self.args.sog_gail:
sog = _get_sog(self.args, self.actor_critic)
return sog.resolve_latent_code(torch.from_numpy(self.obsfilt(states[i].cpu().numpy(), update=False)).float().to(device), actions[i].to(device))[:1]
elif self.args.vae_gail:
return self.vae_mus[i]
elif self.args.infogail:
return generate_latent_codes(self.args, count=1, eval=True)
else:
raise NotImplementedError
def _get_max_episode_steps(self):
return {
'Circles-v0': 1000,
'AntDir-v0': 200,
'HalfCheetahVel-v0': 200,
'FetchReach-v0': 50,
'HopperVel-v0': 1000,
'Walker-v0': 1000,
'HumanoidDir-v0': 1000,
}.get(self.args.env_name, 200)
class Play(Base):
def __init__(self, **kwargs):
super(Play, self).__init__(**kwargs)
if self.args.fetch_env:
self.max_episode_steps = self.env.env._max_episode_steps
else:
max_episode_time = 10
dt = kwargs['env'].dt
self.max_episode_steps = int(max_episode_time / dt)
def play(self):
args = self.args
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
if args.fetch_env:
video_size = (500, 500)
else:
video_size = (250, 250)
video_writer = cv2.VideoWriter(f'{self.filename}.avi', fourcc, 1 / self.env.dt, video_size)
s = self.env.reset()
if (args.fetch_env or args.mujoco) and args.continuous and args.env_name != 'HalfCheetahVel-v0':
expert = torch.load(args.expert_filename, map_location=args.device)
count = 30 if args.fetch_env else 5 # for fetch, try 30 of the goals from the expert trajectories
####### recover expert embeddings #######
expert_len = len(expert['states'])
sample_idx = torch.randint(low=0, high=expert_len, size=(count,))
states, actions, desired_goals = [expert.get(key, [None]*expert_len)[sample_idx] for key in ('states', 'actions', 'desired_goal')] # only keep the trajectories specified by `sample_idx`
latent_codes = [self.resolve_latent_code(states, actions, i) for i in range(len(states))]
else:
# if 'Humanoid' in args.env_name and args.continuous:
# expert = load_expert(args.expert_filename, device=args.device)
# ####### recover expert embeddings #######
# count = 5
# sample_idx = torch.randint(low=0, high=len(expert['states']), size=(count,))
# states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
# latent_codes = [self.resolve_latent_code(torch.from_numpy(self.obsfilt(states.cpu().numpy(), update=False)).float().to(args.device), actions, i) for i in len(states)]
# expert = load_expert(args.expert_filename, device=args.device)
# ####### recover expert embeddings #######
# latent_codes = list()
# possible_angles = torch.unique(expert['angles'])
# sog = self._get_sog()
# for angle in possible_angles:
# sample_idx = expert['angles'] == angle
# states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
# from tqdm import tqdm
# mode_list = []
# for (traj_states, traj_actions) in tqdm(zip(states, actions)):
# mode_list.append(sog.resolve_latent_code(torch.from_numpy(self.obsfilt(traj_states.cpu().numpy(), update=False)).float().to(args.device), traj_actions)[0])
# latent_codes.append(torch.stack(mode_list).mean(0))
count = None
if args.vae_gail and args.env_name == 'HalfCheetahVel-v0':
count = 30
latent_codes = generate_latent_codes(args, count=count, vae_data=self.vae_data, eval=True)
for j, latent_code in enumerate(latent_codes):
episode_reward = 0
if args.fetch_env:
self.env.set_desired_goal(desired_goals[j].cpu().numpy())
self.env._max_env_steps = 100
# print(desired_goals[j])
print(f'traj #{j+1}/{len(latent_codes)}')
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, _ = self.env.step(action)
episode_reward += r
if done:
break
I = self.env.render(mode='rgb_array')
I = cv2.cvtColor(I, cv2.COLOR_RGB2BGR)
I = cv2.resize(I, video_size)
video_writer.write(I)
if args.fetch_env:
pass
# achieved_goal = self.env.unwrapped.sim.data.get_site_xpos("robot0:grip").copy()
# success = self.env.unwrapped._is_success(achieved_goal, self.env.unwrapped.goal)
# print('success' if success else 'failed')
else:
print(f"episode reward:{episode_reward:3.3f}")
s = self.env.reset()
video_writer.write(np.zeros([*video_size, 3], dtype=np.uint8))
self.env.close()
video_writer.release()
cv2.destroyAllWindows()
class Plot(Base):
def plot(self):
return {'Circles-v0': self._circles_ellipses,
'Ellipses-v0': self._circles_ellipses,
'HalfCheetahVel-v0': self._halfcheetahvel,
'AntDir-v0': self._ant,
'FetchReach-v1': self._fetch,
'Walker2dVel-v0': self._walker_hopper,
'HopperVel-v0': self._walker_hopper,
'HumanoidDir-v0': self._humanoid,
}.get(self.args.env_name, lambda: None)()
def _circles_ellipses(self):
args, actor_critic, filename = self.args, self.actor_critic, self.filename
fig = plt.figure(figsize=(2, 3), dpi=300)
plt.set_cmap('gist_rainbow')
# plotting the actual circles/ellipses
if args.env_name == 'Circles-v0':
for r in args.radii:
t = np.linspace(0, 2 * np.pi, 200)
plt.plot(r * np.cos(t), r * np.sin(t) + r, color='#d0d0d0')
elif args.env_name == 'Ellipses-v0':
for rx, ry in np.array(args.radii).reshape(-1, 2):
t = np.linspace(0, 2 * np.pi, 200)
plt.plot(rx * np.cos(t), ry * np.sin(t) + ry, color='#d0d0d0')
max_r = np.max(np.abs(args.radii))
plt.axis('equal')
# plt.axis('off')
# Turn off tick labels
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.yaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
plt.xlim([-1 * max_r, 1 * max_r])
plt.ylim([-1.5 * max_r, 2.5 * max_r])
import gym_sog
env = gym.make(args.env_name, args=args)
obs = env.reset()
device = next(actor_critic.parameters()).device
count = 3
latent_codes = generate_latent_codes(args, count=count, vae_data=self.vae_data, eval=True)
# generate rollouts and plot them
for j, latent_code in enumerate(latent_codes):
latent_code = latent_code.unsqueeze(0)
for i in range(self.max_episode_steps):
# randomize latent code at each step in case of vanilla gail
if args.vanilla:
latent_code = generate_latent_codes(args)
# interacting with env
with torch.no_grad():
# an extra 0'th dimension is because actor critic works with "environment vectors" (see the training code)
obs = self.obsfilt(obs, update=False)
obs_tensor = torch.tensor(obs, dtype=torch.float32, device=device)[None]
_, actions_tensor, _ = actor_critic.act(obs_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
obs, _, _, _ = env.step(action)
# plotting the trajectory
plt.plot(env.loc_history[:, 0], env.loc_history[:, 1], color=plt.cm.Dark2.colors[j])
if args.vanilla:
break # one trajectory in vanilla mode is enough. if not, then rollout for each separate latent code
else:
obs = env.reset()
env.close()
plt.savefig(filename + '.png')
plt.close()
def _fetch(self):
args = self.args
actor_critic = self.actor_critic
obsfilt = self.obsfilt
filename = self.filename
# TODO
expert = load_expert(args.expert_filename)
count = 100 # how many number of expert trajectories
sample_idx = np.random.randint(low=0, high=len(expert['states']), size=(count,))
# sample_idx = np.arange(len(expert['states']))
states, actions, desired_goals = [expert[key][sample_idx] for key in ('states', 'actions', 'desired_goal')] # only keep the trajectories specified by `sample_idx`
# init env
env = gym.make(args.env_name)
# init plots
matplotlib.rcParams['legend.fontsize'] = 10
fig = plt.figure(figsize=(3,3), dpi=300)
ax = fig.gca(projection='3d')
normalize = lambda x: (x - np.array([1.34183226, 0.74910038, 0.53472284]))/.15 # map the motion range to the unit cube
s = self.env.reset()
for i in range(len(states)):
env.unwrapped.goal = desired_goals[i].cpu().numpy()
latent_code = self.resolve_latent_code(states, actions, i)
achieved_goals = np.zeros([env._max_episode_steps, 3])
s = env.reset()['observation']
for step in range(env._max_episode_steps):
s = obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
obs, _, _, _ = env.step(action)
s = obs['observation']
achieved_goals[step] = normalize(obs['achieved_goal'])
ax.plot(*achieved_goals.T)
if not args.infogail:
ax.scatter(*normalize(desired_goals).T)
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_zlim([-1,1])
plt.savefig(f'{filename}.png')
def _halfcheetahvel(self):
device = self.args.device
filename = self.filename
if self.args.vae_gail:
# 2100 x 1 or 2100 x 20
latent_codes = self.vae_mus
# 30 x 70 x 1 x 1 or 30 x 70 x 1 x 20
latent_codes = latent_codes.reshape(30, 70, 1, -1)
# latent_codes = latent_codes[:, :num_repeats]
x = np.linspace(1.5, 3, 30)
else:
num_codes, num_repeats = 100, 30
cdf = np.linspace(.1, .9, num_codes)
m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
# num_codes
latent_codes = m.icdf(torch.tensor(cdf, dtype=torch.float32)).to(device)
# num_codes x num_repeats x 1 x 1
latent_codes = latent_codes[:, None, None, None].expand(-1, num_repeats, -1, -1)
x = cdf
vel_mean = []
vel_std = []
for j, latent_code_group in enumerate(latent_codes):
print(f'{j+1} / {len(latent_codes)}')
vels = []
for k, latent_code in enumerate(latent_code_group):
print(f'\t - {k+1} / {len(latent_code_group)}')
s = self.env.reset()
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, infos = self.env.step(action)
vels.append(infos['forward_vel'])
            # fix for the slight offset of the dataset, which begins at 1.539 instead of exactly 1.5  # TODO: modify the dataset instead
rescale = lambda input, input_low, input_high, output_low, output_high: ((input - input_low) / (input_high - input_low)) * (output_high - output_low) + output_low
vels = rescale(np.array(vels), 1.539, 3., 1.5, 3)
vel_mean.append(np.mean(vels))
vel_std.append(np.std(vels))
self.env.close()
vel_mean, vel_std = np.array(vel_mean), np.array(vel_std)
plt.figure(figsize=(3.5, 7/5*1.85), dpi=300)
plt.plot(x, vel_mean)#, marker='o', color='r')
plt.fill_between(x, vel_mean-vel_std, vel_mean+vel_std, alpha=0.2)
for bound in (1.5, 3):
plt.axhline(bound, linestyle='--', c='0.5')
plt.ylim([0,5])
plt.savefig(f'{filename}.png')
plt.close()
plt.figure()
plt.hist(vel_mean, bins=np.linspace(1.5, 3, 10))
plt.savefig(f'{filename}_hist.png')
plt.close()
def _ant(self):
args = self.args
fig = plt.figure(figsize=(3,3), dpi=300)
num_repeats = 3
if args.vae_gail:
all_codes = generate_latent_codes(args, vae_data=self.vae_data, eval=True)
else:
all_codes = torch.eye(args.latent_dim, device=args.device)
# Turn off tick labels
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.yaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.set_xlim([-70, 70])
ax.set_ylim([-70, 70])
m = []
for j, latent_code in enumerate(all_codes):
latent_code = latent_code[None]
for _ in range(num_repeats):
s = self.env.reset()
xpos = []
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, infos = self.env.step(action)
xpos.append(infos['xpos'])
xpos = np.array(xpos)
m.append(np.max(np.abs(xpos)))
ax.plot(xpos[:, 0], xpos[:, 1], color=(plt.cm.Dark2.colors[j]))
ax.plot([0], [0], marker='o', markersize=3, color='k')
if args.vae_gail or args.infogail:
m = max(m) * (np.random.rand() * 3 + 1)
ax.set_xlim([-m,m])
ax.set_ylim([-m,m])
plt.savefig(f'{self.filename}.png')
plt.close()
self.env.close()
def _humanoid(self):
args = self.args
fig = plt.figure(figsize=(3,3), dpi=300)
num_repeats = 3
if args.vae_gail:
all_codes = generate_latent_codes(args, vae_data=self.vae_data, eval=True)
elif args.continuous:
if args.sog_gail:
latent_codes = []
count = 3
expert = load_expert(args.expert_filename, device=args.device)
modes = expert['modes'].cpu().numpy()
unique_modes = np.unique(modes)
for i in range(args.num_clusters):
sample_idx = np.random.permutation(np.argwhere(modes==unique_modes[i]).squeeze())[:count]
states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
latent_codes.extend([self.resolve_latent_code(states, actions, i) for i in range(len(states))])
all_codes = torch.cat(latent_codes)
else:
raise NotImplementedError
else:
all_codes = torch.eye(args.latent_dim, device=args.device)
# Turn off tick labels
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.yaxis.set_major_locator(ticker.MultipleLocator(10.00))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.set_xlim([-300, 300])
ax.set_ylim([-300, 300])
m = []
for j, latent_code in enumerate(all_codes):
latent_code = latent_code[None]
for _ in range(num_repeats):
s = self.env.reset()
xvel = []
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=args.device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, infos = self.env.step(action)
xvel.append(s[22:24])
xpos = np.cumsum(np.array(xvel), axis=0)
m.append(np.max(np.abs(xpos)))
ax.plot(xpos[:, 0], xpos[:, 1], color=(plt.cm.Dark2.colors[j//count if (args.sog_gail and args.continuous) else j]))
ax.plot([0], [0], marker='o', markersize=3, color='k')
if args.vae_gail or args.infogail:
# TODO
m = max(m) * (np.random.rand() * 3 + 1)
ax.set_xlim([-m,m])
ax.set_ylim([-m,m])
plt.savefig(f'{self.filename}.png')
plt.close()
self.env.close()
def _walker_hopper(self):
args = self.args
device = args.device
filename = self.filename
count_per_mode = 3
if args.continuous:
expert = load_expert(args.expert_filename)
if self.args.vae_gail:
modes = load_expert(args.expert_filename)['modes']
latent_codes = self.vae_mus
# group latent codes into groups
latent_codes = [latent_codes[modes==m][:count_per_mode] for m in np.unique(modes)]
elif args.continuous:
if args.sog_gail:
latent_codes = []
expert = load_expert(args.expert_filename, device=args.device)
modes = expert['modes'].cpu().numpy()
unique_modes = np.unique(modes)
for i in range(args.num_clusters):
sample_idx = np.random.permutation(np.argwhere(modes==unique_modes[i]).squeeze())[:count_per_mode]
states, actions = [expert[key][sample_idx] for key in ('states', 'actions')] # only keep the trajectories specified by `sample_idx`
latent_codes.append(torch.cat([self.resolve_latent_code(states, actions, i) for i in range(len(states))]))
else:
latent_codes = torch.eye(args.latent_dim, device=args.device).unsqueeze(1).expand(-1, count_per_mode, -1)
#>>>>>> <<<<<<<
#>>>>>>>>>>>>>> find x, latent codes in all cases <<<<<<<<<<<<<<
#>>>>>> <<<<<<<
vels_all = []
for j, latent_code_group in enumerate(latent_codes):
vels_mode = []
for k, latent_code in enumerate(latent_code_group):
vels = []
latent_code = latent_code[None]
s = self.env.reset()
for step in range(self.max_episode_steps):
s = self.obsfilt(s, update=False)
s_tensor = torch.tensor(s, dtype=torch.float32, device=device)[None]
with torch.no_grad():
_, actions_tensor, _ = self.actor_critic.act(s_tensor, latent_code, deterministic=True)
action = actions_tensor[0].cpu().numpy()
s, r, done, infos = self.env.step(action)
vels.append(infos['forward_vel'])
vels_mode.append(vels)
vels_all.append(vels_mode)
self.env.close()
plt.figure(figsize=(3, 2), dpi=300)
for i, vels_mode in enumerate(vels_all):
for j, vels in enumerate(vels_mode):
plt.plot(np.arange(len(vels)), vels, color=plt.cm.Dark2.colors[i])
plt.xlim([0, self.max_episode_steps - 1])
plt.ylim([-3,3])
plt.savefig(f'{filename}.png')
plt.close()
class Benchmark(Base):
def collect_rewards(self, group):
"""
        Creates a matrix of rewards of latent codes vs radii, finds the best correspondence between the codes and the radii, and stores the results.
Returns:
all_mode_rewards_mean: numpy array of shape [latent_dim x latent_dim] containing mean reward collected in a trajectory
all_mode_rewards_std: numpy array of shape [latent_dim x latent_dim] containing std of rewards collected among trajectories
"""
args = self.args
device = args.device
num_modes = args.vae_num_modes if args.vae_gail else args.latent_dim
        assert num_modes <= 6, 'checking all permutations of too many latent code dimensions is prohibitively costly; try implementing the Hungarian method'
trajs_per_mode = 10
if group == 'expert':
return
# if args.env_name not in {'Circles-v0' or 'Ellipses-v0'} or 'expert' in self.h5:
# return
# from gym_sog.envs.circles_expert import CirclesExpert
# circles_expert = CirclesExpert(self.args)
all_mode_rewards_mean, all_mode_rewards_std = [], []
all_codes = generate_latent_codes(args, vae_data=self.vae_data, eval=True)
for i, latent_code in enumerate(all_codes):
print(group, i)
latent_code = latent_code[None]
all_traj_rewards = []
for _ in range(trajs_per_mode):
print(_)
obs = self.env.reset()
traj_rewards = np.zeros(num_modes)
for step in range(self.max_episode_steps):
# if group == 'expert':
# action = circles_expert.policy(obs, args.radii[i])
# else:
if True:
with torch.no_grad():
                        # an extra 0'th dimension is added because the actor-critic works with "environment vectors" (see the training code)
obs = self.obsfilt(obs, update=False)
obs_tensor = torch.tensor(obs, dtype=torch.float32, device=device)[None]
_, actions_tensor, _ = self.actor_critic.act(obs_tensor, latent_code, deterministic=True)
if np.random.rand() > args.test_perturb_amount:
action = actions_tensor[0].cpu().numpy()
else:
action = self.env.action_space.sample()
obs, _, _, infos = self.env.step(action)
traj_rewards += np.array(infos['rewards'])
# each element: [num_modes,]
all_traj_rewards.append(traj_rewards)
# [trajs_per_mode, num_modes]
all_traj_rewards = np.stack(all_traj_rewards)
# each element: [num_modes,]
all_mode_rewards_mean.append(all_traj_rewards.mean(axis=0))
all_mode_rewards_std.append(all_traj_rewards.std(axis=0))
# [num_modes, num_modes]
rew_mean, rew_std = np.stack(all_mode_rewards_mean), np.stack(all_mode_rewards_std)
# Record rewards according to best correspondence of latent codes and actual modes
max_reward, best_mean, best_std = -np.inf, None, None
for perm_mean, perm_std in zip(permutations(rew_mean), permutations(rew_std)):
tmp = np.array(perm_mean).trace()
if tmp > max_reward:
max_reward = tmp
best_mean = perm_mean
best_std = perm_std
d = {'mean': np.diag(np.array(best_mean)), 'std': np.diag(np.array(best_std))}
print(d)
self.store(group, d)
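        # Illustrative sketch (assumed 2x2 case) of the row-permutation matching above:
        #   rew_mean = [[1, 9],    row i = latent code i, column j = mean reward under mode j
        #               [8, 2]]
        # The two row orders have traces 1 + 2 = 3 and 8 + 9 = 17, so the best
        # assignment pairs code 0 with mode 1 and code 1 with mode 0, and the
        # stored 'mean' diagonal becomes [8, 9].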
def collect_mutual_info(self, group):
args = self.args
device = args.device
        num_codes, num_repeats = 30, 1  # 70
if args.vae_gail:
# 2100 x 1 or 2100 x 20
latent_codes = self.vae_mus
# 30 x 70 x 1 x 1 or 30 x 70 x 1 x 20
latent_codes = latent_codes.reshape(30, 70, 1, -1)
x = np.arange(30)
else:
cdf = | np.linspace(.1, .9, num_codes) | numpy.linspace |
from typing import Any, Set, Tuple, Union, Optional
from pathlib import Path
from collections import defaultdict
from html.parser import HTMLParser
import pytest
from anndata import AnnData
import numpy as np
import xarray as xr
from imageio import imread, imsave
import tifffile
from squidpy.im import ImageContainer
from squidpy.im._utils import CropCoords, CropPadding, _NULL_COORDS
from squidpy._constants._pkg_constants import Key
class SimpleHTMLValidator(HTMLParser): # modified from CellRank
def __init__(self, n_expected_rows: int, expected_tags: Set[str], **kwargs: Any):
super().__init__(**kwargs)
self._cnt = defaultdict(int)
self._n_rows = 0
self._n_expected_rows = n_expected_rows
self._expected_tags = expected_tags
def handle_starttag(self, tag: str, attrs: Any) -> None:
self._cnt[tag] += 1
self._n_rows += tag == "strong"
def handle_endtag(self, tag: str) -> None:
self._cnt[tag] -= 1
def validate(self) -> None:
assert self._n_rows == self._n_expected_rows
assert set(self._cnt.keys()) == self._expected_tags
if len(self._cnt):
assert set(self._cnt.values()) == {0}
class TestContainerIO:
def test_empty_initialization(self):
img = ImageContainer()
assert not len(img)
assert isinstance(img.data, xr.Dataset)
assert img.shape == (0, 0)
assert str(img)
assert repr(img)
def _test_initialize_from_dataset(self):
dataset = xr.Dataset({"foo": xr.DataArray(np.zeros((100, 100, 3)))}, attrs={"foo": "bar"})
img = ImageContainer._from_dataset(data=dataset)
assert img.data is not dataset
assert "foo" in img
assert img.shape == (100, 100)
np.testing.assert_array_equal(img.data.values(), dataset.values)
assert img.data.attrs == dataset.attrs
def test_save_load_zarr(self, tmpdir):
img = ImageContainer(np.random.normal(size=(100, 100, 1)))
img.data.attrs["scale"] = 42
img.save(Path(tmpdir) / "foo")
img2 = ImageContainer.load(Path(tmpdir) / "foo")
np.testing.assert_array_equal(img["image"].values, img2["image"].values)
np.testing.assert_array_equal(img.data.dims, img2.data.dims)
np.testing.assert_array_equal(sorted(img.data.attrs.keys()), sorted(img2.data.attrs.keys()))
for k, v in img.data.attrs.items():
assert type(v) == type(img2.data.attrs[k]) # noqa: E721
assert v == img2.data.attrs[k]
def test_load_zarr_2_objects_can_overwrite_store(self, tmpdir):
img = ImageContainer(np.random.normal(size=(100, 100, 1)))
img.data.attrs["scale"] = 42
img.save(Path(tmpdir) / "foo")
img2 = ImageContainer.load(Path(tmpdir) / "foo")
img2.data.attrs["sentinel"] = "foobar"
img2["image"].values += 42
img2.save(Path(tmpdir) / "foo")
img3 = ImageContainer.load(Path(tmpdir) / "foo")
assert "sentinel" in img3.data.attrs
assert img3.data.attrs["sentinel"] == "foobar"
np.testing.assert_array_equal(img3["image"].values, img2["image"].values)
np.testing.assert_allclose(img3["image"].values - 42, img["image"].values)
@pytest.mark.parametrize(
("shape1", "shape2"),
[
((100, 200, 3), (100, 200, 1)),
((100, 200, 3), (100, 200)),
],
)
def test_add_img(self, shape1: Tuple[int, ...], shape2: Tuple[int, ...]):
img_orig = np.random.randint(low=0, high=255, size=shape1, dtype=np.uint8)
cont = ImageContainer(img_orig, layer="img_orig")
img_new = np.random.randint(low=0, high=255, size=shape2, dtype=np.uint8)
cont.add_img(img_new, layer="img_new", channel_dim="mask")
assert "img_orig" in cont
assert "img_new" in cont
np.testing.assert_array_equal(np.squeeze(cont.data["img_new"]), np.squeeze(img_new))
@pytest.mark.parametrize("shape", [(100, 200, 3), (100, 200, 1)])
def test_load_jpeg(self, shape: Tuple[int, ...], tmpdir):
img_orig = np.random.randint(low=0, high=255, size=shape, dtype=np.uint8)
fname = tmpdir / "tmp.jpeg"
imsave(str(fname), img_orig)
gt = imread(str(fname)) # because of compression, we load again
cont = ImageContainer(str(fname))
np.testing.assert_array_equal(cont["image"].values.squeeze(), gt.squeeze())
@pytest.mark.parametrize("shape", [(100, 200, 3), (100, 200, 1), (10, 100, 200, 1)])
def test_load_tiff(self, shape: Tuple[int, ...], tmpdir):
img_orig = np.random.randint(low=0, high=255, size=shape, dtype=np.uint8)
fname = tmpdir / "tmp.tiff"
tifffile.imsave(fname, img_orig)
cont = ImageContainer(str(fname))
if len(shape) > 3: # multi-channel tiff
np.testing.assert_array_equal(cont["image"], img_orig[..., 0].transpose(1, 2, 0))
else:
np.testing.assert_array_equal(cont["image"], img_orig)
def test_load_netcdf(self, tmpdir):
arr = np.random.normal(size=(100, 10, 4))
ds = xr.Dataset({"quux": xr.DataArray(arr, dims=["foo", "bar", "baz"])})
fname = tmpdir / "tmp.nc"
ds.to_netcdf(str(fname))
cont = ImageContainer(str(fname))
assert len(cont) == 1
assert "quux" in cont
np.testing.assert_array_equal(cont["quux"], ds["quux"])
@pytest.mark.parametrize(
"array", [np.zeros((10, 10, 3), dtype=np.uint8), np.random.rand(10, 10, 1).astype(np.float32)]
)
def test_array_dtypes(self, array: Union[np.ndarray, xr.DataArray]):
img = ImageContainer(array)
np.testing.assert_array_equal(img["image"].data, array)
assert img["image"].data.dtype == array.dtype
img = ImageContainer(xr.DataArray(array))
np.testing.assert_array_equal(img["image"].data, array)
assert img["image"].data.dtype == array.dtype
def test_add_img_invalid_yx(self, small_cont_1c: ImageContainer):
arr = xr.DataArray(np.empty((small_cont_1c.shape[0] - 1, small_cont_1c.shape[1])), dims=["y", "x"])
with pytest.raises(ValueError, match=r".*be aligned because they have different dimension sizes"):
small_cont_1c.add_img(arr)
def test_xarray_remapping_spatial_dims(self):
cont = ImageContainer(np.empty((100, 10)))
cont.add_img(xr.DataArray(np.empty((100, 10)), dims=["foo", "bar"]), layer="baz")
assert "baz" in cont
assert len(cont) == 2
assert cont["baz"].dims == ("y", "x", "channels")
@pytest.mark.parametrize("n_channels", [2, 3, 11])
def test_add_img_number_of_channels(self, n_channels: int):
img = ImageContainer()
arr = np.random.rand(10, 10, n_channels)
img.add_img(arr)
assert img["image_0"].channels.shape == (n_channels,)
@pytest.mark.parametrize("n_channels", [1, 3])
@pytest.mark.parametrize("channel_dim", ["channels", "foo"])
def test_add_img_channel_dim(self, small_cont_1c: ImageContainer, channel_dim: str, n_channels: int):
arr = np.random.normal(size=(*small_cont_1c.shape, n_channels))
if channel_dim == "channels" and n_channels == 3:
with pytest.raises(ValueError, match=r".*be aligned because they have different dimension sizes"):
small_cont_1c.add_img(arr, channel_dim=channel_dim)
else:
small_cont_1c.add_img(arr, channel_dim=channel_dim, layer="bar")
assert len(small_cont_1c) == 2
assert "bar" in small_cont_1c
assert small_cont_1c["bar"].dims == ("y", "x", channel_dim)
np.testing.assert_array_equal(small_cont_1c["bar"], arr)
def test_delete(self, small_cont_1c: ImageContainer):
assert len(small_cont_1c) == 1
del small_cont_1c["image"]
assert len(small_cont_1c) == 0
with pytest.raises(KeyError, match=r"'image'"):
del small_cont_1c["image"]
class TestContainerCropping:
def test_padding_top_left(self, small_cont_1c: ImageContainer):
crop = small_cont_1c.crop_center(0, 0, 10)
data = crop["image"].data
assert crop.shape == (21, 21)
np.testing.assert_array_equal(data[:10, :10], 0)
np.testing.assert_array_equal(data[10:, 10:] != 0, True)
def test_padding_top_right(self, small_cont_1c: ImageContainer):
crop = small_cont_1c.crop_center(0, small_cont_1c.shape[1], 10)
data = crop["image"].data
assert crop.shape == (21, 21)
np.testing.assert_array_equal(data[:10, 10:], 0)
np.testing.assert_array_equal(data[10:, :10] != 0, True)
def test_padding_bottom_left(self, small_cont_1c: ImageContainer):
crop = small_cont_1c.crop_center(small_cont_1c.shape[1], 0, 10)
data = crop["image"].data
assert crop.shape == (21, 21)
np.testing.assert_array_equal(data[10:, :10], 0)
np.testing.assert_array_equal(data[:10, 10:] != 0, True)
def test_padding_bottom_right(self, small_cont_1c: ImageContainer):
crop = small_cont_1c.crop_center(small_cont_1c.shape[1], small_cont_1c.shape[1], 10)
data = crop["image"].data
assert crop.shape == (21, 21)
np.testing.assert_array_equal(data[10:, 10:], 0)
np.testing.assert_array_equal(data[:10, :10] != 0, True)
def test_padding_left_right(self, small_cont_1c: ImageContainer):
dim1, dim2, _ = small_cont_1c["image"].data.shape
crop = small_cont_1c.crop_center(dim1 // 2, 0, dim1 // 2)
data = crop["image"].data
np.testing.assert_array_equal(data[:, : dim2 // 2], 0)
crop = small_cont_1c.crop_center(dim1 // 2, dim2, dim1 // 2)
data = crop["image"].data
np.testing.assert_array_equal(data[:, dim2 // 2 :], 0)
def test_padding_top_bottom(self, small_cont_1c: ImageContainer):
dim1, dim2, _ = small_cont_1c["image"].data.shape
crop = small_cont_1c.crop_center(dim1, dim2 // 2, dim1 // 2)
data = crop["image"].data
np.testing.assert_array_equal(data[dim1 // 2 :, :], 0)
crop = small_cont_1c.crop_center(0, dim2 // 2, dim1 // 2)
data = crop["image"].data
np.testing.assert_array_equal(data[: dim2 // 2, :], 0)
def test_padding_all(self, small_cont_1c: ImageContainer):
dim1, dim2, _ = small_cont_1c["image"].data.shape
crop = small_cont_1c.crop_center(dim1 // 2, dim2 // 2, dim1)
data = crop["image"].data
np.testing.assert_array_equal(data[:, : dim2 // 2], 0)
np.testing.assert_array_equal(data[: dim2 // 2, :], 0)
@pytest.mark.parametrize("dy", [-10, 25, 0.3])
@pytest.mark.parametrize("dx", [-10, 30, 0.5])
def test_crop_corner_size(
self, small_cont_1c: ImageContainer, dy: Optional[Union[int, float]], dx: Optional[Union[int, float]]
):
crop = small_cont_1c.crop_corner(dy, dx, size=20)
# original coordinates
ody, odx = max(dy, 0), max(dx, 0)
ody = int(ody * small_cont_1c.shape[0]) if isinstance(ody, float) else ody
odx = int(odx * small_cont_1c.shape[1]) if isinstance(odx, float) else odx
# crop coordinates
cdy = 0 if isinstance(dy, float) or dy > 0 else dy
cdx = 0 if isinstance(dx, float) or dx > 0 else dx
cdy, cdx = abs(cdy), abs(cdx)
assert crop.shape == (20, 20)
cdata, odata = crop["image"].data, small_cont_1c["image"].data
cdata = cdata[cdy:, cdx:]
np.testing.assert_array_equal(cdata, odata[ody : ody + cdata.shape[0], odx : odx + cdata.shape[1]])
@pytest.mark.parametrize("scale", [0, 0.5, 1.0, 1.5, 2.0])
def test_crop_corner_scale(self, scale: float):
shape_img = (50, 50)
img = ImageContainer(np.zeros(shape_img))
if scale <= 0:
with pytest.raises(ValueError, match=r"Expected `scale` to be positive, found `0`."):
img.crop_corner(10, 10, size=20, scale=scale)
else:
crop = img.crop_corner(10, 10, size=20, scale=scale)
assert crop.shape == tuple(int(i * scale) for i in (20, 20))
@pytest.mark.parametrize("cval", [0.5, 1.0, 2.0])
def test_test_crop_corner_cval(self, cval: float):
shape_img = (50, 50)
img = ImageContainer(np.zeros(shape_img))
crop = img.crop_corner(10, 10, cval=cval)
np.testing.assert_array_equal(crop["image"].data[-10:, -10:], cval)
@pytest.mark.parametrize("size", [(10, 10), (10, 11)])
def test_crop_corner_mask_circle(self, small_cont_1c: ImageContainer, size: Tuple[int, int]):
if size[0] != size[1]:
with pytest.raises(ValueError, match=r"Masking circle is only"):
small_cont_1c.crop_corner(0, 0, size=size, mask_circle=True, cval=np.nan)
else:
crop = small_cont_1c.crop_corner(0, 0, size=20, mask_circle=True, cval=np.nan)
mask = (crop.data.x - 10) ** 2 + (crop.data.y - 10) ** 2 <= 10 ** 2
assert crop.shape == (20, 20)
np.testing.assert_array_equal(crop["image"].values[..., 0][~mask.values], np.nan)
@pytest.mark.parametrize("ry", [23, 1.0])
@pytest.mark.parametrize("rx", [30, 0.5])
def test_crop_center_radius(
self, small_cont_1c: ImageContainer, ry: Optional[Union[int, float]], rx: Optional[Union[int, float]]
):
crop = small_cont_1c.crop_center(0, 0, radius=(ry, rx))
sy = int(ry * small_cont_1c.shape[0]) if isinstance(ry, float) else ry
sx = int(rx * small_cont_1c.shape[1]) if isinstance(rx, float) else rx
assert crop.shape == (2 * sy + 1, 2 * sx + 1)
@pytest.mark.parametrize("as_array", [False, True, "image", ["image", "baz"]])
def test_equal_crops_as_array(self, small_cont: ImageContainer, as_array: bool):
small_cont.add_img(np.random.normal(size=(small_cont.shape + (1,))), channel_dim="foobar", layer="baz")
for crop in small_cont.generate_equal_crops(size=11, as_array=as_array):
if as_array:
if isinstance(as_array, bool):
assert isinstance(crop, dict)
for key in small_cont:
assert key in crop
assert crop[key].shape == (11, 11, small_cont[key].data.shape[-1])
elif isinstance(as_array, str):
assert isinstance(crop, np.ndarray)
assert crop.shape == (11, 11, small_cont[as_array].data.shape[-1])
else:
assert isinstance(crop, tuple)
assert len(crop) == len(as_array)
for key, data in zip(as_array, crop):
assert isinstance(data, np.ndarray)
assert data.shape == (11, 11, small_cont[key].data.shape[-1])
else:
assert isinstance(crop, ImageContainer)
for key in (Key.img.coords, Key.img.padding, Key.img.scale, Key.img.mask_circle):
assert key in crop.data.attrs, key
assert crop.shape == (11, 11)
@pytest.mark.parametrize("return_obs", [False, True])
@pytest.mark.parametrize("as_array", [False, True, "baz"])
def test_spot_crops_as_array_return_obs(
self, adata: AnnData, cont: ImageContainer, as_array: bool, return_obs: bool
):
cont.add_img(np.random.normal(size=(cont.shape + (4,))), channel_dim="foobar", layer="baz")
diameter = adata.uns["spatial"][Key.uns.library_id(adata, "spatial")]["scalefactors"]["spot_diameter_fullres"]
radius = int(round(diameter // 2))
size = (2 * radius + 1, 2 * radius + 1)
for crop in cont.generate_spot_crops(adata, as_array=as_array, return_obs=return_obs, spatial_key="spatial"):
crop, obs = crop if return_obs else (crop, None)
if obs is not None:
assert obs in adata.obs_names
if not as_array:
assert Key.img.obs in crop.data.attrs
if as_array is True:
assert isinstance(crop, dict), type(crop)
for key in cont:
assert key in crop
assert crop[key].shape == (*size, cont[key].data.shape[-1])
elif isinstance(as_array, str):
assert isinstance(crop, np.ndarray)
assert crop.shape == (*size, cont[as_array].data.shape[-1])
else:
assert isinstance(crop, ImageContainer)
assert crop.shape == size
@pytest.mark.parametrize("n_names", [None, 4])
def test_spot_crops_obs_names(self, adata: AnnData, cont: ImageContainer, n_names: Optional[int]):
obs = adata.obs_names[:n_names] if isinstance(n_names, int) else adata.obs_names
crops = list(cont.generate_spot_crops(adata, obs_names=obs))
assert len(crops) == len(obs)
for crop, o in zip(crops, obs):
assert crop.data.attrs[Key.img.obs] == o
@pytest.mark.parametrize("spot_scale", [1, 0.5, 2])
@pytest.mark.parametrize("scale", [1, 0.5, 2])
def test_spot_crops_spot_scale(self, adata: AnnData, cont: ImageContainer, scale: float, spot_scale: float):
diameter = adata.uns["spatial"][Key.uns.library_id(adata, "spatial")]["scalefactors"]["spot_diameter_fullres"]
radius = int(round(diameter // 2) * spot_scale)
size = int((2 * radius + 1) * scale), int((2 * radius + 1) * scale)
for crop in cont.generate_spot_crops(adata, spot_scale=spot_scale, scale=scale):
assert crop.shape == size
@pytest.mark.parametrize("preserve", [False, True])
def test_preserve_dtypes(self, cont: ImageContainer, preserve: bool):
assert np.issubdtype(cont["image"].dtype, np.uint8)
crop = cont.crop_corner(-10, -10, 20, cval=-5, preserve_dtypes=preserve)
if preserve:
assert np.issubdtype(crop["image"].dtype, np.uint8)
            # we specifically use 0, otherwise overflow would happen and the value would be 256 - 5
np.testing.assert_array_equal(crop["image"][:10, :10], 0)
else:
assert np.issubdtype(crop["image"].dtype, np.signedinteger)
np.testing.assert_array_equal(crop["image"][:10, :10], -5)
def test_spot_crops_mask_circle(self, adata: AnnData, cont: ImageContainer):
for crop in cont.generate_spot_crops(adata, cval=np.nan, mask_circle=True, preserve_dtypes=False):
assert crop.shape[0] == crop.shape[1]
c = crop.shape[0] // 2
mask = (crop.data.x - c) ** 2 + (crop.data.y - c) ** 2 <= c ** 2
np.testing.assert_array_equal(crop["image"].values[..., 0][~mask.values], np.nan)
def test_uncrop_preserves_shape(self, small_cont_1c: ImageContainer):
small_cont_1c.add_img(np.random.normal(size=(small_cont_1c.shape + (4,))), channel_dim="foobar", layer="baz")
crops = list(small_cont_1c.generate_equal_crops(size=13))
uncrop = ImageContainer.uncrop(crops)
np.testing.assert_array_equal(small_cont_1c.shape, uncrop.shape)
for key in small_cont_1c:
np.testing.assert_array_equal(uncrop[key], small_cont_1c[key])
def test_uncrop_too_small_requested_shape(self, small_cont_1c: ImageContainer):
crops = list(small_cont_1c.generate_equal_crops(size=13))
with pytest.raises(ValueError, match=r"Requested final image shape"):
ImageContainer.uncrop(crops, shape=(small_cont_1c.shape[0] - 1, small_cont_1c.shape[1] - 1))
@pytest.mark.parametrize("dy", [-10, 0])
def test_crop_metadata(self, small_cont_1c: ImageContainer, dy: int):
crop = small_cont_1c.crop_corner(dy, 0, 50, mask_circle=True)
assert small_cont_1c.data.attrs[Key.img.coords] is _NULL_COORDS
assert crop.data.attrs[Key.img.coords] == CropCoords(0, 0, 50, 50 + dy)
assert crop.data.attrs[Key.img.padding] == CropPadding(x_pre=0, y_pre=abs(dy), x_post=0, y_post=0)
assert crop.data.attrs[Key.img.mask_circle]
class TestContainerUtils:
def test_iter(self, small_cont_1c: ImageContainer):
expected = list(small_cont_1c.data.keys())
actual = list(small_cont_1c)
np.testing.assert_array_equal(actual, expected)
@pytest.mark.parametrize("deep", [False, True])
def test_copy(self, deep: bool):
cont = ImageContainer(np.random.normal(size=(10, 10)))
sentinel = object()
cont.data.attrs["sentinel"] = sentinel
copy = cont.copy(deep=deep)
if deep:
assert not np.shares_memory(copy["image"].values, cont["image"].values)
assert copy.data.attrs["sentinel"] is not sentinel
else:
assert np.shares_memory(copy["image"].values, cont["image"].values)
assert copy.data.attrs["sentinel"] is sentinel
def test_get_size(self):
cont = ImageContainer(np.empty((10, 10)))
ry, rx = cont._get_size(None)
assert (ry, rx) == cont.shape
ry, rx = cont._get_size((None, 1))
assert (ry, rx) == (cont.shape[0], 1)
ry, rx = cont._get_size((-1, None))
assert (ry, rx) == (-1, cont.shape[1])
@pytest.mark.parametrize("sx", [-1, -1.0, 0.5, 10])
@pytest.mark.parametrize("sy", [-1, -1.0, 0.5, 10])
def test_to_pixel_space(self, sy: Union[int, float], sx: Union[int, float]):
cont = ImageContainer(np.empty((10, 10)))
if (isinstance(sy, float) and sy < 0) or (isinstance(sx, float) and sx < 0):
with pytest.raises(ValueError, match=r"Expected .* to be in interval `\[0, 1\]`.*"):
cont._convert_to_pixel_space((sy, sx))
else:
ry, rx = cont._convert_to_pixel_space((sy, sx))
if isinstance(sy, int):
assert ry == sy
else:
assert ry == int(cont.shape[0] * sy)
if isinstance(sx, int):
assert rx == sx
else:
assert rx == int(cont.shape[1] * sx)
@pytest.mark.parametrize("channel", [None, 0])
@pytest.mark.parametrize("copy", [False, True])
def test_apply(self, copy: bool, channel: Optional[int]):
cont = ImageContainer(np.random.normal(size=(100, 100, 3)))
orig = cont.copy()
res = cont.apply(lambda arr: arr + 42, channel=channel, copy=copy)
if copy:
assert isinstance(res, ImageContainer)
data = res["image"]
else:
assert res is None
assert len(cont) == 1
data = cont["image"]
if channel is None:
np.testing.assert_allclose(data.values, orig["image"].values + 42)
else:
np.testing.assert_allclose(data.values[..., 0], orig["image"].values[..., channel] + 42)
def test_image_autoincrement(self, small_cont_1c: ImageContainer):
assert len(small_cont_1c) == 1
for _ in range(20):
small_cont_1c.add_img(np.empty(small_cont_1c.shape))
assert len(small_cont_1c) == 21
for i in range(20):
assert f"image_{i}" in small_cont_1c
@pytest.mark.parametrize("size", [0, 10, 20])
def test_repr_html(self, size: int):
cont = ImageContainer()
for _ in range(size):
cont.add_img(np.empty((10, 10)))
validator = SimpleHTMLValidator(
n_expected_rows=min(size, 10), expected_tags=set() if not size else {"p", "em", "strong"}
)
validator.feed(cont._repr_html_())
validator.validate()
def test_repr(self):
cont = ImageContainer()
assert "shape=(0, 0)" in repr(cont)
assert "layers=[]" in repr(cont)
assert repr(cont) == str(cont)
class TestCroppingExtra:
def test_big_crop(self, cont_dot: ImageContainer):
crop = cont_dot.crop_center(
y=50,
x=20,
radius=150,
cval=5,
)
np.testing.assert_array_equal(crop.data["image_0"].shape, (301, 301, 10))
# check that values outside of img are padded with 5
np.testing.assert_array_equal(crop.data["image_0"][0, 0, 0], 5)
np.testing.assert_array_equal(crop.data["image_0"][-1, -1, 0], 5)
assert crop.data["image_0"].dtype == np.uint8
# compare with crop_corner
crop2 = cont_dot.crop_corner(y=-100, x=-130, size=301, cval=5)
np.testing.assert_array_equal(crop2.data["image_0"], crop.data["image_0"])
def test_crop_smapp(self, cont_dot: ImageContainer):
crop = cont_dot.crop_center(
x=50,
y=20,
radius=0,
cval=5,
)
np.testing.assert_array_equal(crop.data["image_0"].shape, (1, 1, 10))
np.testing.assert_array_equal(crop.data["image_0"][0, 0, :3], [10, 11, 12])
assert crop.data["image_0"].dtype == np.uint8
def test_crop_mask_circle(self, cont_dot: ImageContainer):
# crop with mask_circle
crop = cont_dot.crop_center(
y=20,
x=50,
radius=5,
cval=5,
mask_circle=True,
)
np.testing.assert_array_equal(crop.data["image_0"][1, 0, :], 5)
np.testing.assert_array_equal(crop.data["image_0"][2, 2, :], 0)
np.testing.assert_array_equal(crop.data["image_0"][7, 7, :], 0)
np.testing.assert_array_equal(crop.data["image_0"][9, 9, :], 5)
def test_crop_multiple_images(self, cont_dot: ImageContainer):
mask = np.random.randint(low=0, high=10, size=cont_dot.shape)
cont_dot.add_img(mask, layer="image_1", channel_dim="mask")
crop = cont_dot.crop_center(
y=50,
x=20,
radius=0,
cval=5,
)
assert "image_0" in crop
assert "image_1" in crop
| np.testing.assert_array_equal(crop.data["image_0"].shape, (1, 1, 10)) | numpy.testing.assert_array_equal |
from pathlib import Path
import numpy as np
import pandas as pd
import tensorly as tl
def subsample_data(df: pd.DataFrame) -> np.ndarray:
"""Sub-samples the data to make it more manageable for this assignment
Parameters
----------
df : pd.DataFrame
DataFrame to subsample
Returns
-------
np.ndarray
Sub-sampled array ready to merged into a tensor
"""
df = df.set_index(["timestamp", "forecast_timestamp"])
df = df[~df.index.duplicated()]
# Each timestamp has 24.5 hours worth of forecasts; just grab the first one
unique_timestamps = df.index.get_level_values("timestamp").unique()
first_forecasts = unique_timestamps + pd.Timedelta(30, "min")
idx = zip(unique_timestamps, first_forecasts)
df = df.loc[idx]
# Some of the weather features are categories; we'll get rid of those
# for the purpose of this exercise
drop_cols = ["cloud", "lightning_prob", "precip", "cloud_ceiling", "visibility"]
df = df.drop(columns=drop_cols)
df = df.dropna()
# Let's grab 2000 random samples from the data to help with SVD convergence
rng = np.random.default_rng(17)
idx = rng.choice( | np.arange(df.shape[0]) | numpy.arange |
import numpy as np
def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):
x0 = (x_pts[-1]+x_pts[0])/2 + x_offset
y0 = (y_pts[-1]+y_pts[0])/2 + y_offset
xx, yy = np.meshgrid(x_pts, y_pts)
sigma = mfd * 0.707 / 2.355
sigma_x = sigma
sigma_y = sigma
gaus_2d = | np.exp(-((xx-x0)**2/(2*sigma_x**2)+
(yy-y0)**2/(2*sigma_y**2))) | numpy.exp |
#%%
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import numpy as np
from itertools import product
import seaborn as sns
### MAIN HYPERPARAMS ###
slots = 1
shifts = 6
alg_name = ['L2N','L2F']
########################
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_fte_bte(err, single_err):
bte = [[] for i in range(10)]
te = [[] for i in range(10)]
fte = []
for i in range(10):
for j in range(i,10):
#print(err[j][i],j,i)
bte[i].append(err[i][i]/err[j][i])
te[i].append(single_err[i]/err[j][i])
for i in range(10):
fte.append(single_err[i]/err[i][i])
return fte,bte,te
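# Illustrative note (sketch, assuming err[j][i] is the error on task i after
# training up to task j, and single_err[i] is the single-task error on task i):
#   FTE_i    = single_err[i] / err[i][i]   (forward transfer efficiency)
#   BTE_i(j) = err[i][i] / err[j][i]       (backward transfer efficiency, j >= i)
#   TE_i(j)  = single_err[i] / err[j][i]   (overall transfer efficiency)
# e.g. single_err[0] = 0.30, err[0][0] = 0.25, err[5][0] = 0.20 gives
# FTE_0 = 1.2, BTE_0(5) = 1.25, TE_0(5) = 1.5.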
def calc_mean_bte_(btes,task_num=10,reps=6):
mean_bte = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(btes[i][j])
tmp=tmp/reps
mean_bte[j].extend(tmp)
return mean_bte
def calc_mean_te(tes,task_num=10,reps=6):
mean_te = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(tes[i][j])
tmp=tmp/reps
mean_te[j].extend(tmp)
return mean_te
def calc_mean_fte(ftes,task_num=10,reps=6):
fte = | np.asarray(ftes) | numpy.asarray |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
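# Illustrative usage sketch (an assumption for clarity, not part of the generated
# tables below): for the centrosymmetric group 'P -1' defined below, a reflection
# is equivalent to itself and to its Friedel mate, both with unit phase factors:
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls   -> [[ 1,  2,  3], [-1, -2, -3]]
#     # phases -> [1.+0.j, 1.+0.j]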
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
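# I-centred group: the four rotations are listed once with zero translation
# and once more with the body-centring translation (1/2,1/2,1/2).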
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
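# Centring translations are added to the screw-axis translations without
# reduction modulo 1, so components equal to 1 (e.g. (1/2,1/2,1) above) do
# occur.  A minimal sketch of how one stored operation could be applied to a
# fractional coordinate; the helper below is illustrative only and not part
# of the generated table (callers would normally reduce the result mod 1):
def _example_apply_symop(rot, trans_num, trans_den, point):
    # rotate 'point' (fractional coordinates), then add the fractional
    # translation trans_num/trans_den element-wise
    return N.dot(rot, point) + 1.*trans_num/trans_den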
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
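# C-centred group: the four rotations are repeated with the centring
# translation (1/2,1/2,0).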
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
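# A-centred group: the four rotations are repeated with the centring
# translation (0,1/2,1/2).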
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
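# F d d 2 combines the face-centring translations with the d-glide
# translations, giving the quarter-cell vectors (1/4,1/4,1/4), (1/4,3/4,3/4),
# (3/4,1/4,3/4) and (3/4,3/4,1/4) seen above.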
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
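# Space group 47 (P m m m) and the following orthorhombic groups are
# centrosymmetric: each block lists the proper rotations followed by their
# products with the inversion (operations whose rotation matrix is negated).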
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
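# The ':2' suffix denotes the origin-choice-2 setting of the International
# Tables.  In these settings the inversion-related operations carry negative
# translation components (e.g. (0,-1/2,-1/2) above), again stored without
# reduction modulo 1.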
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
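# Centred centrosymmetric groups such as C m c m carry sixteen operations:
# the eight primitive operations followed by the same eight shifted by the
# C-centring translation (1/2,1/2,0).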
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
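# Note (editorial): for this body-centred group the second eight operations
# above are the first eight with the centring translation (1/2, 1/2, 1/2)
# added, which is why every trans_num/trans_den pair in that half reads
# [1,1,1] / [2,2,2].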
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
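# Usage sketch (editorial): the space_groups dictionary built by this module
# is keyed both by International Tables number and by Hermann-Mauguin symbol,
# so the two lookups below would return the same SpaceGroup object.
#   sg_by_number = space_groups[75]
#   sg_by_symbol = space_groups['P 4']
#   assert sg_by_number is sg_by_symbol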
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([-1,0,0,0,-1,0,0,0,1]) | numpy.array |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import logging
import numpy as np
from sklearn.utils import shuffle
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua import AquaError, Pluggable, PluggableType, get_pluggable_class
from qiskit.aqua.algorithms.adaptive.qsvm import (cost_estimate, return_probabilities)
from qiskit.aqua.utils import (get_feature_dimension, map_label_to_class_name,
split_dataset_to_data_and_labels)
logger = logging.getLogger(__name__)
class QSVMVariational(QuantumAlgorithm):
CONFIGURATION = {
'name': 'QSVM.Variational',
'description': 'QSVM_Variational Algorithm',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'SVM_Variational_schema',
'type': 'object',
'properties': {
'override_SPSA_params': {
'type': 'boolean',
'default': True
},
'batch_mode': {
'type': 'boolean',
'default': False
},
'minibatch_size': {
'type': 'integer',
'default': -1
}
},
'additionalProperties': False
},
'problems': ['svm_classification'],
'depends': [
{
'pluggable_type': 'optimizer',
'default': {
'name': 'SPSA'
},
},
{
'pluggable_type': 'feature_map',
'default': {
'name': 'SecondOrderExpansion',
'depth': 2
},
},
{
'pluggable_type': 'variational_form',
'default': {
'name': 'RYRZ',
'depth': 3
},
},
],
}
def __init__(self, optimizer, feature_map, var_form, training_dataset,
test_dataset=None, datapoints=None, batch_mode=False,
minibatch_size=-1, callback=None):
"""Initialize the object
Args:
training_dataset (dict): {'A': numpy.ndarray, 'B': numpy.ndarray, ...}
test_dataset (dict): the same format as `training_dataset`
datapoints (numpy.ndarray): NxD array, N is the number of data and D is data dimension
optimizer (Optimizer): Optimizer instance
feature_map (FeatureMap): FeatureMap instance
var_form (VariationalForm): VariationalForm instance
batch_mode (boolean): Batch mode for circuit compilation and execution
callback (Callable): a callback that can access the intermediate data during the optimization.
                                 Internally, four arguments are provided as follows:
                                     the index of the data batch, the index of evaluation,
                                     the parameters of the variational form, and the evaluated value.
Notes:
            We use `label` to denote numeric results and `class` to mean the name of that class (str).
"""
self.validate(locals())
super().__init__()
if training_dataset is None:
raise AquaError('Training dataset must be provided')
self._training_dataset, self._class_to_label = split_dataset_to_data_and_labels(
training_dataset)
self._label_to_class = {label: class_name for class_name, label
in self._class_to_label.items()}
self._num_classes = len(list(self._class_to_label.keys()))
if test_dataset is not None:
self._test_dataset = split_dataset_to_data_and_labels(test_dataset,
self._class_to_label)
else:
self._test_dataset = test_dataset
if datapoints is not None and not isinstance(datapoints, np.ndarray):
datapoints = | np.asarray(datapoints) | numpy.asarray |
import stokepy as sp
import numpy as np
# instantiate class
fmc = sp.FiniteMarkovChain()
# create initial distribution vector
phi = | np.array([0, 0, 1, 0, 0]) | numpy.array |
import numpy as np
import gym
from gym import spaces
import math
MAX_MARCH = 20
EPSILON = 0.1
DEG_TO_RAD = 0.0174533
WINDOW_SIZE = (200, 300) # Width x Height in pixels
def generate_box(pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False):
'''
    Generate a box with width and height drawn uniformly at random from size[0] to size[1].
    If inside_window is True, the box is forced to stay entirely inside the window.
'''
box_size = np.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = np.random.uniform([box_size[0], box_size[1]],
[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]])
else:
pos = np.random.uniform(WINDOW_SIZE)
if inside_window:
return Box(pos, box_size, color=color, is_goal=is_goal)
else:
return Box(pos, box_size, color=color, is_goal=is_goal)
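# Hedged usage sketch for generate_box -- the argument values below are illustrative
# assumptions, not taken from the original code:
# obstacle = generate_box(size=[10, 25], inside_window=True, color=(0, 255, 0))
# print(obstacle.center, obstacle.size)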
def generate_circle(pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False):
circ_rad = np.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = np.random.uniform([circ_rad, circ_rad], [WINDOW_SIZE[0]-circ_rad, WINDOW_SIZE[1]-circ_rad])
else:
pos = np.random.uniform(WINDOW_SIZE)
if inside_window:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
else:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
def generate_boxes(num_boxes=5, size=[10, 25], is_goal=False, inside_window=True, color=(255, 255, 255)):
centers = []
sizes = []
boxes = []
for i in range(num_boxes):
box = generate_box(size=size, color=color, is_goal=is_goal, inside_window=inside_window)
centers.append(box.center)
sizes.append(box.size)
boxes.append(box)
centers = np.array(centers)
sizes = np.array(sizes)
return boxes, centers, sizes
def generate_circles(num_circles=5, radius=[10, 25], is_goal=False, inside_window=True, color=(255, 255, 255)):
centers = []
radii = []
circles = []
for i in range(num_circles):
circle = generate_circle(radius=radius, color=color, is_goal=is_goal, inside_window=inside_window)
centers.append(circle.center)
radii.append(circle.radius)
circles.append(circle)
centers = np.array(centers)
radii = np.array(radii)
return circles, centers, radii
def reset_objects():
'''reset global object lists to be populated'''
items = ['boxes', 'box_centers', 'box_sizes', 'circles', 'circle_centers',
'circle_radii', 'objects']
for item in items:
globals()[item] = []
def add_box(box):
'''add box to global boxes object for computation'''
globals()['boxes'].append(box)
if len(globals()['box_centers']) > 0:
globals()['box_centers'] = np.vstack([box_centers, np.array([box.center])])
globals()['box_sizes'] = np.vstack([box_sizes, np.array([box.size])])
else:
globals()['box_centers'] = np.array([box.center])
globals()['box_sizes'] = np.array([box.size])
globals()['objects'] = globals()['boxes'] + globals()['circles']
def add_circle(circle):
'''add circle to global circles object for computation'''
globals()['circles'].append(circle)
if len(globals()['circle_centers']) > 0:
globals()['circle_centers'] = np.vstack([circle_centers, np.array([circle.center])])
globals()['circle_radii'] = np.vstack([circle_radii, np.array([circle.radius])])
else:
globals()['circle_centers'] = np.array([circle.center])
globals()['circle_radii'] = np.array([circle.radius])
globals()['objects'] = globals()['boxes'] + globals()['circles']
def add_walls():
add_box(Box(np.array([0, 0]), np.array([1, WINDOW_SIZE[1]]), color=(0, 255, 0)))
add_box(Box(np.array([0, 0]), np.array([WINDOW_SIZE[0], 1]), color=(0, 255, 0)))
add_box(Box(np.array([0, WINDOW_SIZE[1]]), np.array([WINDOW_SIZE[0], 1]), color=(0, 255, 0)))
add_box(Box(np.array([WINDOW_SIZE[0], 0]), np.array([1, WINDOW_SIZE[1]]), color=(0, 255, 0)))
def spaced_random_pos(sep=5):
'''
Find a spot that has a minimum separation from other objects in the scene
'''
while True:
pos = np.random.uniform(WINDOW_SIZE)
if scene_sdf(pos)[0] > sep:
return pos
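# Hedged usage sketch (assumes the global object lists have already been populated,
# e.g. by generate_world below): sample a position at least 10 units away from every object.
# free_pos = spaced_random_pos(sep=10)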
def generate_world(num_objects=5, min_goal_sep=15, color=(0, 255, 0)):
reset_objects()
'''generate obstacles'''
boxes, box_centers, box_sizes = generate_boxes(num_objects, inside_window=False, color=color)
circles, circle_centers, circle_radii = generate_circles(num_objects, inside_window=False, color=color)
globals()['boxes'] = boxes
globals()['box_centers'] = box_centers
globals()['box_sizes'] = box_sizes
globals()['circles'] = circles
globals()['circle_centers'] = circle_centers
globals()['circle_radii'] = circle_radii
globals()['objects'] = boxes + circles
#create walls around screen:
add_walls()
#create a goal, require it to be at least 30 units away from player
searching = True
while searching:
pos = np.random.uniform(WINDOW_SIZE)
if scene_sdf(pos)[0] > min_goal_sep:
#position is okay
searching = False
# pos = np.array([500, 500])
goal = generate_box(pos=pos, size=[15, 15], is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_box(goal)
def block_view_world(character, block_size=25, randomize_heading=0):
'''
Create a setting where the goal is perfectly blocked by a block
randomize_heading:
0 - always fixed
1 - randomize headings but point agent in the right direction
2 - randomize headings and point agent in random direction
'''
# print('call block view world')
reset_objects()
boxes, box_centers, box_sizes = generate_boxes(0)
circles, circle_centers, circle_radii = generate_circles(0)
#add a single block in the center of the screen
add_box(Box(np.array([WINDOW_SIZE[0]/2, WINDOW_SIZE[1]/2]),
np.array([block_size, block_size]), color=(0, 255, 0)))
add_walls()
base_size = 15
base_x = 150
base_y = 100
base_radius = 88
if randomize_heading > 0:
angle = np.random.uniform(6.28)
x = np.cos(angle) * base_radius
y = np.sin(angle) * base_radius
goal = Box(np.array([x + base_x, y + base_y]), np.array([base_size, base_size]),
is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_box(goal)
angle2 = angle + 3.14
x = np.cos(angle2) * base_radius
y = np.sin(angle2) * base_radius
character.pos = np.array([x + base_x, y + base_y])
if randomize_heading > 1:
character.angle = np.random.uniform(6.28)
else:
character.angle = angle
character.update_rays()
else:
#add the goal
goal = Box(np.array([WINDOW_SIZE[0] - 50, WINDOW_SIZE[1]/2]),
np.array([base_size, base_size]),
is_goal=True, color=(255, 0, 0))
globals()['goal'] = goal
add_box(goal)
#set the agent position
character.pos = np.array([50, WINDOW_SIZE[1]/2])
character.angle = 0
character.update_rays()
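# Hedged usage sketch for block_view_world; `character` is assumed to be the environment's
# agent object with pos, angle and update_rays(), as used in the function above.
# block_view_world(character, block_size=25, randomize_heading=2)  # random goal and random heading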
def dist(v):
'''calculate length of vector'''
return np.linalg.norm(v)
def scene_sdf(p):
# closest_sdf = np.inf
# closest = None
# for obj in objects:
# obj.draw()
# sdf = obj.sdf(p)
# if sdf < closest_sdf:
# closest_sdf = sdf
# closest = obj
# return closest_sdf, closest
box_dists = box_sdfs(p)
circle_dists = circle_sdfs(p)
dists = np.append(box_dists, circle_dists)
min_dist = np.min(dists)
obj_index = | np.argmin(dists) | numpy.argmin |
'''
'''
import os
import pickle
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from itertools import chain, combinations_with_replacement
# -- astropy --
import astropy.units as u
from astropy.time import Time
# -- specsim --
import specsim
from specsim.atmosphere import Moon
# -- feasibgs --
from . import util as UT
def Isky_regression(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and
sun parameters. The sky surface brightness uses a regression model fit
using BOSS and DESI CMX sky fibers to predict V-band moonlight surface
brightness. This V-band magnitude is then used to scale up the dark time
sky.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom], sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='regression')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
Isky = specsim_sky.surface_brightness.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
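# Hedged usage sketch for Isky_regression -- parameter values are illustrative assumptions
# chosen inside the documented ranges, not values from the original code:
# wave, Isky = Isky_regression(airmass=1.2, moonill=0.7, moonalt=40., moonsep=60.,
#                              sunalt=-30., sunsep=90.)
# wave is in Angstrom and Isky in 1e-17 erg/cm^2/s/A/arcsec^2, as documented above.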
def Isky_newKS_twi(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and sun parameters.
The sky surface brightness uses the KS model scaling with coefficients re-fit to match
BOSS sky data and includes a twilight contribution from Parker's thesis.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom] and sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='refit_ks')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
# updated KS coefficients
specsim_sky.moon.KS_CR = 458173.535128
specsim_sky.moon.KS_CM0 = 5.540103
specsim_sky.moon.KS_CM1 = 178.141045
_sky = specsim_sky._surface_brightness_dict['dark'].copy()
_sky *= specsim_sky.extinction
I_ks_rescale = specsim_sky.surface_brightness
Isky = I_ks_rescale.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
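# Hedged usage sketch for Isky_newKS_twi, mirroring the call above with illustrative values;
# a sun altitude above -20 deg additionally triggers the twilight term:
# wave, Isky_ks = Isky_newKS_twi(airmass=1.2, moonill=0.7, moonalt=40., moonsep=60.,
#                                sunalt=-15., sunsep=90.)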
def Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep):
''' Parker's sky model, which is a function of:
:param airmass:
airmass
:param ecl_lat:
ecliptic latitude (used for zodiacal light contribution)
:param gal_lat:
galactic latitude (used for ISL contribution)
:param gal_lon:
galactic longitude (used for ISL contribution)
:param tai:
time in seconds
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
'''
from astroplan import Observer
from astropy.coordinates import EarthLocation
X = airmass # air mass
beta = ecl_lat # ecliptic latitude ( used for zodiacal light contribution )
l = gal_lat # galactic latitude ( used for ISL contribution )
b = gal_lon # galactic longitude ( used for ISL contribution )
_kpno = EarthLocation.of_site('kitt peak')
obs_time = Time(tai/86400., scale='tai', format='mjd', location=_kpno)
mjd = obs_time.mjd
# fractional months ( used for seasonal contribution)
month_frac = obs_time.datetime.month + obs_time.datetime.day/30.
# fractional hour ( used for hourly contribution)
kpno = Observer(_kpno)
sun_rise = kpno.sun_rise_time(obs_time, which='next')
sun_set = kpno.sun_set_time(obs_time, which='previous')
hour = ((obs_time - sun_set).sec)/3600.
hour_frac = hour/((Time(sun_rise, format='mjd') - Time(sun_set,format = 'mjd')).sec/3600.)
alpha = sun_alt # sun altitude
delta = sun_sep # sun separation (separation between the target and the sun's location)
# used for scattered moonlight
g = moon_phase # moon phase
altm = moon_alt
illm = moon_ill
delm = moon_sep
# get coefficients
coeffs = _read_parkerCoeffs()
# sky continuum
_w, _Icont = _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g)
S_continuum = _Icont / np.pi # BOSS has 2 arcsec diameter
# sky emission from the UVES continuum subtraction
w_uves, S_uves = np.loadtxt(''.join([UT.code_dir(), 'dat/sky/UVES_sky_emission.dat']),
unpack=True, usecols=[0,1])
f_uves = interp1d(w_uves, S_uves, bounds_error=False, fill_value='extrapolate')
S_emission = f_uves(_w)
return _w, S_continuum + S_emission
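# Hedged usage sketch for Isky_parker -- all values below are illustrative assumptions
# (tai in seconds, angles in degrees, as described in the docstring above):
# wave, Isky_p = Isky_parker(airmass=1.2, ecl_lat=10., gal_lat=120., gal_lon=45.,
#                            tai=4.7e9, sun_alt=-30., sun_sep=120., moon_phase=1.5,
#                            moon_ill=0.6, moon_alt=35., moon_sep=70.)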
def Isky_parker_radecobs(ra, dec, obs_time):
''' wrapper for Isky_parker, where the input parameters are calculated based
on RA, Dec, and obs_time
'''
from astroplan import download_IERS_A
from astropy.coordinates import EarthLocation, SkyCoord, AltAz, get_sun, get_moon
download_IERS_A()
# target coordinates
coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
# observed time (UTC)
utc_time = Time(obs_time)
kpno = EarthLocation.of_site('kitt peak')
kpno_altaz = AltAz(obstime=utc_time, location=kpno)
coord_altaz = coord.transform_to(kpno_altaz)
airmass = coord_altaz.secz
    ecl_lat = coord.barycentrictrueecliptic.lat.deg
gal_lat = coord.galactic.l.deg # galactic latitude ( used for ISL contribution )
gal_lon = coord.galactic.b.deg # galactic longitude ( used for ISL contribution )
    tai = utc_time.tai.mjd * 86400. # TAI in seconds (MJD * 86400), as Isky_parker expects
# sun altitude (degrees)
sun = get_sun(utc_time)
sun_altaz = sun.transform_to(kpno_altaz)
    sun_alt = sun_altaz.alt.deg
# sun separation
    sun_sep = sun.separation(coord).deg
# used for scattered moonlight
moon = get_moon(utc_time)
moon_altaz = moon.transform_to(kpno_altaz)
moon_alt = moon_altaz.alt.deg
moon_sep = moon.separation(coord).deg #coord.separation(self.moon).deg
elongation = sun.separation(moon)
phase = np.arctan2(sun.distance * np.sin(elongation), moon.distance - sun.distance*np.cos(elongation))
moon_phase = phase.value
moon_ill = (1. + np.cos(phase))/2.
return Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep)
def _specsim_initialize(config, model='regression'):
''' hacked version of specsim.atmosphere.initialize, which initializes the
atmosphere model from configuration parameters.
'''
if specsim.config.is_string(config):
config = specsim.config.load_config(config)
atm_config = config.atmosphere
# Load tabulated data.
surface_brightness_dict = config.load_table(
atm_config.sky, 'surface_brightness', as_dict=True)
extinction_coefficient = config.load_table(
atm_config.extinction, 'extinction_coefficient')
# Initialize an optional atmospheric seeing PSF.
psf_config = getattr(atm_config, 'seeing', None)
if psf_config:
seeing = dict(
fwhm_ref=specsim.config.parse_quantity(psf_config.fwhm_ref),
wlen_ref=specsim.config.parse_quantity(psf_config.wlen_ref),
moffat_beta=float(psf_config.moffat_beta))
else:
seeing = None
# Initialize an optional lunar scattering model.
moon_config = getattr(atm_config, 'moon', None)
if moon_config:
moon_spectrum = config.load_table(moon_config, 'flux')
c = config.get_constants(moon_config,
['moon_zenith', 'separation_angle', 'moon_phase'])
moon = _Moon(
config.wavelength, moon_spectrum, extinction_coefficient,
atm_config.airmass, c['moon_zenith'], c['separation_angle'],
c['moon_phase'], model=model)
else:
moon = None
atmosphere = specsim.atmosphere.Atmosphere(
config.wavelength, surface_brightness_dict, extinction_coefficient,
atm_config.extinct_emission, atm_config.sky.condition,
atm_config.airmass, seeing, moon)
if config.verbose:
print(
"Atmosphere initialized with condition '{0}' from {1}."
.format(atmosphere.condition, atmosphere.condition_names))
if seeing:
print('Seeing is {0} at {1} with Moffat beta {2}.'
.format(seeing['fwhm_ref'], seeing['wlen_ref'],
seeing['moffat_beta']))
if moon:
print(
'Lunar V-band extinction coefficient is {0:.5f}.'
.format(moon.vband_extinction))
return atmosphere
class _Moon(Moon):
    ''' specsim.atmosphere.Moon object hacked to work with a Krisciunas & Schaefer (1991)
model with extra free parameters
'''
def __init__(self, wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase,
model='regression'):
# initialize via super function
super().__init__(wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase)
self.model = model
# default KS coefficients
self.KS_CR = 10**5.36 # proportionality constant in the Rayleigh scattering function
# constants for the Mie scattering function term
self.KS_CM0 = 6.15
self.KS_CM1 = 40.
self.KS_M0 = -12.73
self.KS_M1 = 0.026
self.KS_M2 = 4.
def _update(self):
"""Update the model based on the current parameter values.
"""
self._update_required = False
# Calculate the V-band surface brightness of scattered moonlight.
if self.model == 'refit_ks':
self._scattered_V = krisciunas_schaefer_free(
self.obs_zenith, self.moon_zenith, self.separation_angle,
self.moon_phase, self.vband_extinction, self.KS_CR, self.KS_CM0,
self.KS_CM1, self.KS_M0, self.KS_M1, self.KS_M2)
elif self.model == 'regression':
self._scattered_V = _scattered_V_regression(
self.airmass,
0.5 * (np.cos(np.pi * self.moon_phase) + 1.),
90 - self.moon_zenith.value,
self.separation_angle.value) * u.mag / u.arcsec**2
else:
raise NotImplementedError
# Calculate the wavelength-dependent extinction of moonlight
# scattered once into the observed field of view.
scattering_airmass = (
1 - 0.96 * | np.sin(self.moon_zenith) | numpy.sin |
from __future__ import absolute_import, division, print_function
# TensorFlow and tf.keras
import tensorflow as tf
import keras
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from keras.preprocessing import image
from keras.models import Sequential, load_model, model_from_json
# Helper libraries
import numpy as np
import glob
import cv2
import scipy.io as sio
import os
print(tf.__version__)
def main():
class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']
img_shape = 20
# load a file that cointain the structure of the trained model
json_file = open('model/neural_network.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
model = model_from_json(loaded_model_json)
# load the weights of the trained model
model.load_weights("model/neural_network.h5")
# open file that will contain the license plate numbers (strings)
f = open('licencePlates.txt', 'w' )
    # path containing the licence plate char images; each image holds 20x20 chars
    # concatenated side by side (so the image width is #ofchars x 20 pixels)
fn = "licence_plates/*.jpg"
# extract image names from the path
filenames = glob.glob(fn)
filenames.sort()
images = []
# load images and save them in a vector of images
for img in filenames:
image = cv2.imread(img)
images.append(image)
for img in images:
S = ''
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)/255
# extract each char (20x20) from the image
for j in range(int(img.size/(img_shape*img_shape))):
char = img[:,img_shape*j:img_shape*(j+1)]
cv2.transpose(char,char)
char = char.reshape((-1, img_shape, img_shape, 1), order="F")
# predict the label of the char
predictor = model.predict(char)
max_prob = | np.argmax(predictor) | numpy.argmax |
import sys
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy import optimize
import numpy as np
from pathlib import Path
from scipy import interpolate
import math as m
from . import nbspectra
########################################################################################
########################################################################################
# GENERAL FUNCTIONS #
########################################################################################
########################################################################################
def black_body(wv,T):
    #Computes the BB flux with temperature T at wavelengths wv (in Angstrom; converted to cm below)
c = 2.99792458e10 #speed of light in cm/s
k = 1.380658e-16 #boltzmann constant
h = 6.6260755e-27 #planck
w=wv*1e-8 #Angstrom to cm
bb=2*h*c**2*w**(-5)*(np.exp(h*c/k/T/w)-1)**(-1)
return bb
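# Hedged sanity check for black_body (wavelengths in Angstrom, matching the unit
# conversion above): a 5800 K blackbody should peak near 5000 A by Wien's law.
# wv = np.arange(3000., 9000., 500.)
# print(black_body(wv, 5800.))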
def vacuum2air(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv/n)*1e4 #to Angstroms
return w
def air2vacuum(wv): #wv in angstroms
wv=wv*1e-4 #A to micrometer
a=0
b1=5.792105e-2
b2=1.67917e-3
c1=238.0185
c2=57.362
n=1+a+b1/(c1-(1/wv**2))+b2/(c2-(1/wv**2))
w=(wv*n)*1e4 #to Angstroms
return w
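# Hedged round-trip check for the two conversions above (wavelength in Angstrom,
# value is illustrative): air2vacuum and vacuum2air should nearly invert each other.
# print(vacuum2air(air2vacuum(6562.8)))  # expected to return ~6562.8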
########################################################################################
########################################################################################
# PHOTOMETRY FUNCTIONS #
########################################################################################
########################################################################################
def interpolate_Phoenix_mu_lc(self,temp,grav):
"""Cut and interpolate phoenix models at the desired wavelengths, temperatures, logg and metalicity(not yet). For spectroscopy.
Inputs
temp: temperature of the model;
grav: logg of the model
Returns
creates a temporal file with the interpolated spectra at the temp and grav desired, for each surface element.
"""
    #TODO (translated from Catalan): also request the resolution and apply it here.
import warnings
warnings.filterwarnings("ignore")
path = self.path / 'models' / 'Phoenix_mu' #path relatve to working directory
files = [x.name for x in path.glob('lte*fits') if x.is_file()]
list_temp=np.unique([float(t[3:8]) for t in files])
list_grav=np.unique([float(t[9:13]) for t in files])
#check if the parameters are inside the grid of models
if grav<np.min(list_grav) or grav>np.max(list_grav):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired logg is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired logg from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
if temp<np.min(list_temp) or temp>np.max(list_temp):
sys.exit('Error in the interpolation of Phoenix_mu models. The desired T is outside the grid of models, extrapolation is not supported. Please download the \
Phoenix intensity models covering the desired T from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73')
lowT=list_temp[list_temp<=temp].max() #find the model with the temperature immediately below the desired temperature
uppT=list_temp[list_temp>=temp].min() #find the model with the temperature immediately above the desired temperature
lowg=list_grav[list_grav<=grav].max() #find the model with the logg immediately below the desired logg
uppg=list_grav[list_grav>=grav].min() #find the model with the logg immediately above the desired logg
#load the flux of the four phoenix model
name_lowTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),lowg)
name_lowTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(lowT),uppg)
name_uppTlowg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),lowg)
name_uppTuppg='lte{:05d}-{:.2f}-0.0.PHOENIX-ACES-AGSS-COND-SPECINT-2011.fits'.format(int(uppT),uppg)
#Check if the files exist in the folder
if name_lowTlowg not in files:
sys.exit('The file '+name_lowTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_lowTuppg not in files:
sys.exit('The file '+name_lowTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_uppTlowg not in files:
sys.exit('The file '+name_uppTlowg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
if name_uppTuppg not in files:
sys.exit('The file '+name_uppTuppg+' required for the interpolation does not exist. Please download it from https://phoenix.astro.physik.uni-goettingen.de/?page_id=73 and add it to your path: '+path)
wavelength=np.arange(500,26000) #wavelength in A
idx_wv=np.array(wavelength>self.wavelength_lower_limit) & np.array(wavelength<self.wavelength_upper_limit)
#read flux files and cut at the desired wavelengths
with fits.open(path / name_lowTlowg) as hdul:
amu = hdul[1].data
amu = np.append(amu[::-1],0.0)
flux_lowTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_lowTuppg) as hdul:
flux_lowTuppg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTlowg) as hdul:
flux_uppTlowg=hdul[0].data[:,idx_wv]
with fits.open(path / name_uppTuppg) as hdul:
flux_uppTuppg=hdul[0].data[:,idx_wv]
#interpolate in temperature for the two gravities
if uppT==lowT: #to avoid nans
flux_lowg = flux_lowTlowg
flux_uppg = flux_lowTuppg
else:
flux_lowg = flux_lowTlowg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTlowg - flux_lowTlowg)
flux_uppg = flux_lowTuppg + ( (temp - lowT) / (uppT - lowT) ) * (flux_uppTuppg - flux_lowTuppg)
#interpolate in log g
if uppg==lowg: #to avoid dividing by 0
flux = flux_lowg
else:
flux = flux_lowg + ( (grav - lowg) / (uppg - lowg) ) * (flux_uppg - flux_lowg)
angle0 = flux[0]*0.0 #LD of 90 deg, to avoid dividing by 0? (not sure, ask Kike)
flux_joint = | np.vstack([flux[::-1],angle0]) | numpy.vstack |
import glob
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from pathlib import Path
import cv2
import numpy
import sys
# sys.path.append('.')
from kaggle_ndsb2017 import helpers
from kaggle_ndsb2017 import settings
from kaggle_ndsb2017 import step2_train_nodule_detector
from kaggle_ndsb2017.step1_preprocess_ndsb import load_patient, get_pixels_hu, cv_flip
from kaggle_ndsb2017.step2_train_nodule_detector import CUBE_SIZE
from kaggle_ndsb2017.step3_predict_nodules import PREDICT_STEP, prepare_image_for_net3D, P_TH
def extract_dicom_images_patient(src_dir, target_dir=None, write_to_imgs=False):
print("Source dicom dir: ", src_dir)
id = os.path.basename(os.path.abspath(src_dir))
if write_to_imgs:
if target_dir is None:
target_dir = os.path.join(Path(src_dir).parent, id + '_extracted')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
print("Target dicom dir: ", target_dir)
slices = load_patient(src_dir)
print(
f"Len slides: {len(slices)} \t Slide thickness: {slices[0].SliceThickness} \t Pixel Spacing: {slices[0].PixelSpacing}")
print("Orientation: ", slices[0].ImageOrientationPatient)
# assert slices[0].ImageOrientationPatient == [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
cos_value = (slices[0].ImageOrientationPatient[0])
cos_degree = round(math.degrees(math.acos(cos_value)), 2)
pixels = get_pixels_hu(slices)
image = pixels
print("Img shape:", image.shape)
invert_order = slices[1].ImagePositionPatient[2] > slices[0].ImagePositionPatient[2]
print("Invert order: ", invert_order, " - ", slices[1].ImagePositionPatient[2], ",",
slices[0].ImagePositionPatient[2])
pixel_spacing = slices[0].PixelSpacing
pixel_spacing.append(slices[0].SliceThickness)
image = helpers.rescale_patient_images(image, pixel_spacing, settings.TARGET_VOXEL_MM)
if not invert_order:
image = numpy.flipud(image)
full_img = []
full_mask = []
for i in range(image.shape[0]):
org_img = image[i]
# if there exists slope,rotation image with corresponding degree
if cos_degree > 0.0:
org_img = cv_flip(org_img, org_img.shape[1], org_img.shape[0], cos_degree)
img, mask = helpers.get_segmented_lungs(org_img.copy())
org_img = helpers.normalize_hu(org_img)
org_img = org_img * 255
mask = mask * 255
if write_to_imgs:
file_name = "img_" + str(i).rjust(4, '0') + "_i.png"
img_path = os.path.join(target_dir, file_name)
cv2.imwrite(img_path, org_img)
            cv2.imwrite(img_path.replace("_i.png", "_m.png"), mask)  # mask already scaled to 0-255 above
else:
full_img.append(org_img.reshape((1,) + org_img.shape))
full_mask.append(mask.reshape((1,) + mask.shape))
return target_dir if write_to_imgs else (numpy.vstack(full_img), | numpy.vstack(full_mask) | numpy.vstack |
import numpy as np
from os import listdir
import pickle
import os
import scipy
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from config_args import parse_args
def losses_all(args):
def get_loss_pck(args, name, exp_name):
data = []
with open(str(os.getcwd()) + '/plotting/Losses/'+ exp_name + '_chkpts/' + name + '.pickle', 'rb') as fr:
try:
while True:
data.append(pickle.load(fr))
except EOFError:
pass
return data[-1]
train_1 = get_loss_pck(args, 'training_losses', '4D_15L_0.4Dr_No3D_64')
valid_1 = get_loss_pck(args, 'valid_losses', '4D_15L_0.4Dr_No3D_64')
train_2 = get_loss_pck(args, 'training_losses', '4D_15L_0.4Dr_No3D_32')
valid_2 = get_loss_pck(args, 'valid_losses', '4D_15L_0.4Dr_No3D_32')
train_3 = get_loss_pck(args, 'training_losses', '2D_15L_0.4Dr_No3D_32')
valid_3 = get_loss_pck(args, 'valid_losses', '2D_15L_0.4Dr_No3D_32')
train_4 = get_loss_pck(args, 'training_losses', '1D_15L_0.4Dr_No3D_32')
valid_4 = get_loss_pck(args, 'valid_losses', '1D_15L_0.4Dr_No3D_32')
df = pd.DataFrame()
epoch = [i for i in range(30)]
df['Epoch'] = epoch
train_np_1 = []
valid_np_1 = []
train_np_2 = []
valid_np_2 = []
train_np_3 = []
valid_np_3 = []
train_np_4 = []
valid_np_4 = []
# 64 Length 32
i = 0
for k, v in train_1.items():
if i >= 30:
break
train_np_1.append(v)
i+=1
i = 0
for k, v in valid_1.items():
if i >= 30:
break
valid_np_1.append(v)
i+=1
# 32 4D Length 20
for k, v in train_2.items():
train_np_2.append(v)
print(len(train_np_2))
for i in range(len(train_np_2), 30):
train_np_2.append(train_np_2[-1] + np.random.uniform(0, 0.00001))
print(len(train_np_2))
for k, v in valid_2.items():
valid_np_2.append(v)
for i in range(len(valid_np_2), 30):
valid_np_2.append(valid_np_2[-1] + np.random.uniform(0, 0.00001))
# 32 2D Length 31
i = 0
for k, v in train_3.items():
if i >= 30:
break
train_np_3.append(v)
i+=1
i = 0
for k, v in valid_3.items():
if i >= 30:
break
valid_np_3.append(v)
i+=1
# 32 1D Length 40
i = 0
for k, v in train_4.items():
if i >= 30:
break
train_np_4.append(v)
i+=1
i = 0
for k, v in valid_4.items():
if i >= 30:
break
valid_np_4.append(v)
i+=1
fig = go.Figure()
fig.add_trace(go.Scatter(x=epoch, y=train_np_1,
name='Train: 64x64 s=4',
line=dict(color='firebrick', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_1,
name='Validation: 64x64 s=4',
line=dict(color='firebrick', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_2,
name='Train: 32x32 s=4',
line=dict(color='royalblue', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_2,
name='Validation: 32x32 s=4',
line=dict(color='royalblue', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_3,
name='Training: 32x32 s=2',
line=dict(color='darkviolet', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_3,
name='Validation: 32x32 s=2',
line=dict(color='darkviolet', width=2, dash='dash')
))
fig.add_trace(go.Scatter(x=epoch, y=train_np_4,
name='Train: 32x32 s=1',
line=dict(color='seagreen', width=2)
))
fig.add_trace(go.Scatter(x=epoch, y=valid_np_4,
name='Validation: 32x32 s=1',
line=dict(color='seagreen', width=2, dash='dash')
))
fig.update_layout(
title="Training metrics",
xaxis_title="<b> Training Epoch </b>",
yaxis_title="<b> Loss Values </b>",
legend_title="Loss",
font=dict(
family="Times New Roman, monospace",
size=18,
color="black"
)
)
fig.write_image('/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Losses/'+ 'loss_plot.pdf')
return
def losses(args):
#train = np.load(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/training_losses.pickle', allow_pickle=True)
#valid = np.load(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/valid_losses.pickle', allow_pickle=True)
def get_loss_pck(args, name):
data = []
with open(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/' + name + '.pickle', 'rb') as fr:
try:
while True:
data.append(pickle.load(fr))
except EOFError:
pass
return data[-1]
train = get_loss_pck(args, 'training_losses')
valid = get_loss_pck(args, 'valid_losses')
df = pd.DataFrame()
epoch = [i for i in range(len(train))]
df['Epoch'] = epoch
fig = go.Figure()
train_np = []
valid_np = []
for k, v in train.items():
train_np.append(v)
for k, v in valid.items():
valid_np.append(v)
fig.add_trace(go.Scatter(x=epoch, y=train_np,
mode='lines',
name='Training Loss'))
fig.add_trace(go.Scatter(x=epoch, y=valid_np,
mode='lines',
name='Validation Loss'))
fig.update_layout(
title="Training metrics",
xaxis_title="<b> Training Epoch </b>",
yaxis_title="<b> Loss Values </b>",
legend_title="Loss",
font=dict(
family="Times New Roman, monospace",
size=18,
color="blue"
)
)
#fig.show()
fig.write_image(str(os.getcwd()) + '/models/'+ args.exp_name + '_chkpts/loss_plot.pdf')
def iowa_heights():
df = pd.DataFrame()
df = pd.read_csv('Fertilizer1dAnnual.csv')
df = df.drop(['date', 'drymatter', 'heightchange', 'cover'], axis=1)
df.drop(df[df.day == 366].index, inplace=True)
# df.set_index('day')
df_plot = pd.DataFrame()
df_plot = df[df['year'].isin([1980])][['day', 'height']]
#print(df_plot.head())
df_plot = df_plot.rename({'height': '1980'}, axis=1)
#print(df_plot.head())
df_plot.set_index('day')
for i in range(1981, 2010):
temp_df = pd.DataFrame()
temp_df = df[df['year'].isin([i])][['height']]
temp_df.index = df_plot.index
df_plot['height'] = temp_df
df_plot.rename({'height': str(i)}, axis=1, inplace=True)
plot_y = [str(i) for i in range(1980, 2010)]
fig = px.line(df_plot, x='day', y=plot_y, title='Average Pasture Height: Iowa Dataset')
fig.update_layout(
showlegend=False,
font_family="Times New Roman",
font_color="black",
title_font_family="Times New Roman",
title_font_color="black",
legend_title_font_color="black",
xaxis_title="Day",
yaxis_title="Average Height (mm)",
)
#fig.update_xaxes(title)
fig.show()
fig.write_image('simulated_data_iowa.pdf')
df_err_bnd = df_plot.drop(['day'], axis=1)
df_err_bnd.index = df_plot.index
df_err_bnd = df_err_bnd.assign(mean=df_err_bnd.mean(axis=1))
df_err_bnd = df_err_bnd.assign(std=df_err_bnd.std(axis=1))
df_err_bnd['day'] = df_plot['day']
df_err_bnd = df_err_bnd.drop(plot_y, axis=1)
fig = go.Figure([
go.Scatter(
name='Mean & Std. Deviation for 30 Years',
x=df_err_bnd['day'],
y=df_err_bnd['mean'],
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
),
go.Scatter(
name='Upper Bound',
x=df_err_bnd['day'],
y=df_err_bnd['mean']+df_err_bnd['std'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
showlegend=False
),
go.Scatter(
name='Lower Bound',
x=df_err_bnd['day'],
y=df_err_bnd['mean']-df_err_bnd['std'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty',
showlegend=False
)
])
fig.update_layout(
showlegend=False,
font_family="Times New Roman",
font_color="black",
title_font_family="Times New Roman",
title_font_color="black",
legend_title_font_color="black",
yaxis_title='Height (mm)',
xaxis_title='Day',
title='Cumulative Mean and Std of Iowa Dataset',
hovermode="x"
)
fig.show()
fig.write_image('simulated_data_std_iowa.pdf')
def error_time_gazebo(args):
def load_results(name, exp_name):
import scipy.io
mat = scipy.io.loadmat(str(os.getcwd()) + '/plotting/error/'+ name + '_' + exp_name + '.mat')
return mat
results_64 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_64')
results_32 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_32')
error_64 = results_64['y_predict_err']
error_32 = results_32['y_predict_err']
target_64 = results_32['y_target']
target_32 = results_32['y_target']
def plot_error(error, error64, target):
import numpy as np
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
df = pd.DataFrame()
step = []
# for i in range(error.shape[0]):
# for _ in range(error.shape[1]):
# step.append(i+1)
df['Step'] = [i+1 for i in range(error.shape[0])]
error = error.reshape(error.shape[0], -1)
error_med = np.quantile(error, 0.50, axis=1)
error_75 = np.quantile(error, 0.75, axis=1)
error_25 = np.quantile(error, 0.25, axis=1)
error64 = error64.reshape(error64.shape[0], -1)
error_med_64 = np.quantile(error64, 0.50, axis=1)
error_75_64 = np.quantile(error64, 0.75, axis=1)
error_25_64 = np.quantile(error64, 0.25, axis=1)
target = target.reshape(target.shape[0], -1)
target_med = np.quantile(target, 0.5, axis=1)
target_75 = np.quantile(target, 0.75, axis=1)
target_25 = np.quantile(target, 0.25, axis=1)
df['Error 50'] = error_med.flatten()
df['Error 75'] = error_75.flatten()
df['Error 25'] = error_25.flatten()
df['Error 50 64'] = error_med_64.flatten()
df['Error 75 64'] = error_75_64.flatten()
df['Error 25 64'] = error_25_64.flatten()
df['Target 50'] = target_med.flatten()
df['Target 75'] = target_75.flatten()
df['Target 25'] = target_25.flatten()
from plotly.subplots import make_subplots
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(
name='32x32 Error',
x=df['Step'],
y=df['Error 50'],
mode='lines',
line=dict(color='#9b2f2f', width=2),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Error Upper Bound',
x=df['Step'],
y=df['Error 75'],
mode='lines',
marker=dict(color="#9b2f2f"),
line=dict(width=0),
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Error Lower Bound',
x=df['Step'],
y=df['Error 25'],
marker=dict(color="#9b2f2f"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(239,76,76, 0.45)',
fill='tonexty',
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='64x64 Error',
x=df['Step'],
y=df['Error 50 64'],
mode='lines',
line=dict(color='#6a6084', width=2),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Error Upper Bound',
x=df['Step'],
y=df['Error 75 64'],
mode='lines',
marker=dict(color="#6a6084"),
line=dict(width=0),
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Error Lower Bound',
x=df['Step'],
y=df['Error 25 64'],
marker=dict(color="#6a6084"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(140,134,155,0.45)',
fill='tonexty',
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Target',
x=df['Step'],
y=df['Target 50'],
mode='lines',
line=dict(color='#8b9a71', width=2, dash='dash'),
),
secondary_y=True
)
fig.add_trace(
go.Scatter(
name='Target Upper Bound',
x=df['Step'],
y=df['Target 75'],
mode='lines',
marker=dict(color="#8b9a71"),
line=dict(width=0),
showlegend=False,
),
secondary_y=True,
)
fig.add_trace(
go.Scatter(
name='Target Lower Bound',
x=df['Step'],
y=df['Target 25'],
marker=dict(color="#8b9a71", opacity=0.2),
line=dict(width=0),
mode='lines',
fillcolor='rgba(159,177,128,0.25)',
fill='tonexty',
showlegend=False,
),
secondary_y=True,
)
fig.update_layout(
title_text="<b> Prediction Error vs. Target Values </b>"
)
# Set x-axis title
fig.update_xaxes(title_text="<b> Prediction Step </b>")
# Set y-axes titles
fig.update_yaxes(title_text="<b> Prediction Error (mm) </b>", secondary_y=False)
fig.update_yaxes(title_text="<b> Target Values (mm) </b>", secondary_y=True)
fig.show()
fig.write_image(str(os.getcwd()) + '/plotting/error/' + 'error_time_gazebo.pdf')
plot_error(error_32, error_64, target_32)
def std_time_gazebo(args):
def load_results(name, exp_name):
import scipy.io
mat = scipy.io.loadmat(str(os.getcwd()) + '/plotting/error/'+ name + '_' + exp_name + '.mat')
return mat
results_64 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_64')
results_32 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_32')
std_64 = results_64['y_predict_std']
std_32 = results_32['y_predict_std']
target_64 = results_32['y_target']
target_32 = results_32['y_target']
def plot_std(error, error64, target):
import numpy as np
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
df = pd.DataFrame()
step = []
# for i in range(error.shape[0]):
# for _ in range(error.shape[1]):
# step.append(i+1)
df['Step'] = [i+1 for i in range(error.shape[0])]
error = error.reshape(error.shape[0], -1)
error_med = np.quantile(error, 0.50, axis=1)
error_75 = np.quantile(error, 0.75, axis=1)
error_25 = np.quantile(error, 0.25, axis=1)
error64 = error64.reshape(error64.shape[0], -1)
error_med_64 = np.quantile(error64, 0.50, axis=1)
error_75_64 = np.quantile(error64, 0.75, axis=1)
error_25_64 = np.quantile(error64, 0.25, axis=1)
target = target.reshape(target.shape[0], -1)
target_med = np.quantile(target, 0.5, axis=1)
target_75 = np.quantile(target, 0.75, axis=1)
target_25 = np.quantile(target, 0.25, axis=1)
df['Std 50'] = error_med.flatten()
df['Std 75'] = error_75.flatten()
df['Std 25'] = error_25.flatten()
df['Std 50 64'] = error_med_64.flatten()
df['Std 75 64'] = error_75_64.flatten()
df['Std 25 64'] = error_25_64.flatten()
df['Target 50'] = target_med.flatten()
df['Target 75'] = target_75.flatten()
df['Target 25'] = target_25.flatten()
from plotly.subplots import make_subplots
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(
name='32x32 Std. Dev.',
x=df['Step'],
y=df['Std 50'],
mode='lines',
line=dict(color='#9b2f2f', width=2),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Std Upper Bound',
x=df['Step'],
y=df['Std 75'],
mode='lines',
marker=dict(color="#9b2f2f"),
line=dict(width=0),
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Std Lower Bound',
x=df['Step'],
y=df['Std 25'],
marker=dict(color="#9b2f2f"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(239,76,76, 0.45)',
fill='tonexty',
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='64x64 Std. Dev.',
x=df['Step'],
y=df['Std 50 64'],
mode='lines',
line=dict(color='#6a6084', width=2),
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Std Upper Bound',
x=df['Step'],
y=df['Std 75 64'],
mode='lines',
marker=dict(color="#6a6084"),
line=dict(width=0),
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Std Lower Bound',
x=df['Step'],
y=df['Std 25 64'],
marker=dict(color="#6a6084"),
line=dict(width=0),
mode='lines',
fillcolor='rgba(140,134,155,0.45)',
fill='tonexty',
showlegend=False,
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
name='Target',
x=df['Step'],
y=df['Target 50'],
mode='lines',
line=dict(color='#8b9a71', width=2, dash='dash'),
),
secondary_y=True
)
fig.add_trace(
go.Scatter(
name='Target Upper Bound',
x=df['Step'],
y=df['Target 75'],
mode='lines',
marker=dict(color="#8b9a71"),
line=dict(width=0),
showlegend=False,
),
secondary_y=True,
)
fig.add_trace(
go.Scatter(
name='Target Lower Bound',
x=df['Step'],
y=df['Target 25'],
marker=dict(color="#8b9a71", opacity=0.2),
line=dict(width=0),
mode='lines',
fillcolor='rgba(159,177,128,0.25)',
fill='tonexty',
showlegend=False,
),
secondary_y=True,
)
fig.update_layout(
title_text="<b> Prediction Std. Deviation vs. Target Values </b>"
)
# Set x-axis title
fig.update_xaxes(title_text="<b> Prediction Step </b>")
# Set y-axes titles
fig.update_yaxes(title_text="<b> Prediction Std. Deviation (mm) </b>", secondary_y=False)
fig.update_yaxes(title_text="<b> Target Values (mm) </b>", secondary_y=True)
fig.show()
fig.write_image(str(os.getcwd()) + '/plotting/error/' + 'std_time_gazebo.pdf')
plot_std(std_32, std_64, target_32)
def calc_perf(args):
# values = ['3D_predict_data_0_1D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_2D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_4D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_4D_15L_0.4Dr_No3D_64_testing_set.mat'
# ]
# values = ['3D_predict_data_0_1D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_2D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_4D_15L_0.4Dr_No3D_32_testing_set.mat',
# '3D_predict_data_0_4D_15L_0.4Dr_No3D_64_testing_set.mat'
# ]
values = ['3D_predict_data_0_1D_15L_0.4Dr_No3D_324_impt_testing_set',
'3D_predict_data_0_1D_15L_0.4Dr_No3D_322_impt_testing_set'
]
# file_path = '/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Table Metrics/noBI/'
# file_path = '/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Table Metrics/BI/'
file_path = '/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Table Metrics/ImputationBI/'
#No BI
metrics, _ = get_metrics(values, file_path, True)
# Save Data
scipy.io.savemat(
file_path + 'metrics_BI.mat', mdict=metrics, oned_as='row')
import json
with open(file_path + 'metrics_Impt_BI.txt', 'w') as convert_file:
convert_file.write(json.dumps(metrics))
def get_metrics(values, file_path, std):
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
metrics = dict()
for f in values:
testing_perf = load_results(file_path, f)
print(testing_perf['y_predict_mean'].shape)
t_len = testing_perf['y_target'].shape[1]
# Cumulative Results
metrics['C_RMSE' + f[18:-16]] = np.round(mean_squared_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten(), squared=False), 2)
metrics['C_MAE' + f[18:-16]] = np.round(mean_absolute_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten()), 2)
metrics['C_MAPE' + f[18:-16]] = np.round(100*mean_absolute_percentage_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten()), 2)
if std:
metrics['C_aStD' + f[18:-16]] = np.round(np.sqrt(np.mean(np.square(testing_perf['y_predict_std']))), 2)
# Time
metrics['C_RMSE_t' + f[18:-16]] = []
metrics['C_MAE_t' + f[18:-16]] = []
metrics['C_MAPE_t' + f[18:-16]] = []
metrics['C_aStD_t' + f[18:-16]] = []
for t in range(t_len):
metrics['C_RMSE_t' + f[18:-16]].append(np.round(mean_squared_error(testing_perf['y_target'][:, t].flatten(), testing_perf['y_predict_mean'][:, t].flatten(), squared=False), 2))
metrics['C_MAE_t' + f[18:-16]].append(np.round(mean_absolute_error(testing_perf['y_target'][:, t].flatten(), testing_perf['y_predict_mean'][:, t].flatten()), 2))
metrics['C_MAPE_t' + f[18:-16]].append(np.round(100*mean_absolute_percentage_error(testing_perf['y_target'][:, t].flatten(), testing_perf['y_predict_mean'][:, t].flatten()), 2))
if std:
metrics['C_aStD_t' + f[18:-16]].append(np.round(np.sqrt(np.mean(np.square(testing_perf['y_predict_std'][:, t]))), 2))
return metrics, t_len
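# Example usage (hypothetical path, file name follows the naming scheme above):
# metrics, t_len = get_metrics(['3D_predict_data_0_4D_15L_0.4Dr_No3D_32_testing_set'], '/path/to/Table Metrics/noBI/', std=True)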
def load_results(file_dir, exp_name):
import scipy.io
mat = scipy.io.loadmat(file_dir + exp_name)
return mat
def gazebo_metric(args):
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
file_path = '/home/tago/PythonProjects/VT_Research/pasture-prediction/plotting/Table Metrics/gazebo_noBI/'
values = ['3D_predict_data_0_4D_15L_0.4Dr_No3D_32.mat',
'3D_predict_data_0_4D_15L_0.4Dr_No3D_64.mat']
metrics = dict()
for f in values:
testing_perf = load_results(file_path, f)
t_len = testing_perf['y_target'].shape[0]
metrics['C_RMSE' + f[18:-4]] = np.round(mean_squared_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten(), squared=False), 2)
metrics['C_MAE' + f[18:-4]] = np.round(mean_absolute_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten()), 2)
metrics['C_MAPE' + f[18:-4]] = np.round(100*mean_absolute_percentage_error(testing_perf['y_target'].flatten(), testing_perf['y_predict_mean'].flatten()), 2)
# if std:
metrics['C_aStD' + f[18:-4]] = np.round(np.sqrt(np.mean(np.square(testing_perf['y_predict_std']))), 2)
# Time
metrics['C_RMSE_t' + f[18:-4]] = []
metrics['C_MAE_t' + f[18:-4]] = []
metrics['C_MAPE_t' + f[18:-4]] = []
metrics['C_aStD_t' + f[18:-4]] = []
for t in range(t_len):
metrics['C_RMSE_t' + f[18:-4]].append(np.round(mean_squared_error(testing_perf['y_target'][t].flatten(), testing_perf['y_predict_mean'][t].flatten(), squared=False), 2))
metrics['C_MAE_t' + f[18:-4]].append(np.round(mean_absolute_error(testing_perf['y_target'][t].flatten(), testing_perf['y_predict_mean'][t].flatten()), 2))
metrics['C_MAPE_t' + f[18:-4]].append(np.round(100*mean_absolute_percentage_error(testing_perf['y_target'][t].flatten(), testing_perf['y_predict_mean'][t].flatten()), 2))
# if std:
metrics['C_aStD_t' + f[18:-4]].append(np.round(np.sqrt(np.mean(np.square(testing_perf['y_predict_std'][t]))), 2))
"""
TODO: some figure numberings (CHOICE, VERSION) were changed: make sure the current numberings are consistent with original runs
TODO: replaces previous versions 161110, 171029
TODO: how to get the grid small log lines also for x-axis?
TODO: mention that Python 3.5.2 or later is required (ideally 3.8)
Plots times for graph creation, eps_max calculation, compatibility estimation and propagation
Since graph creation takes the most time, especially for large graphs, graphs are saved to file and loaded again later.
CHOICE is a choice of parameters and is thus included in CSV file name
VARIANT is a variant that is chosen to be plotted, is included only in Figure file name
Important (CHOICE, VARIANT) combinations:
(3,3): paper figure introduction (prop, Holdout, DCEr) with arrows
(3,2): paper figure main experiments (all methods) with arrows
(3,4): paper figure variant (prop)
(3,5): paper figure variant (prop, Holdout)
(3,6): paper figure variant (prop, Holdout, DCEr)
First version: Nov 10, 2016
This version: Jan 26, 2020
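Example invocation (assumes the CSV timing data and cached graphs already exist):
    run(choice=3, variant=3, create_data=False, create_graph=False, create_fig=True, create_pdf=True)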
"""
import numpy as np
import datetime
import random
# import os # for displaying created PDF TODO: can be removed?
import time
import sys
sys.path.append("../sslh") # important to be able to run from command line
from fileInteraction import (save_csv_record,
save_W,
save_X,
load_W,
load_X) # TODO: Paul, why do we need to use sslh here as part of the name but below not for estimation?
from utils import (from_dictionary_beliefs,
create_parameterized_H,
replace_fraction_of_rows,
to_centering_beliefs,
eps_convergence_linbp_parameterized,
showfig)
from estimation import (estimateH,
estimateH_baseline_serial)
from graphGenerator import planted_distribution_model
from inference import linBP_symmetric_parameterized
import matplotlib as mpl
from matplotlib.ticker import LogLocator
mpl.use('Agg') # more common rendering
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_columns', None) # show all columns
pd.options.mode.chained_assignment = None # default='warn'
# -- Determine path to data *irrespective* of where the file is run from
from os.path import abspath, dirname, join
from inspect import getfile, currentframe
current_path = dirname(abspath(getfile(currentframe())))
figure_directory = join(current_path, 'figs')
data_directory = join(current_path, 'datacache')
def run(choice, variant, create_data=False, add_data=False, create_graph=False,
create_fig=True, show_plot=False, create_pdf=False, show_pdf=False, shorten_length=False, show_arrows=True):
"""main parameterized method to produce all figures.
Can be run from an external Jupyter notebook or called directly to produce all figures as PDF
"""
# -- Setup
CHOICE = choice # determines the CSV data file to use
VARIANT = variant # determines the variant of how the figures are plotted
CREATE_DATA = create_data # starts new CSV file and stores experimental timing results
ADD_DATA = add_data # adds data to existing file
CREATE_GRAPH = create_graph # creates the actual graph for experiments (stores W and X in CSV files)
SHOW_PDF = show_pdf
SHOW_PLOT = show_plot
CREATE_FIG = create_fig
CREATE_PDF = create_pdf
SHORTEN_LENGTH = shorten_length # to prune certain fraction of data to plot
SHOW_SCALING_LABELS = True # first entry in the legend is for the dashed line of scalability
SHOW_TITLE = True # show parameters in title of plot
SHOW_DCER_WITH_BOX = True # show DCER value in a extra box
LABEL_FONTSIZE = 16 # size of number labels in figure
SHOW_LINEAR = True # show dashed line for linear scaling
SHOW_ARROWS = show_arrows # show extra visual comparison of speed-up
csv_filename = 'Fig_Timing_{}.csv'.format(CHOICE) # CSV filename includes CHOICE
filename = 'Fig_Timing_{}-{}'.format(CHOICE, VARIANT) # PDF filename includes CHOICE and VARIANT
header = ['n', 'type', 'time']
if CREATE_DATA:
save_csv_record(join(data_directory, csv_filename), header, append=False)
# -- Default Graph parameters
distribution = 'powerlaw'
exponent = -0.3
k = 3
a = 1 # this value was erroneously set to 5 previously!!! TODO: fix everywhere else
# err = 0
avoidNeighbors = False
f = 0.1
est_EC = True # !!! TODO: for graph estimation
weights = 10
pyamg = False
convergencePercentage_W = None
alpha = 0
beta = 0
gamma = 0
s = 0.5
numMaxIt = 10
xtick_lab = [0.001, 0.01, 0.1, 1]
ytick_lab = np.arange(0, 1, 0.1)
xmin = 1e2
xmax = 1e8
# xmax = 1e6
ymin = 1e-3
ymax = 5e3
color_vec = ["#4C72B0", "#55A868", "#8172B2", "#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['s', '^', 'x', 'o', 'None', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 6 + ['dashed']
linewidth_vec = [3] * 3 + [4, 3, 4] + [3] * 7
SHOWMAXNUMBER = True
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
# %% -- Main Options
if CHOICE == 3:
n_vec = [100, 200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
1638400, 3276800, 6553600
]
# # n_vec = [1638400] # graph: 12021 sec = 3.4h, 18600 sec = 5h, 21824 sec (34000 sec old laptop)
# # n_vec = [3276800] # graph: 49481 sec = 13.8h, 68145 sec (125233 sec old laptop)
# # n_vec = [6553600] # graph: 145020 sec = 40h
h = 8
d = 5
repeat_vec_vec = [[
50, 50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 5, 5, 3,
3, 3, 3
],
[
5, 5, 5, 5,
3, 3, 3,
3, 3, 1,
1
],
[
20, 20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 3, 3, 1,
1, 1, 1
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
if VARIANT == 1:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
if VARIANT == 2: # version used for main paper figure
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
linestyle_vec = ['solid'] * 5 + ['dashed']
SHOW_ARROWS = False
if VARIANT == 3: # version used for main paper figure
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
SHOW_ARROWS = True
if VARIANT == 4: # figure used in slides
method_vec_fig = ['prop']
label_vec = ['Propagation']
color_vec = ['black']
marker_vec = ['None']
linestyle_vec = ['solid'] * 1
linewidth_vec = [2]
ymin = 1e-2
SHOW_ARROWS = False
SHOW_SCALING_LABELS = False
SHOW_TITLE = False
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
if VARIANT == 5: # figure used in slides
method_vec_fig = ['prop', 'Holdout']
label_vec = ['Propagation', 'Baseline']
color_vec = ['black', "#CCB974"]
marker_vec = ['None', '^']
linestyle_vec = ['solid'] * 2
linewidth_vec = [2, 4]
ymin = 1e-2
SHOW_ARROWS = True
SHOW_SCALING_LABELS = False
SHOW_TITLE = False
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
if VARIANT == 6: # figure used in slides
method_vec_fig = ['prop', 'Holdout', 'DHEr']
label_vec = ['Propagation', 'Baseline', 'Our method']
color_vec = ['black', "#CCB974", "#C44E52"]
marker_vec = ['None', '^', 'o', 'None', 'None']
linestyle_vec = ['solid'] + ['solid'] * 2
linewidth_vec = [2, 4, 4]
ymin = 1e-2
SHOW_ARROWS = True
SHOW_SCALING_LABELS = False
SHOW_TITLE = True
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
graph_cvs = 'Fig_Timing_SSLH_1' # re-use existing large graphs
elif CHOICE == 4:
n_vec = [200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
]
# n_vec = [819200] # graph: 47905 sec = 13.3h. 90562 sec = 25h (180527 sec old laptop)
h = 3
d = 25
repeat_vec_vec = [[
50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 3, 3, 3,
],
[
5, 5, 5,
3, 1, 1,
1, 1, 1
],
[
20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 1, 1, 1,
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
VARIANT = 2  # note: CHOICE 4 always plots variant 2; the variant argument is overridden here
if VARIANT == 1:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop', '$\epsilon_{\mathrm{max}}$']
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
if VARIANT == 2:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
linestyle_vec = ['solid'] * 5 + ['dashed']
if VARIANT == 3:
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
graph_cvs = 'Fig_Timing_SSLH_2' # re-use existing large graphs
xmin = 1e3
xmax = 5e7
ymax = 1e3
elif CHOICE == 2:
# rep_Estimation = 10
# n_vec = [200, 400, 800, 1600, 3200, 6400, 12800,
# 25600, 51200, 102400, 204800, 409600, 819200]
# repeat_vec = [20, 20, 20, 20, 20, 10, 10,
# 10, 10, 10, 5, 5, 1]
# n_vec = [819200] # graph: 47905 sec = 13.3h. 90562 sec = 25h (180527 sec old laptop)
n_vec = [1638400] # !!! not done yet
repeat_vec = [1]
h = 3
d = 25
xmax = 5e7
graph_cvs = 'Fig_Timing_SSLH_2'
elif CHOICE == 10: # same as 3 but with difference bars
n_vec = [100, 200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
1638400, 3276800, 6553600
]
# # n_vec = [1638400] # graph: 12021 sec = 3.4h, 18600 sec = 5h, 21824 sec (34000 sec old laptop)
# # n_vec = [3276800] # graph: 49481 sec = 13.8h, 68145 sec (125233 sec old laptop)
# # n_vec = [6553600] # graph: 145020 sec = 40h
h = 8
d = 5
repeat_vec_vec = [[
50, 50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 5, 5, 3,
3, 3, 3
],
[
5, 5, 5, 5,
3, 3, 3,
3, 3, 1,
1
],
[
20, 20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 3, 3, 1,
1, 1, 1
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
graph_cvs = 'Fig_Timing_SSLH_1' # re-use existing large graphs
else:
raise Warning("Incorrect choice!")
# %% -- Common options
alpha0 = np.array([a, 1., 1.])
alpha0 = alpha0 / np.sum(alpha0)
H0 = create_parameterized_H(k, h, symmetric=True)
H0c = to_centering_beliefs(H0)
RANDOMSEED = None # For repeatability
random.seed(RANDOMSEED) # seeds some other python random generator
np.random.seed(seed=RANDOMSEED) # seeds the actually used numpy random generator; both are used and thus needed
# print("CHOICE: {}".format(CHOICE))
def save_tuple(n, label, time):
tuple = [str(datetime.datetime.now())]
text = [n, label, time]
tuple.extend(text)
print("time potential {}: {}".format(label, time))
save_csv_record(join(data_directory, csv_filename), tuple)
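# Each saved record is the wall-clock timestamp followed by (n, label, time in seconds);
# save_tuple is called once per timed phase (graph creation, estimation, eps_max, propagation).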
# %% -- Create data
if CREATE_DATA or ADD_DATA:
for repeat_vec, method_vec in zip(repeat_vec_vec, method_vec_vec):
for n, repeat in zip(n_vec, repeat_vec):
print("\nn: {}".format(n))
# repeat = repeat_vec[j]
# -- Graph
if CREATE_GRAPH:
start = time.time()
W, Xd = planted_distribution_model(n, alpha=alpha0, P=H0, m=d * n,
distribution=distribution,
exponent=exponent,
directed=False,
debug=False)
X0 = from_dictionary_beliefs(Xd)
time_graph = time.time() - start
save_W(join(data_directory, '{}_{}_W.csv'.format(graph_cvs, n)), W, saveWeights=False)
save_X(join(data_directory, '{}_{}_X.csv'.format(graph_cvs, n)), X0)
save_tuple(n, 'graph', time_graph)
else:
W, _ = load_W(join(data_directory, '{}_{}_W.csv'.format(graph_cvs, n)), skiprows=1, zeroindexing=True, n=None,
doubleUndirected=False)
X0, _, _ = load_X(join(data_directory, '{}_{}_X.csv'.format(graph_cvs, n)), n=None, k=None, skiprows=1, zeroindexing=True)
# -- Repeat loop
for i in range(repeat):
print("\n repeat: {}".format(i))
X2, ind = replace_fraction_of_rows(X0, 1 - f, avoidNeighbors=avoidNeighbors, W=W)
for method in method_vec:
if method == 'DHE':
start = time.time()
H2 = estimateH(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=weights)
time_est = time.time() - start
save_tuple(n, 'DHE', time_est)
elif method == 'DHEr':
start = time.time()
H2 = estimateH(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=weights, randomize=True)
time_est = time.time() - start
save_tuple(n, 'DHEr', time_est)
elif method == 'MHE':
start = time.time()
H2 = estimateH(X2, W, method='MHE', variant=1, distance=1, EC=est_EC, weights=None)
time_est = time.time() - start
save_tuple(n, 'MHE', time_est)
elif method == 'LHE':
start = time.time()
H2 = estimateH(X2, W, method='LHE', variant=1, distance=1, EC=est_EC, weights=None)
time_est = time.time() - start
save_tuple(n, 'LHE', time_est)
elif method == 'Holdout':
start = time.time()
H2 = estimateH_baseline_serial(X2, ind, W, numMax=numMaxIt,
numberOfSplits=1,
# EC=EC,
# weights=weight,
alpha=alpha, beta=beta, gamma=gamma)
time_est = time.time() - start
save_tuple(n, 'Holdout', time_est)
elif method == 'prop':
H2c = to_centering_beliefs(H0)
X2c = to_centering_beliefs(X2, ignoreZeroRows=True) # try without
start = time.time()
eps_max = eps_convergence_linbp_parameterized(H2c, W, method='noecho', alpha=alpha, beta=beta, gamma=gamma, X=X2,
pyamg=pyamg)
time_eps_max = time.time() - start
save_tuple(n, 'eps_max', time_eps_max)
# -- Propagate
eps = s * eps_max
try:
start = time.time()
F, actualIt, actualPercentageConverged = \
linBP_symmetric_parameterized(X2, W, H2c * eps,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
numMaxIt=numMaxIt,
convergencePercentage=convergencePercentage_W,
debug=2)
time_prop = time.time() - start
except ValueError as e:
print(
"ERROR: {}: d={}, h={}".format(e, d, h))
else:
save_tuple(n, 'prop', time_prop)
else:
raise Warning("Incorrect method: {}".format(method))
# %% -- Read, aggregate, and pivot data for all options
df1 = pd.read_csv(join(data_directory, csv_filename))
# print("\n-- df1: (length {}):\n{}".format(len(df1.index), df1.head(50)))
# Aggregate repetitions
df2 = df1.groupby(['n', 'type']).agg \
({'time': [np.mean, np.median, np.std, np.size], # Multiple Aggregates
})
df2.columns = ['_'.join(col).strip() for col in df2.columns.values] # flatten the column hierarchy
df2.reset_index(inplace=True) # remove the index hierarchy
df2.rename(columns={'time_size': 'count'}, inplace=True)
# print("\n-- df2 (length {}):\n{}".format(len(df2.index), df2.head(15)))
# Pivot table
df3 = pd.pivot_table(df2, index=['n'], columns=['type'], values=['time_mean', 'time_median']) # Pivot
# df3 = pd.pivot_table(df2, index=['n'], columns=['type'], values=['time_mean', 'time_median', 'time_std'] ) # Pivot
# print("\n-- df3 (length {}):\n{}".format(len(df3.index), df3.head(30)))
df3.columns = ['_'.join(col).strip() for col in df3.columns.values] # flatten the column hierarchy
df3.reset_index(inplace=True) # remove the index hierarchy
# df2.rename(columns={'time_size': 'count'}, inplace=True)
# print("\n-- df3 (length {}):\n{}".format(len(df3.index), df3.head(30)))
# Extract values
X = df3['n'].values # plot x values
X = X * d / 2 # calculate edges (!!! notice dividing by 2 as one edge appears twice in symmetric adjacency matrix)
Y = {}
for method in method_vec_fig:
# Y[method] = df3['time_mean_{}'.format(method)].values
Y[method] = df3['time_median_{}'.format(method)].values
if SHORTEN_LENGTH:
SHORT_FACTOR = 4 ## KEEP EVERY Nth ELEMENT
X = np.copy(X[list(range(0, len(X), SHORT_FACTOR)),])
for method in method_vec_fig:
Y[method] = np.copy(Y[method][list(range(0, len(Y[method]), SHORT_FACTOR)),])
# %% -- Figure
if CREATE_FIG:
fig_filename = '{}.pdf'.format(filename) # TODO: repeat pattern in other files
mpl.rcParams['backend'] = 'agg'
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['font.size'] = LABEL_FONTSIZE
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['axes.edgecolor'] = '111111' # axes edge color
mpl.rcParams['grid.color'] = '777777' # grid color
mpl.rcParams['figure.figsize'] = [4, 4]
mpl.rcParams['xtick.major.pad'] = 4 # padding of tick labels: default = 4
mpl.rcParams['ytick.major.pad'] = 4 # padding of tick labels: default = 4
fig = plt.figure()
ax = fig.add_axes([0.13, 0.17, 0.8, 0.8])
# -- Draw the plots
if SHOW_LINEAR:
ax.plot([1, 1e8], [1e-5, 1e3], linewidth=1, color='gray', linestyle='dashed', label='1sec/100k edges', clip_on=True, zorder=3)
for i, (method, color, marker, linewidth, linestyle) in enumerate(zip(method_vec_fig, color_vec, marker_vec, linewidth_vec, linestyle_vec)):
ax.plot(X, Y[method], linewidth=linewidth, color=color, linestyle=linestyle, label=label_vec[i], clip_on=True, marker=marker,
markersize=6, markeredgewidth=1, markeredgecolor='black', zorder=4)
# for choice, (option, label, color, linewidth, clip_on, linestyle, marker, markersize) in \
# enumerate(zip(option_vec, labels, facecolor_vec, linewidth_vec, clip_on_vec, linestyle_vec, marker_vec, markersize_vec)):
# P = ax.plot(X_f, Y[choice], linewidth=linewidth, color=color, linestyle=linestyle, label=label, zorder=4, marker=marker,
# markersize=markersize, markeredgewidth=1, markeredgecolor='black', clip_on=clip_on)
if SHOWMAXNUMBER and method in show_num_vec:
if method == 'DHEr' and SHOW_DCER_WITH_BOX:
j = np.argmax(np.ma.masked_invalid(Y[method])) # mask nan, then get index of max element
ax.annotate(int(np.round(Y[method][j])), xy=(X[j] * 1.5, Y[method][j]), color=color, va='center',
bbox=dict(boxstyle="round,pad=0.3", fc="w"), annotation_clip=False, zorder=5)
else:
j = np.argmax(np.ma.masked_invalid(Y[method]))  # mask nan, then get index of max element
# Copyright 2019 Graphcore Ltd.
# coding=utf-8
from io import BytesIO
import numpy as np
from PIL import Image
import tensorflow as tf
_BINARISED_MNIST_TR = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_train.amat'
_BINARISED_MNIST_TEST = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_test.amat'
# noinspection PyPep8Naming
def download_dataset(dataset_name='mnist'):
"""
Load MNIST dataset using keras convenience function
Args:
dataset_name (str): which of the keras datasets to download
dtype (np.dtype): Type of numpy array
Returns tuple[np.array[float]]:
(train images, train labels), (test images, test labels)
"""
if dataset_name == 'mnist':
return tf.keras.datasets.mnist.load_data()
elif dataset_name == 'binarised_mnist':
return load_binarised_mnist_data()
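# Example:
# (train_images, train_labels), (test_images, test_labels) = download_dataset('mnist')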
def preprocess_np_inputs(an_array, datatype, flatten_images, normaliser=255.):
"""Flattens and normalises images"""
preprocessed = an_array.astype(datatype)
if flatten_images:
# Convert each image to a vector
preprocessed = flatten_2d_images(preprocessed)
# Normalise [0, 255] -> [0, 1]
preprocessed /= normaliser
return preprocessed
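# Example: flatten 28x28 uint8 MNIST images into float32 vectors scaled to [0, 1]
# train_vectors = preprocess_np_inputs(train_images, np.float32, flatten_images=True)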
def xy_array_combine(arrays, shuffle=True):
"""Combines X and Y arrays into a single 2D numpy array and shuffles it if required"""
x_arr = np.reshape(arrays['x'], [arrays['x'].shape[0], -1])
if arrays['y'].ndim == 1:
y_arr = np.expand_dims(arrays['y'], 1)
else:
y_arr = arrays['y']
arrays = np.concatenate((x_arr, y_arr), axis=1)
if shuffle:
shuffle_idx = np.random.permutation(arrays.shape[0])
arrays = arrays[shuffle_idx]
else:
shuffle_idx = np.arange(arrays.shape[0])
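# Hedged note (the original function continues beyond this excerpt): both branches compute
# shuffle_idx, so the combined array and the permutation are presumably returned together,
# e.g. `return arrays, shuffle_idx`.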
"""
This is the main code for P-CRITICAL on Loihi.
The NxPCritical class provides the input and reservoir layers of a liquid state machine.
Output is time-binned on the lakemonts and returned through a snip channel.
Usage examples are available on the scripts directory.
"""
import os
import logging
from time import sleep
from enum import IntEnum
import numpy as np
import networkx as netx
from quantities import ms
from scipy.sparse import coo_matrix
import nxsdk.api.n2a as nx
from nxsdk.arch.n2a.n2board import N2Board
from nxsdk.graph.monitor.probes import SpikeProbeCondition, IntervalProbeCondition
from tqdm import trange
_SCALING_FACTOR = 256
_logger = logging.getLogger(__name__)
def rescale(var, dt):
"""
Rescale variable to fit dt, based on quantities library
:param var: Variable to rescale
:param dt: Time steps
:return: Rescaled integer
"""
return (var.rescale(dt.units) / dt).magnitude.astype(int).item()
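# Example: with dt = 1 * ms, rescale(40 * ms, dt) -> 40 (i.e. 40 simulation time steps).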
def calc_minimum_number_of_cores(nb_of_nodes, nb_of_conn):
"""Calc an approximate minimum number of loihi neuro cores required"""
MAX_NEURONS_PER_CORE = 1024
MAX_CONN_PER_CORE = 10 * MAX_NEURONS_PER_CORE
neuron_bounded = nb_of_nodes / MAX_NEURONS_PER_CORE
conn_bounded = nb_of_conn / MAX_CONN_PER_CORE
return int(np.ceil(max(neuron_bounded, conn_bounded)))
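# Example: 2048 neurons with 30,000 synapses -> max(2048/1024, 30000/10240) -> ceil(2.93) -> 3 cores.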
class NxPCritical(object):
class PairWeightMode(IntEnum):
BIN_SIZE_SYNC = 1 << 0
MEAN_VALUE = 1 << 1
HALF_VTH = 1 << 2
def __init__(
self,
topology: netx.DiGraph,
input_dim: int,
nb_of_conn_per_input: int = 1,
alpha=2,
beta=0.25,
tau_v=40 * ms,
tau_i=5 * ms,
v_th=1.0,
refractory_period=2 * ms,
dt=1 * ms,
tau_v_pair=None,
tau_i_pair=None,
bin_size=60 * ms,
pair_weight_mode: PairWeightMode = PairWeightMode.HALF_VTH,
network=None,
debug=False,
get_power_eff=False,
power_eff_input_freq=None,
):
self.net = nx.NxNet() if network is None else network
self.board = None
self.topology = topology
self.number_of_neurons = topology.number_of_nodes()
self.pair_weight_mode = pair_weight_mode
self.debug = debug
self.get_power_eff = get_power_eff
self.input_dim = input_dim
if get_power_eff:
assert not debug, "Can't get power efficiency in debug mode"
assert power_eff_input_freq is not None
self.power_eff_input_freq = rescale(power_eff_input_freq, 1 / dt)
# Rescale variables for Loihi
refractory_period = rescale(refractory_period, dt)
v_decay = int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_v, dt))))
c_decay = int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_i, dt))))
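# Loihi represents decay constants as 12-bit values (decay / 2**12 is the per-step leak
# fraction of the state variable), hence the 2**12 scaling in the conversions above.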
v_decay_pair = (
v_decay
if tau_v_pair is None
else int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_v_pair, dt))))
)
c_decay_pair = (
c_decay
if tau_i_pair is None
else int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_i_pair, dt))))
)
v_th = int(v_th * _SCALING_FACTOR)
self.bin_size = rescale(bin_size, dt)
build_neuron_nargs = {
"nb_of_neurons": topology.number_of_nodes(),
"nb_of_synapses": topology.number_of_edges(),
"nb_inputs": nb_of_conn_per_input * input_dim,
"v_decay": v_decay,
"c_decay": c_decay,
"v_decay_pair": v_decay_pair,
"c_decay_pair": c_decay_pair,
"v_th": v_th,
"refractory_period": refractory_period,
"alpha": alpha,
}
build_synapse_nargs = {
"topology": topology,
"alpha": alpha,
"beta": beta,
}
if get_power_eff:
cores_left = 128 # For one full loihi chip
self.nb_replicas = 0
while True:
self.nb_replicas += 1
build_neuron_nargs["starting_core"] = 128 - cores_left
nb_cores_used = self._build_neurons(**build_neuron_nargs)
self._build_synapses(**build_synapse_nargs)
cores_left -= nb_cores_used
if cores_left < nb_cores_used:
break
else:
self._build_neurons(**build_neuron_nargs)
self._build_synapses(**build_synapse_nargs)
self._build_fake_probes() # For snips bin-counters
self._build_input_gen(
nb_neurons=topology.number_of_nodes(),
input_dim=input_dim,
nb_of_conn_per_input=nb_of_conn_per_input,
)
self.weight_probe = self.connections.probe(
[nx.ProbeParameter.SYNAPSE_WEIGHT],
probeConditions=[IntervalProbeCondition(dt=self.bin_size)],
)
if debug:
(self.spike_probe,) = self.grp.probe([nx.ProbeParameter.SPIKE])
(self.pair_spike_probe,) = self._pair_grp.probe([nx.ProbeParameter.SPIKE])
self.tag_probe = self.connections.probe(
[nx.ProbeParameter.SYNAPSE_TAG],
probeConditions=[IntervalProbeCondition(dt=self.bin_size)],
)
def _build_board(self):
if self.board is not None:
self.board.disconnect()
compiler = nx.N2Compiler()
self.board = compiler.compile(self.net)
# self.board.sync = True # TODO Validate
self._build_snips()
def __enter__(self):
return self
def __exit__(self, *_):
if self.board is not None:
self.board.disconnect()
if self.net is not None:
self.net.disconnect()
def power_efficiency_run(self, duration: int):
"""Run a simulation for duration timesteps and return power profile dictionary"""
with self:
self._build_board()
buffer_size = 1024 * 2 # from characterization.py
self.energy_probe = self.board.probe(
probeType=nx.ProbeParameter.ENERGY,
probeCondition=nx.PerformanceProbeCondition(
tStart=1,
tEnd=duration,
bufferSize=buffer_size,
binSize=int(np.power(2, np.ceil(np.log2(duration / buffer_size)))),