prompt | completion | api
---|---|---
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for schedules.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from rlax._src import schedules
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
class PolynomialTest(parameterized.TestCase):
def test_linear(self, compile_fn, place_fn):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = schedules.polynomial_schedule(10., 20., 1, 10)
# Optionally compile.
schedule_fn = compile_fn(schedule_fn)
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Optionally convert to device array.
step_count = place_fn(count)
# Compute next value.
generated_vals.append(schedule_fn(step_count))
# Test output.
expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals))
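# Why these expected values: polynomial_schedule(10., 20., power=1,
# transition_steps=10) interpolates linearly from 10 to 20 over the first
# 10 steps (10, 11, ..., 19) and then holds the final value 20, which is
# exactly what expected_vals encodes.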
# -*- coding: utf-8 -*-
import numpy as np
import time
# Rotating hyperplane dataset
def create_hyperplane_dataset(n_samples, n_dim=2, plane_angle=0.45):
w = np.dot(np.array([[np.cos(plane_angle), -np.sin(plane_angle)], [np.sin(plane_angle), np.cos(plane_angle)]]), np.array([1.0, 0.0]))  # second np.dot operand assumed (snippet truncated): rotate the unit normal [1, 0]
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
import numpy as np
from numpy.testing.nosetester import import_nose
from scipy._lib._version import NumpyVersion
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
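# Usage sketch (an assumed example, not part of the original shim): verify
# that a float32 exp overflow emits a RuntimeWarning and still returns inf.
#
#     def overflowing():
#         return np.exp(np.float32(1e3))
#
#     result = _assert_warns(RuntimeWarning, overflowing)
#     assert np.isinf(result)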
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
Notes
-----
.. versionadded:: 1.8.0
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
if sys.version_info.major >= 3:
funcname = nose.tools.assert_raises_regex
else:
# Only present in Python 2.7, missing from unittest in 2.6
funcname = nose.tools.assert_raises_regexp
return funcname(exception_class, expected_regexp, callable_obj,
*args, **kwargs)
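# Usage sketch: assert_raises_regex(ValueError, "invalid literal", int, "abc")
# passes, because int("abc") raises ValueError("invalid literal for int() ...").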
if NumpyVersion(np.__version__) >= '1.10.0':
from numpy import broadcast_to
else:
# Definition of `broadcast_to` from numpy 1.10.0.
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
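# Semantics this shim reproduces (illustrative, matching numpy >= 1.10):
#
#     >>> x = np.array([1, 2, 3])
#     >>> np.broadcast_to(x, (2, 3))
#     array([[1, 2, 3],
#            [1, 2, 3]])
#
# The result is a read-only view: both rows alias the same underlying data.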
from linlearn import BinaryClassifier, MultiClassifier
from linlearn.robust_means import Holland_catoni_estimator, gmom, alg2
import numpy as np
import gzip
import logging
import pickle
from datetime import datetime
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from scipy.special import logsumexp, softmax
import os
import itertools
from tqdm import tqdm
import joblib
import time
def ensure_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
ensure_directory('exp_archives/')
file_handler = logging.FileHandler(filename='exp_archives/classif_exp.log')
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", handlers=handlers
)
save_results = False
save_fig = True
dataset="MNIST"
logging.info(64*"=")
logging.info("Running new experiment session ON GPU with dataset : %s" % dataset)
logging.info(64*"=")
m_SVRG = 50
step_size = 0.01
max_iter = 10
fit_intercept = True
n_samples = 1000
n_repeats = 2
logging.info("Parameters are : n_repeats = %d , n_samples = %d , max_ter = %d , fit_intercept=%r , m_SVRG = %d" % (n_repeats, n_samples or 0, max_iter, fit_intercept, m_SVRG))
if not save_results:
logging.info("WARNING : results will NOT be saved at the end of this session")
def _images(path):
"""Return images loaded locally."""
with gzip.open(path) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
return pixels.reshape(-1, 784).astype('float64') / 255
def _labels(path):
"""Return labels loaded locally."""
with gzip.open(path) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
def _onehot(integer_labels):
"""Return matrix whose rows are onehot encodings of integers."""
n_rows = len(integer_labels)
n_cols = integer_labels.max() + 1
onehot = np.zeros((n_rows, n_cols), dtype='uint8')
onehot[np.arange(n_rows), integer_labels] = 1
return onehot
return _onehot(integer_labels)
mnist_train_images_file = "mnist_data/train-images-idx3-ubyte.gz"
mnist_train_labels_file = "mnist_data/train-labels-idx1-ubyte.gz"
mnist_test_images_file = "mnist_data/t10k-images-idx3-ubyte.gz"
mnist_test_labels_file = "mnist_data/t10k-labels-idx1-ubyte.gz"
logging.info("loading data ...")
X_train = _images(mnist_train_images_file)[:n_samples]
y_train = _labels(mnist_train_labels_file)[:n_samples]
X_test = _images(mnist_test_images_file)
y_test = _labels(mnist_test_labels_file)
def l1_apply_single(x, t):
if x > t:
return x - t
elif x < -t:
return x + t
else:
return 0.0
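# l1_apply_single is the scalar soft-thresholding (proximal) operator of the
# l1 penalty: prox_{t|.|}(x) = sign(x) * max(|x| - t, 0). A vectorized
# equivalent (a sketch, not used by the code below) would be:
def l1_apply(x, t):
    """Soft-threshold every entry of array x at level t."""
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)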
def sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
if fit_intercept:
w0 = w[0] if lnlearn else w[0,:]
w1 = w[1] if lnlearn else w[1:,:]
else:
w0 = 0
w1 = w
scores = X @ w1 + w0
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
obj = (-scores[np.arange(X.shape[0]), np.argmax(y, axis=1)] + logsumexp(scores, axis=1))
return obj
def objective(X, y, w, fit_intercept=fit_intercept, lnlearn=False):
return sample_objectives(X, y, w, fit_intercept=fit_intercept, lnlearn=lnlearn).mean()
def gradient(X, y, w, fit_intercept=fit_intercept):
scores = X @ w[1:,:] + w[0,:] if fit_intercept else X @ w
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
sftmax = softmax(scores, axis=1) - y  # np.hstack((y, np.zeros((X.shape[0], 1))))
if fit_intercept:
return np.vstack((sftmax[:,:-1].sum(axis=0), X.T @ sftmax[:,:-1]))/X.shape[0]
else:
return (X.T @ sftmax[:, :-1]) / X.shape[0]
def sample_gradients(X, y, w, fit_intercept=fit_intercept):
scores = X @ w[1:,:] + w[0,:] if fit_intercept else X @ w
scores = np.hstack((scores, np.zeros((X.shape[0], 1))))
sftmax = softmax(scores, axis=1) - y  # np.hstack((y, np.zeros((X.shape[0], 1))))
if fit_intercept:
return np.concatenate(
(sftmax[:,np.newaxis,:-1], np.einsum("ij, ik->ijk", X, sftmax[:,:-1])), axis=1)
else:
return np.einsum("ij, ik->ijk", X, sftmax[:,:-1])
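# Sanity-check sketch (assumed helper, not in the original script): the
# analytic gradient above should agree with a centered finite-difference
# estimate of `objective` on small random data.
def _check_gradient(eps=1e-6, n=5, d=3, k=4, seed=0):
    rng = np.random.RandomState(seed)
    Xc = rng.randn(n, d)
    yc = np.eye(k)[rng.randint(k, size=n)]          # one-hot labels, shape (n, k)
    wc = rng.randn(d + int(fit_intercept), k - 1)   # weights, matching gradient()
    g = gradient(Xc, yc, wc)
    g_num = np.zeros_like(wc)
    for idx in np.ndindex(*wc.shape):
        w_hi, w_lo = wc.copy(), wc.copy()
        w_hi[idx] += eps
        w_lo[idx] -= eps
        g_num[idx] = (objective(Xc, yc, w_hi) - objective(Xc, yc, w_lo)) / (2 * eps)
    assert np.allclose(g, g_num, atol=1e-5)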
# def train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept)
# def test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept)
#
# def linlearn_train_loss(w): return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=True)
# def linlearn_test_loss(w): return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=True)
# linlearn_tracked_funs = [linlearn_train_loss, linlearn_test_loss]
linlearn_algorithms = ["mom_cgd", "catoni_cgd", "tmean_cgd"]
def train_loss(w, algo_name=""):
return objective(X_train, y_train, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
def test_loss(w, algo_name=""):
return objective(X_test, y_test, w, fit_intercept=fit_intercept, lnlearn=algo_name in linlearn_algorithms)
tracked_funs = [train_loss, test_loss]
class Record(object):
def __init__(self, shape, capacity):
self.record = np.zeros(capacity) if shape == 1 else np.zeros(tuple([capacity] + list(shape)))
self.cursor = 0
def update(self, value):
self.record[self.cursor] = value
self.cursor += 1
def __len__(self):
return self.record.shape[0]
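# Usage sketch: a Record preallocates `capacity` slots and is filled one
# snapshot per update; the filled prefix is record.record[:record.cursor].
#
#     rec = Record(1, 3)
#     rec.update(0.5); rec.update(0.25)
#     rec.record[:rec.cursor]   # -> array([0.5, 0.25])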
def tmean_cgd(X_train, y_train, l1_penalty=1):
# The previous version passed tracked_funs=linlearn_tracked_funs, which is only
# defined in the commented-out block above; use Record trackers like the other
# cgd variants so the function actually runs.
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="tmean", fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
step_size=step_size, loss="multilogistic")
param_record = Record((X_train.shape[1]+int(fit_intercept), y_train.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
if fit_intercept:
param_tracker = lambda w : param_record.update(np.vstack(w))
else:
param_tracker = lambda w : param_record.update(w)
mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _:time_record.update(time.time())])
return param_record, time_record
# def catoni_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches)*X_train.shape[0] + (i % n_batches)*batch_size for i in range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def catoni_cgd(X_train, y_train, l1_penalty=1):
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="catoni", fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
step_size=step_size, loss="multilogistic")
param_record = Record((X_train.shape[1]+int(fit_intercept), y_train.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
if fit_intercept:
param_tracker = lambda w : param_record.update(np.vstack(w))
else:
param_tracker = lambda w : param_record.update(w)
mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _:time_record.update(time.time())])
return param_record, time_record
# def mom_cgd(X_train, y_train, batch_size=500):
# mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom", fit_intercept=fit_intercept,
# thresholding=False, step_size=step_size*batch_size/1000, loss="multilogistic", batch_size=batch_size)
# mom_logreg.fit(X_train, y_train, tracked_funs=linlearn_tracked_funs)
#
# n_iter = len(mom_logreg.optimization_result_.tracked_funs[0])
# n_batches = X_train.shape[0] // batch_size + int(X_train.shape[0] % batch_size > 0)
# gradient_counts = [(i // n_batches) * X_train.shape[0] + (i % n_batches) * batch_size for i in
# range(n_iter)]
#
# return mom_logreg.optimization_result_.tracked_funs + [gradient_counts]
def mom_cgd(X_train, y_train, l1_penalty=1):
mom_logreg = MultiClassifier(tol=1e-17, max_iter=max_iter, strategy="mom", fit_intercept=fit_intercept, penalty="l1", C=l1_penalty,
step_size=step_size, loss="multilogistic")
param_record = Record((X_train.shape[1]+int(fit_intercept), y_train.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
if fit_intercept:
param_tracker = lambda w : param_record.update(np.vstack(w))
else:
param_tracker = lambda w : param_record.update(w)
mom_logreg.fit(X_train, y_train, trackers=[param_tracker, lambda _:time_record.update(time.time())])
return param_record, time_record
# def SVRG(X, y, grad, m, w0=None, T=max_iter, fit_intercept=fit_intercept, tracked_funs=tracked_funs):
# if w0 is None:
# w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
# w_tilde = w0
# wt = w0
# step = step_size*(X.shape[0]/m + 2)/1000
# tracks = [[obj(w0)] for obj in tracked_funs] + [[0]]
# for i in tqdm(range((T*500)//(X.shape[0] + 2*m) + 1), desc="SVRG"):
# mu = grad(X, y, w_tilde, fit_intercept=fit_intercept)
# additional_gradients = X.shape[0]
# for j in range(m):
# ind = np.random.randint(X.shape[0])
# X_ind, y_ind = X[ind:ind+1,:], y[ind:ind+1,:]
# wt -= step*(grad(X_ind, y_ind, wt, fit_intercept=fit_intercept) - grad(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
# additional_gradients += 2
# for idx, obj in enumerate(tracked_funs):
# tracks[idx].append(obj(wt))
# tracks[-1].append(tracks[-1][-1] + additional_gradients)
# additional_gradients = 0
# w_tilde = wt
# return tracks
def SVRG(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
if w0 is None:
w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
w_tilde = w0
wt = w0
step = step_size/(X.shape[0])
m = X.shape[0]
param_record = Record((X.shape[1]+int(fit_intercept), y.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
for i in tqdm(range(T), desc="SVRG"):
mu = gradient(X, y, w_tilde, fit_intercept=fit_intercept)
for j in range(m):
ind = np.random.randint(X.shape[0])
X_ind, y_ind = X[ind:ind+1,:], y[ind:ind+1]
wt -= step*(gradient(X_ind, y_ind, wt, fit_intercept=fit_intercept) - gradient(X_ind, y_ind, w_tilde, fit_intercept=fit_intercept) + mu)
w_tilde = wt
param_record.update(wt)
time_record.update(time.time())
return param_record, time_record
def SGD(X, y, w0=None, T=max_iter, fit_intercept=fit_intercept):
if w0 is None:
w0 = np.zeros((X.shape[1] + int(fit_intercept), y.shape[1]-1))
wt = w0
step = step_size/(X.shape[0])
param_record = Record((X.shape[1]+int(fit_intercept), y.shape[1]-1), max_iter)
time_record = Record(1, max_iter)
for i in tqdm(range(T), desc="SGD"):
index = np.random.randint(X.shape[0])
import numpy as np
import scipy.optimize
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
try:
import matplotlib.pyplot as plt
can_plot = True
except ImportError:
can_plot = False
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return np.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
np.random.seed(100)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample", "not can_plot")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample", "not can_plot")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
np.random.seed(1000)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = np.array([2.0])
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = np.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
assert np.allclose(res.mfit, mean_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result + 2.0 * res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
np.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
assert np.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.allclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_all_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.npar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = np.sqrt(2.0*test_sexp)
test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])
assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
npar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * npar
test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert np.isclose(test_aic, self.optres.aic)
assert np.isclose(test_bic, self.optres.bic)
assert np.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
def test_compute_covariance_with_approx_hessian(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.allclose(optres.cov, hess_inv)
assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))
def test_print_summary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_summary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
# store all the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
test_mean = 2.0
test_std = 0.2
assert np.isclose(test_mean, s.mean[0], rtol=0.1)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.linspace(1, 10.0, nfreq)
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
@pytest.mark.skipif("not can_plot")
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
assert np.absolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert np.allclose(model, pe_model)
def test_generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
pe = PSDParEst(ps)
max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)
assert np.isclose(max_x, ps.freq[mp_ind])
assert max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = np.ones_like(ps.freq)
max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)
assert np.isclose(max_y[0], 2*max_power)
assert np.isclose(max_x[0], ps.freq[mp_ind])
assert max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
max_post=False, seed=seed)
assert maxpow_sim.shape[0] == nsim
assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
"""Test correlation and distance correlation estimators."""
import numpy as np
from frites.estimator import CorrEstimator, DcorrEstimator
array_equal = np.testing.assert_array_equal
class TestCorrEstimator(object):
def test_corr_definition(self):
"""Test definition of correlation estimator."""
CorrEstimator()
def test_corr_estimate(self):
"""Test getting the core function."""
x, y = np.random.rand(10, 1, 100), np.random.rand(10, 1, 100)
cat = np.array([0] * 50 + [1] * 50)
est = CorrEstimator()
for func in [0, 1]:
if func == 0: # estimator.get_function()
fcn = est.get_function()
elif func == 1: # estimator.estimate
fcn = est.estimate
# no categories
array_equal(fcn(x[0, 0, :], y[0, 0, :]).shape, (1, 1))
array_equal(fcn(x[0, :, :], y[0, 0, :]).shape, (1, 1))
array_equal(fcn(x, y).shape, (1, 10))
# with categories
array_equal(fcn(x[0, 0, :], y[0, 0, :],
categories=cat).shape, (2, 1))
array_equal(fcn(x[0, :, :], y[0, 0, :],
categories=cat).shape, (2, 1))
array_equal(fcn(x, y, categories=cat).shape, (2, 10))
def test_corr_functional(self):
"""Functional test of the correlation."""
fcn = CorrEstimator().get_function()
# no categories
x, y = np.random.rand(2, 1, 100), np.random.rand(100)
x[1, ...] += y.reshape(1, -1)
corr = fcn(x, y).ravel()
assert corr[0] < corr[1]
# with categories
x, y = np.random.rand(100), np.random.rand(100)
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import re
import numpy as np
import tensorflow as tf
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from sklearn.model_selection import train_test_split
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam
from google.cloud.storage import blob, bucket, client
import trainer.dataset
import trainer.model
import trainer.ml_helpers
import trainer.top_words
def generate_experiment_fn(**experiment_args):
"""Create an experiment function.
Args:
experiment_args: keyword arguments to be passed through to experiment
See `tf.contrib.learn.Experiment` for full args.
Returns:
A function:
(tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment
This function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
input functions.
"""
def _experiment_fn(config, hparams):
index_to_component = {}
if hparams.train_file:
with open(hparams.train_file) as f:
if hparams.trainer_type == 'spam':
training_data = trainer.ml_helpers.spam_from_file(f)
else:
training_data = trainer.ml_helpers.component_from_file(f)
else:
training_data = trainer.dataset.fetch_training_data(hparams.gcs_bucket,
hparams.gcs_prefix, hparams.trainer_type)
tf.logging.info('Training data received. Len: %d' % len(training_data))
if hparams.trainer_type == 'spam':
X, y = trainer.ml_helpers.transform_spam_csv_to_features(
training_data)
else:
top_list = trainer.top_words.make_top_words_list(hparams.job_dir)
X, y, index_to_component = trainer.ml_helpers \
.transform_component_csv_to_features(training_data, top_list)
tf.logging.info('Features generated')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x=trainer.model.feature_list_to_dict(X_train, hparams.trainer_type),
y=np.array(y_train),
num_epochs=hparams.num_epochs,
batch_size=hparams.train_batch_size,
shuffle=True
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x=trainer.model.feature_list_to_dict(X_test, hparams.trainer_type),
y=np.array(y_test))
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ctypes
import numpy
import sys
from ctypes import CFUNCTYPE, POINTER, c_double, c_int, c_int32, c_void_p, c_size_t
from numpy.ctypeslib import ndpointer
from .common import SubSolver
from ..model import walk_shape
from ..reparametrization import Reparametrization
class LPSolver(SubSolver):
def get_repametrization(self):
raise NotImplementedError
class TRWS(LPSolver):
DEFAULT_PARAMETERS = {
'max_iterations': 2000,
'threads': 1,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self._init_library()
self.model = model
self._energy = self._energy_create(model.number_of_variables, model.shape,
sum(1 for x in model.factors if x.number_of_variables > 1))
edge_counter = 0
for i, factor in enumerate(model.factors):
if factor.number_of_variables == 1:
self._energy_add_unary(self._energy, factor.variables[0], factor.data)
elif factor.number_of_variables == 2:
self._energy_add_pairwise(self._energy, edge_counter, *factor.variables, factor.data)
edge_counter += 1
else:
raise RuntimeError('Unsupported factor arity.')
self._energy_finalize(self._energy)
self._solver = self._solver_create(self._energy)
def __del__(self):
if self._energy:
self._energy_destroy(self._energy)
self._energy = None
if self._solver:
self._solver_destroy(self._solver)
self._solver = None
def _init_library(self):
self._lib = ctypes.cdll.LoadLibrary('libcombilp_trws_stub.so')
self._energy_create = self._lib.combilp_trws_stub_energy_create
self._energy_create.argtypes = [c_int32, ndpointer(dtype=c_int32), c_int32]
self._energy_create.restype = c_void_p
self._energy_add_unary = self._lib.combilp_trws_stub_energy_add_unary
self._energy_add_unary.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
self._energy_add_pairwise = self._lib.combilp_trws_stub_energy_add_pairwise
self._energy_add_pairwise.argtypes = [c_void_p, c_int32, c_int32, c_int32, ndpointer(dtype=c_double)]
self._energy_finalize = self._lib.combilp_trws_stub_energy_finalize
self._energy_finalize.argtypes = [c_void_p]
self._energy_destroy = self._lib.combilp_trws_stub_energy_destroy
self._energy_destroy.argtypes = [c_void_p]
self._solver_create = self._lib.combilp_trws_stub_solver_create
self._solver_create.argtypes = [c_void_p]
self._solver_create.restype = c_void_p
self._solve = self._lib.combilp_trws_stub_solve
self._solve.argtypes = [c_void_p, c_int, c_int]
self._solver_destroy = self._lib.combilp_trws_stub_destroy_solver
self._solver_destroy.argtypes = [c_void_p]
self._get_backward_messages = self._lib.combilp_trws_stub_get_backward_messages
self._get_backward_messages.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
def solve(self):
self._solve(self._solver,
self.parameters['max_iterations'],
self.parameters['threads'])
def get_repametrization(self):
repa = Reparametrization(self.model)
edge_counter = 0
for i, factor in enumerate(self.model.factors):
if factor.number_of_variables == 2:
self._get_backward_messages(self._solver, edge_counter,
repa.get_factor(i, 0))
edge_counter += 1
# recompute forward messages
values = repa.get_factor_value(i)
repa_values = repa.get_factor(i, 1)
for label in range(factor.shape[1]):
minimum = values[:,label].min()
repa_values[label] = minimum
return repa
class SRMP(LPSolver):
DEFAULT_PARAMETERS = {
'max_iterations': 2000,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self._init_library()
self._solver = self._create(self.model.number_of_variables, self.model.shape)
for factor in self.model.factors:
assert(factor.data.flags.c_contiguous)
self._add_factor(self._solver, factor.number_of_variables,
factor.variables, factor.data)
def __del__(self):
if self._solver:
self._destroy(self._solver)
self._solver = None
def _init_library(self):
self._lib = ctypes.cdll.LoadLibrary('libcombilp_srmp_stub.so')
self._message_func_type = CFUNCTYPE(None, c_size_t, POINTER(c_int32), c_int32, POINTER(c_double), POINTER(c_double))
self._message_func_type.from_param = self._message_func_type
self._create = self._lib.combilp_srmp_stub_create
self._create.argtypes = [c_int32, ndpointer(dtype=c_int32)]
import argparse
import os
import pickle as pkl
import numpy as np
import scipy.sparse as smat
from pecos.core.base import clib
from pecos.utils import smat_util
from pecos.utils.cluster_util import ClusterChain
from pecos.xmc import MLModel
from pecos.xmc.xlinear import XLinearModel
def parse_arguments():
parser = argparse.ArgumentParser(
prog="Evaluate how well our model is good at semantic disentablement."
)
parser.add_argument(
"-x",
"--inst-path",
type=str,
required=True,
metavar="PATH",
help="path to npz file of feature matrix",
)
parser.add_argument(
"--y-origin",
type=str,
required=True,
metavar="PATH",
help="path to the npz file of the original label matrix",
)
parser.add_argument(
"--y-binned",
type=str,
required=True,
metavar="PATH",
help="path to the binned label matrix",
)
parser.add_argument(
"-m",
"--model-folder",
type=lambda p: os.path.abspath(p),
required=True,
metavar="DIR",
help="path to the model folder",
)
parser.add_argument(
"--binned-mapper", type=str, required=True, help="path to the mapper file",
)
parser.add_argument(
"--pseudo-label-mapper",
type=str,
default=None,
help="path to pseudo label mapper. If None, this variable is ignored.",
)
parser.add_argument(
"--unused-labels",
type=str,
default=None,
help="path to unused label set. If None, this variable is ignored.",
)
parser.add_argument(
"-b",
"--beam-size",
type=int,
required=True,
help="Beam size to calculate the matching matrix.",
)
args = parser.parse_args()
return args
def get_matching_matrix(xlinear_model, Xt, beam_size=10):
"""Compute the matching matrix.
The matching matrix indicates which cluster(s) are selected for data point in X. The
final results is a sparse matrix of shape N x C, where N is the number of data, and C
is the number of clusters.
Args:
xlinear_model: the pretrained model.
Xt: the feature matrix.
beam_size: beam size for inference.
Returns:
The matching matrix in CSR format.
"""
matching_result = []
batch_size = 8192 * 16
kwargs = {
"beam_size": beam_size,
"only_topk": 30,
"post_processor": "l3-hinge",
}
model_chain = xlinear_model.model.model_chain
for i in range((Xt.shape[0] - 1) // batch_size + 1):
beg, end = i * batch_size, (i + 1) * batch_size
end = min(end, Xt.shape[0])
X_selected = Xt[beg:end]
csr_codes = None
for level in range(len(model_chain) - 1):
cur_model = model_chain[level]
level_pred = cur_model.predict(
X_selected,
csr_codes=csr_codes,
only_topk=beam_size,
post_processor=kwargs["post_processor"],
)
csr_codes = level_pred
matching_result.append(csr_codes)
matching_result = smat.vstack(matching_result, format="csr")
return matching_result
def positive_instances(Xt, Yt, underlying_label_ids):
"""Find the instances having some particular label ids.
For all labels in `underlying_label_ids`, return the list of instances containing
that label as ground-truth.
Args:
Xt: The feature matrix of shape N x d, where N is number of instances, d is
feature dimension.
Yt: The label matrix of shape N x L, L is the size of label space.
underlying_label_ids: The set of target labels.
Returns:
A list of positive instance ids and their feature vectors.
"""
row_ids_list = []
Xt_subsets = []
for label_id in underlying_label_ids:
row_ids = Yt.indices[Yt.indptr[label_id] : Yt.indptr[label_id + 1]]
Xt_subsets.append(Xt[row_ids])
row_ids_list.append(row_ids)
return row_ids_list, Xt_subsets
def label_id_to_cluster_id(label_id, C, unused_labels):
"""Map the label id to the cluster id according to clustering matrix.
Args:
label_id: the label id.
C: the cluster matrix of shape L x C.
unused_labels: used to adjust the label id.
Returns:
the cluster id.
"""
# count how many unused labels that are smaller than label_id
offset = sum([l < label_id for l in unused_labels])
row_id = label_id - offset
assert C.indptr[row_id] + 1 == C.indptr[row_id + 1]
cluster_id = C.indices[C.indptr[row_id]]
return cluster_id
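# Offset logic, worked through: with unused_labels = {1, 3} and label_id = 4,
# two unused labels (1 and 3) are smaller than 4, so the label maps to row
# 4 - 2 = 2 of the clustering matrix C.
#
#     offset = sum(l < 4 for l in {1, 3})   # -> 2
#     row_id = 4 - offset                   # -> 2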
def match(
xlinear_model, beam_size, instance_ids_list, X_subsets, cid1, cid2,
):
"""Given two clusters, distribute all instances to two groups.
Separate all input features `X_subsets` into two subsets `x_cid1` and `x_cid2`,
according to the prediction results from `xlinear_model`. If the scores of an
instance in `cid1` is higher than `cid2`, than this instance is assigned to group1.
Args:
xlinear_model: the model.
beam_size: beam size for inference.
instance_ids_list: the instance id of `X_subsets`.
X_subsets: the feature matrix.
cid1, cid2: the cluster ids of two clusters.
Returns:
the instance ids of two subsets.
"""
x_cid1 = []
x_cid2 = []
for instance_ids, X_subset in zip(instance_ids_list, X_subsets):
matching_matrix = get_matching_matrix(
xlinear_model, X_subset, beam_size,
).toarray()
mask = matching_matrix[:, cid1] > matching_matrix[:, cid2]
x_cid1.extend(instance_ids[mask])
x_cid2.extend(instance_ids[~mask])
return x_cid1, x_cid2
def random_baseline(S1, S2):
"""A random baseline that assigns all instances randomly to two groups.
Args:
S1, S2: the ground truth assignment according to their semantic meanings.
Returns:
VI scores of this random baseline.
"""
S = np.concatenate((S1, S2), axis=0)
experiment = []
for _ in range(100):
np.random.shuffle(S)
selector = np.random.randn(len(S)) > 0
K1 = S[selector]
K2 = S[~selector]
vi_sample = VI(S1, S2, K1, K2)
experiment.append(vi_sample)
return np.mean(experiment)
def VI(S1, S2, K1, K2):
"""Computes the Variation of Information(VI) between two clusters.
See: https://en.wikipedia.org/wiki/Variation_of_information for more information.
Args:
S1, S2: the set of ground truth clusters.
K1, K2: the predicted clusters.
Returns:
the VI score.
"""
assert len(S1) + len(S2) == len(K1) + len(K2)
n = len(S1) + len(S2)
eps = 1.0e-8
p1 = len(S1) / n + eps
p2 = len(S2) / n + eps
q1 = len(K1) / n + eps
q2 = len(K2) / n + eps
r11 = len(np.intersect1d(S1, K1)) / n + eps
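# The snippet cuts off here; a plausible completion, assuming the standard
# definition VI = -sum_ij r_ij * (log(r_ij / p_i) + log(r_ij / q_j)):
r12 = len(np.intersect1d(S1, K2)) / n + eps
r21 = len(np.intersect1d(S2, K1)) / n + eps
r22 = len(np.intersect1d(S2, K2)) / n + eps
vi = -(r11 * (np.log(r11 / p1) + np.log(r11 / q1))
       + r12 * (np.log(r12 / p1) + np.log(r12 / q2))
       + r21 * (np.log(r21 / p2) + np.log(r21 / q1))
       + r22 * (np.log(r22 / p2) + np.log(r22 / q2)))
return vi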
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
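# Example usage (illustrative; uses a group defined further below):
#     sg = space_groups['P 21 21 21']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls holds one Miller-index triple per symmetry operation,
#     # phases the corresponding structure-factor phase factors.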
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
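transformations.append((rot, trans_num, trans_den))
# The source was truncated here. The remaining operations of this group are
# reconstructed below by applying the A-centring translation (0,1/2,1/2) to
# the four operations listed above, following the pattern of the groups above;
# the symbol 'A b m 2' for space group 39 is the PDB convention and is an
# assumption about what the generator would have emitted.
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg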
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import numpy as np
import arrayfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
"""
Class created for all parameters for tomography solver
"""
def __init__(self):
self.method = "FISTA"
self.stepsize = 1e-2
self.max_iter = 20
self.error = []
self.reg_term = 0.0 #L2 norm
#FISTA
self.fista_global_update = False
self.restart = False
#total variation regularization
self.total_variation = False
self.reg_tv = 1.0 #lambda
self.max_iter_tv = 15
self.order_tv = 1
self.total_variation_gpu = False
#lasso
self.lasso = False
self.reg_lasso = 1.0
#positivity constraint
self.positivity_real = (False, "larger")
self.positivity_imag = (False, "larger")
self.pure_real = False
self.pure_imag = False
#aberration correction
self.pupil_update = False
self.pupil_global_update = False
self.pupil_step_size = 1.0
self.pupil_update_method = "gradient"
#batch gradient update
self.batch_size = 1
#random order update
self.random_order = False
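# Example (illustrative): override the defaults before handing the configs
# to a solver, e.g.
#     configs = AlgorithmConfigs()
#     configs.max_iter = 100
#     configs.total_variation = True
#     configs.reg_tv = 0.1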
class PhaseObject3D:
"""
Class created for 3D objects.
Depending on the scattering model, one of the following quantities will be used:
- Refractive index (RI)
- Transmittance function (Trans)
- PhaseContrast
- Scattering potential (V)
shape: shape of object to be reconstructed in (x,y,z), tuple
voxel_size: size of each voxel in (x,y,z), tuple
RI_obj: refractive index of object(Optional)
RI: background refractive index (Optional)
slice_separation: For multislice algorithms, how far apart are slices separated, array (Optional)
"""
def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, slice_separation = None):
assert len(shape) == 3, "shape should be 3 dimensional!"
self.shape = shape
self.RI_obj = RI * np.ones(shape, dtype = np_complex_datatype) if RI_obj is None else RI_obj.astype(np_complex_datatype)
self.RI = RI
self.pixel_size = voxel_size[0]
self.pixel_size_z = voxel_size[2]
if slice_separation is not None:
#for discontinuous slices
assert len(slice_separation) == shape[2]-1, "number of separations should match with number of layers!"
self.slice_separation = np.asarray(slice_separation).astype(np_float_datatype)
else:
#for continuous slices
self.slice_separation = self.pixel_size_z * np.ones((shape[2]-1,), dtype = np_float_datatype)
def convertRItoTrans(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.trans_obj = np.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
def convertRItoPhaseContrast(self):
self.contrast_obj = self.RI_obj - self.RI
def convertRItoV(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
def convertVtoRI(self, wavelength):
k0 = 2.0 * np.pi / wavelength
B = -1.0 * (self.RI**2 - self.V_obj.real/k0**2)
C = -1.0 * (-1.0 * self.V_obj.imag/k0**2/2.0)**2
RI_obj_real = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
RI_obj_imag = -0.5 * self.V_obj.imag/k0**2/RI_obj_real
self.RI_obj = RI_obj_real + 1.0j * RI_obj_imag
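    # Round-trip sanity check (illustrative, hypothetical values): the
    # V <-> RI conversions invert each other up to floating-point error:
    #     obj = PhaseObject3D((64, 64, 32), (0.1, 0.1, 0.1), RI=1.33)
    #     obj.convertRItoV(wavelength=0.5)
    #     obj.convertVtoRI(wavelength=0.5)   # recovers the original RI_obj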
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
    rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
self.phase_obj_3d = phase_obj_3d
self.wavelength = kwargs["wavelength"]
#Rotation angels and objects
self.rot_angles = rotation_angle_list
self.number_rot = len(self.rot_angles)
self.rotation_pad = kwargs.get("rotation_pad", True)
#Illumination angles
assert len(fx_illu_list) == len(fy_illu_list)
self.fx_illu_list = fx_illu_list
self.fy_illu_list = fy_illu_list
self.number_illum = len(self.fx_illu_list)
#Aberation object
self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
self.wavelength, kwargs["na"], pad = False)
#Defocus distances and object
self.prop_distances = propagation_distance_list
self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
self.number_defocus = len(self.prop_distances)
#Scattering models and algorithms
self._opticsmodel = {"MultiTrans": MultiTransmittance,
"MultiPhaseContrast": MultiPhaseContrast,
}
self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
"FISTA": self._solveFirstOrderGradient
}
self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
        model: the scattering model; it can be one of the following:
               "MultiTrans", "MultiPhaseContrast" (used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_inplace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
if np.any(self.rot_angles != [0]):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
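    # Example (illustrative, hypothetical parameter values):
    #     solver = TomographySolver(phase_obj_3d, fx_illu_list, fy_illu_list,
    #                               wavelength=0.5, na=1.0)
    #     solver.setScatteringMethod(model="MultiPhaseContrast")
    #     predictions = solver.forwardPredict()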
def forwardPredict(self, field = False):
"""
Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
Before calling, make sure correct object is contained
"""
obj_gpu = af.to_array(self._x)
with contexttimer.Timer() as timer:
forward_scattered_predict= []
if self._scattering_obj.back_scatter:
back_scattered_predict = []
for rot_idx in range(self.number_rot):
forward_scattered_predict.append([])
if self._scattering_obj.back_scatter:
back_scattered_predict.append([])
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
for illu_idx in range(self.number_illum):
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
if field:
forward_scattered_predict[rot_idx].append(np.array(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
                            back_scattered_predict[rot_idx].append(np.array(fields["back_scattered_field"]))
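                    # The source was truncated here. Hedged reconstruction of
                    # the remainder: when `field` is False, store amplitudes
                    # instead, then undo the rotation and return the
                    # predictions. The amplitude branch and the inverse
                    # rotation call are assumptions.
                    else:
                        forward_scattered_predict[rot_idx].append(np.abs(np.array(fields["forward_scattered_field"])))
                        if self._scattering_obj.back_scatter:
                            back_scattered_predict[rot_idx].append(np.abs(np.array(fields["back_scattered_field"])))
                if self.rot_angles[rot_idx] != 0:
                    self._rot_obj.rotate(obj_gpu, -1 * self.rot_angles[rot_idx])
        if self._scattering_obj.back_scatter:
            return forward_scattered_predict, back_scattered_predict
        return forward_scattered_predict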
# coding: utf-8
# ### Compute results for task 1 on the humour dataset.
#
# Please see the readme for instructions on how to produce the GPPL predictions that are required for running this script.
#
# Then, set the variable resfile to point to the ouput folder of the previous step.
#
import string
import pandas as pd
import os, logging, csv
from nltk.tokenize import word_tokenize
from scipy.stats.mstats import spearmanr, pearsonr
import numpy as np
# Where to find the predictions and gold standard
resfile = './results/experiment_humour_2019-02-26_20-44-52/results-2019-02-26_20-44-52.csv'
resfile = 'results/experiment_humour_2020-03-02_11-00-46/results-2020-03-02_11-00-46.csv'  # overrides the path above
# Load the data
data = pd.read_csv(resfile, usecols=[0,1,2])
ids = data['id'].values
bws = data['bws'].values
gppl = data['predicted'].values
# ### Ties in the BWS Scores contribute to the discrepancies between BWS and GPPL
#
# GPPL scores are all unique, but BWS contains many ties.
# Selecting only one of the tied items increases the Spearman correlation.
#
# Find the ties in BWS. Compute correlations between those tied items for the GPPL scores vs. original BWS scores and GPPL vs. scaled BWS scores.
# Do the ties contribute a lot of the differences in the overall ranking?
# Another way to test if the ties contribute differences to the ranking:
# Select only one random item from each tie and exclude the rest, then recompute.
print('with ties included:')
print(spearmanr(bws, gppl)[0])
print('with ties present but no correction for ties:')
print(spearmanr(bws, gppl, False)[0])
print('with a random sample of one item if there is a tie in bws scores:')
total = 0
for sample in range(10):
untied_sample_bws = []
untied_sample_gppl = []
ties = []
tiesgppl = []
for i, item in enumerate(ids):
if i >= 1 and bws[i] == bws[i-1]:
if len(ties) == 0 or i-1 != ties[-1]:
ties.append(i-1) # the previous one should be added to the list if we have just recognised it as a tie
ties.append(i)
#randomly choose whether to keep the previous item or this one
if np.random.rand() < 0.5:
pass
else:
untied_sample_bws.pop()
untied_sample_gppl.pop()
untied_sample_bws.append(bws[i])
untied_sample_gppl.append(gppl[i])
else:
untied_sample_bws.append(bws[i])
untied_sample_gppl.append(gppl[i])
if i >= 1 and gppl[i] == gppl[i-1]:
if len(tiesgppl) == 0 or i-1 != tiesgppl[-1]:
tiesgppl.append(i-1) # the previous one should be added to the list if we have just recognised it as a tie
tiesgppl.append(i)
rho = spearmanr(untied_sample_bws, untied_sample_gppl)[0]
total += rho
print(rho)
print('Number of BWS tied items = %i' % len(ties))
print('Number of GPPL tied items = %i' % len(tiesgppl))
sample_size = len(untied_sample_bws)
print('Mean for samples without ties = %f' % (total / 10))
print('Correlations for random samples of the same size (%i), allowing ties: ' % sample_size)
total = 0
for sample in range(10):
# take a random sample, without caring about ties
randidxs = np.random.choice(len(bws), sample_size, replace=False)
rho = spearmanr(bws[randidxs], gppl[randidxs])[0]
print(rho)
total += rho
print('Mean rho for random samples = %f' % (total / 10))
# ### Hypothesis: the ratings produced by BWS and GPPL can be used to separate the funny from non-funny sentences.
# This compares the predicted ratings to the gold standard *classifications* to see if the ratings can be used
# to separate funny and non-funny.
# load the discrete labels
def get_cats(fname):
with open(os.path.join('./data/pl-humor-full', fname), 'r') as f:
for line in f:
line = line.strip()
for c in string.punctuation + ' ' + '\xa0':
line = line.replace(c, '')
# line = line.replace(' ', '').strip()
# line = line.replace('"', '') # this is probably borked by tokenization?
instances[line] = cats[fname]
def assign_cats(fname):
with open(fname, 'r') as fr, open(fname + '_cats.csv', 'w') as fw:
reader = csv.DictReader(fr)
writer = csv.DictWriter(fw, fieldnames=['id', 'bws', 'predicted', 'category', 'sentence'])
writer.writeheader()
for row in reader:
sentence = row['sentence'].strip()
for c in string.punctuation + ' ':
sentence = sentence.replace(c, '')
# sentence = row['sentence'].replace(' ','').strip()
# sentence = sentence.replace('`', '\'') # this is probably borked by tokenization?
# sentence = sentence.replace('"', '') # this is probably borked by tokenization?
row['category'] = instances[sentence]
writer.writerow(row)
cats = dict()
cats['jokes_heterographic_puns.txt'] = 'hetpun'
cats['jokes_homographic_puns.txt'] = 'hompun'
cats['jokes_nonpuns.txt'] = 'nonpun'
cats['nonjokes.txt'] = 'non'
instances = dict()
for fname in cats.keys():
get_cats(fname)
assign_cats(resfile)
catfile = os.path.expanduser(resfile + '_cats.csv')
#'./results/experiment_humour_2019-02-28_16-39-36/cats/results-2019-02-28_20-45-25.csv')
cats = pd.read_csv(catfile, index_col=0, usecols=[0,3])
cat_list = np.array([cats.loc[instance].values[0] if instance in cats.index else 'unknown' for instance in ids])
gfunny = (cat_list == 'hompun') | (cat_list == 'hetpun')
gunfunny = (cat_list == 'nonpun') | (cat_list == 'non')
print('Number of funny = %i, non-funny = %i' % (np.sum(gfunny),
np.sum(gunfunny) ) )
# check classification accuracy -- how well does our ranking separate the two classes
from sklearn.metrics import roc_auc_score
gold = np.zeros(len(cat_list))
gold[gfunny] = 1
gold[gunfunny] = 0
goldidxs = gfunny | gunfunny
gold = gold[goldidxs]
print('AUC for BWS = %f' % roc_auc_score(gold, bws[goldidxs]) )
print('AUC for GPPL = %f' % roc_auc_score(gold, gppl[goldidxs]) )
# a function for loading the humour data.
def load_crowd_data_TM(path):
"""
Read csv and create preference pairs of tokenized sentences.
:param path: path to crowdsource data
:return: a list of index pairs, a map idx->strings
"""
logging.info('Loading crowd data...')
pairs = []
idx_instance_list = []
with open(path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
next(reader) # skip header row
for line_no, line in enumerate(reader):
answer = line[1]
A = word_tokenize(line[2])
B = word_tokenize(line[3])
            # add instances to list (if not already in it)
if A not in idx_instance_list:
idx_instance_list.append(A)
if B not in idx_instance_list:
idx_instance_list.append(B)
# add pairs to list (in decreasing preference order)
if answer == 'A':
pairs.append((idx_instance_list.index(A), idx_instance_list.index(B)))
if answer == 'B':
pairs.append((idx_instance_list.index(B), idx_instance_list.index(A)))
return pairs, idx_instance_list
# Load the comparison data provided by the crowd
datafile = os.path.expanduser('./data/pl-humor-full/results.tsv')
pairs, idxs = load_crowd_data_TM(datafile)
pairs = np.array(pairs)
np.savetxt(os.path.expanduser('./data/pl-humor-full/pairs.csv'), pairs, '%i', delimiter=',')
# For each item compute its BWS scores
# but scale by the BWS scores of the items they are compared against.
# This should indicate whether two items with same BWS score should
# actually be ranked differently according to what they were compared against.
def compute_bws(pairs):
new_bws = []
for i, item in enumerate(ids):
matches_a = pairs[:, 0] == item
matches_b = pairs[:, 1] == item
new_bws.append((np.sum(matches_a) - np.sum(matches_b))
/ float(np.sum(matches_a) + np.sum(matches_b)))
return new_bws
# ### Agreement and consistency of annotators
# Table 3: For the humour dataset, compute the correlation between the gold standard and the BWS scores with subsets of data.
# Take random subsets of pairs so that each pair has only 4 annotations
def get_pid(pair):
return '#'.join([str(i) for i in sorted(pair)])
def compute_mean_correlation(nannos):
nreps = 10
mean_rho = 0
for rep in range(nreps):
pair_ids = list([get_pid(pair) for pair in pairs])
        upair_ids = np.unique(pair_ids)
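        # The source was truncated here. Hedged reconstruction: keep at most
        # `nannos` annotations per unique pair, recompute BWS scores from the
        # subsample, and correlate them with the published BWS scores.
        pair_ids = np.array(pair_ids)
        keep = []
        for upid in upair_ids:
            matches = np.where(pair_ids == upid)[0]
            keep.extend(np.random.choice(matches, min(nannos, len(matches)), replace=False))
        sub_bws = compute_bws(pairs[keep])
        mean_rho += spearmanr(bws, sub_bws)[0]
    return mean_rho / nreps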
# -*- coding: utf-8 -*-
from . import plot_settings as pls
from . import plots as pl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import logging
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from scipy.stats.kde import gaussian_kde
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
def find_best_para(para_trace, bins):
    ''' find the best parameter and its 1-sigma/2-sigma errors for a (non-)Gaussian distribution '''
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
hist = np.histogram(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of the top 7
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para = np.r_[best_para, para_err_left, para_err_right]
return para
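# Example (illustrative): find_best_para(trace, 100) returns
# np.r_[best_value, err_minus_1sigma, err_plus_1sigma]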
def find_best_para_plt(para_trace, bins):
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
plt.figure()
hist = plt.hist(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of the top 7
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para = np.r_[best_para, para_err_left, para_err_right]
return para
def find_best_para2(para_trace, bins):
para_trace ,bins = para_trace, bins
para_trace = np.sort(para_trace)
hist = np.histogram(para_trace, bins)
bins, x = hist[0], hist[1]
sort_bin_nums = np.sort(bins)
best_bins = sort_bin_nums[-7:] # top 7
best_bins_nums = np.r_[ np.where(bins==best_bins[0])[0], \
np.where(bins==best_bins[1])[0], np.where(bins==best_bins[2])[0], \
np.where(bins==best_bins[3])[0], np.where(bins==best_bins[4])[0], \
np.where(bins==best_bins[5])[0], np.where(bins==best_bins[6])[0] ]
    # use the average of the top 7 bins
best_para = (x[min(best_bins_nums)] + x[max(best_bins_nums) +1])/2.
left = np.where(para_trace <= best_para)[0]
right = np.where(para_trace > best_para)[0]
para_err_left1 = best_para - para_trace[int(len(left) * (1-0.6826))]
para_err_right1 = para_trace[int(len(right) * 0.6826) + len(left)] - best_para
para_err_left2 = best_para - para_trace[int(len(left) * (1-0.9544))]
para_err_right2 = para_trace[int(len(right) * 0.9544) + len(left)] - best_para
para = np.r_[best_para, para_err_left2,para_err_left1, para_err_right1,para_err_right2]
return para
def _quantile(x, q, weights=None):
"""
Compute sample quantiles with support for weighted samples.
This is a copy of quantile in corner (https://github.com/dfm/corner.py). Copyright (c) 2013-2015 <NAME>.
Note
----
When ``weights`` is ``None``, this method simply calls numpy's percentile
function with the values of ``q`` multiplied by 100.
Parameters
----------
x : array_like[nsamples,]
The samples.
q : array_like[nquantiles,]
The list of quantiles to compute. These should all be in the range
``[0, 1]``.
weights : Optional[array_like[nsamples,]]
        An optional weight corresponding to each sample.
Returns
-------
quantiles : array_like[nquantiles,]
The sample quantiles computed at ``q``.
Raises
------
ValueError
For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch
between ``x`` and ``weights``.
"""
    x = np.atleast_1d(x)
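    # Assumed completion: the snippet is truncated here, so the body below is a
    # reconstruction following the corner.py implementation cited in the docstring.
    q = np.atleast_1d(q)
    if np.any(q < 0.0) or np.any(q > 1.0):
        raise ValueError("Quantiles must be between 0 and 1")
    if weights is None:
        return np.percentile(x, list(100.0 * q))
    else:
        weights = np.atleast_1d(weights)
        if len(x) != len(weights):
            raise ValueError("Dimension mismatch: len(weights) != len(x)")
        idx = np.argsort(x)
        sw = weights[idx]
        cdf = np.cumsum(sw)[:-1]
        cdf /= cdf[-1]
        cdf = np.append(0, cdf)
        return np.interp(q, cdf, x[idx]).tolist()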
from __future__ import division
import pytest
import numpy as np
import cudf as pd
import fast_carpenter.masked_tree as m_tree
@pytest.fixture
def tree_no_mask(infile, full_event_range):
return m_tree.MaskedUprootTree(infile, event_ranger=full_event_range)
@pytest.fixture
def tree_w_mask_bool(infile, event_range):
mask = np.ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
return m_tree.MaskedUprootTree(infile, event_ranger=event_range, mask=mask)
@pytest.fixture
def tree_w_mask_int(infile, event_range):
mask = np.ones(event_range.entries_in_block, dtype=bool)
mask[::2] = False
    mask = np.where(mask)[0]  # np.where returns a tuple; take the index array (assumed intent)
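    # Assumed completion (mirrors tree_w_mask_bool above; the source snippet is
    # truncated at this point):
    return m_tree.MaskedUprootTree(infile, event_ranger=event_range, mask=mask)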
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.metrics.tests.test_ranking import make_prediction
from sklearn.utils.validation import check_consistent_length
from mcc_f1 import mcc_f1_curve
def test_mcc_f1_curve():
# Test MCC and F1 values for all points of the curve
y_true, _, probas_pred = make_prediction(binary=True)
mcc, f1, thres = mcc_f1_curve(y_true, probas_pred)
check_consistent_length(mcc, f1, thres)
expected_mcc, expected_f1 = _mcc_f1_calc(y_true, probas_pred, thres)
assert_array_almost_equal(f1, expected_f1)
assert_array_almost_equal(mcc, expected_mcc)
def _mcc_f1_calc(y_true, probas_pred, thresholds):
# Alternative calculation of (unit-normalized) MCC and F1 scores
pp = probas_pred
ts = thresholds
tps = np.array([np.logical_and(pp >= t, y_true == 1).sum() for t in ts])
fps = np.array([np.logical_and(pp >= t, y_true == 0).sum() for t in ts])
tns = np.array([np.logical_and(pp < t, y_true == 0).sum() for t in ts])
fns = np.array([np.logical_and(pp < t, y_true == 1).sum() for t in ts])
with np.errstate(divide='ignore', invalid='ignore'):
f1s = 2*tps / (2*tps + fps + fns)
d = np.sqrt((tps+fps)*(tps+fns)*(tns+fps)*(tns+fns))
        d = np.array([1 if di == 0 else di for di in d])  # guard against division by zero
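        # Assumed completion: the snippet is truncated here; compute the raw MCC
        # and map it from [-1, 1] to [0, 1], matching the "unit-normalized"
        # description in the comment above.
        mccs = (tps * tns - fps * fns) / d
        mccs = (mccs + 1) / 2
    return mccs, f1s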
import re
import os
import numpy as np
import pandas as pd
import scipy.stats as sps
pd.options.display.max_rows = 4000
pd.options.display.max_columns = 4000
def write_txt(str, path):
text_file = open(path, "w")
text_file.write(str)
text_file.close()
# SIR simulation
def sir(y, alpha, beta, gamma, nu, N):
S, E, I, R = y
Sn = (-beta * (S / N) ** nu * I) + S
En = (beta * (S / N) ** nu * I - alpha * E) + E
In = (alpha * E - gamma * I) + I
Rn = gamma * I + R
scale = N / (Sn + En + In + Rn)
return Sn * scale, En * scale, In * scale, Rn * scale
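# Hedged sanity check (illustrative numbers): a single update step conserves the
# total population by construction, because `scale` renormalises the compartments:
#   S, E, I, R = sir((990.0, 5.0, 5.0, 0.0), alpha=0.2, beta=0.5, gamma=0.1, nu=1.0, N=1000.0)
#   S + E + I + R == 1000.0  (up to floating point)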
def reopenfn(day, reopen_day=60, reopen_speed=0.1, reopen_cap = .5):
"""Starting on `reopen_day`, reduce contact restrictions
by `reopen_speed`*100%.
"""
if day < reopen_day:
return 1.0
else:
val = (1 - reopen_speed) ** (day - reopen_day)
return val if val >= reopen_cap else reopen_cap
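# Example (assumed parameter values): two days after reopening with
# reopen_speed=0.1, the multiplier is 0.9**2, floored at reopen_cap:
#   reopenfn(62, reopen_day=60, reopen_speed=0.1, reopen_cap=0.5)  # -> 0.81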
def reopen_wrapper(dfi, day, speed, cap):
p_df = dfi.reset_index()
p_df.columns = ['param', 'val']
ro = dict(param = ['reopen_day', 'reopen_speed', 'reopen_cap'],
val = [day, speed, cap])
p_df = pd.concat([p_df, pd.DataFrame(ro)])
SIR_ii = SIR_from_params(p_df)
return SIR_ii['arr_stoch'][:,3]
def scale(arr, mu, sig):
if len(arr.shape)==1:
arr = np.expand_dims(arr, 0)
arr = np.apply_along_axis(lambda x: x-mu, 1, arr)
arr = np.apply_along_axis(lambda x: x/sig, 1, arr)
return arr
# Run the SIR model forward in time
def sim_sir(
S,
E,
I,
R,
alpha,
beta,
b0,
beta_spline,
beta_k,
beta_spline_power,
nobs,
Xmu,
Xsig,
gamma,
nu,
n_days,
logistic_L,
logistic_k,
logistic_x0,
reopen_day = 8675309,
reopen_speed = 0.0,
reopen_cap = 1.0,
):
N = S + E + I + R
s, e, i, r = [S], [E], [I], [R]
if len(beta_spline) > 0:
knots = np.linspace(0, nobs-nobs/beta_k/2, beta_k)
for day in range(n_days):
y = S, E, I, R
# evaluate splines
if len(beta_spline) > 0:
X = power_spline(day, knots, beta_spline_power, xtrim = nobs)
# X = scale(X, Xmu, Xsig)
#scale to prevent overflows and make the penalties comparable across bases
XB = float(X@beta_spline)
sd = logistic(L = 1, k=1, x0 = 0, x= b0 + XB)
else:
sd = logistic(logistic_L, logistic_k, logistic_x0, x=day)
sd *= reopenfn(day, reopen_day, reopen_speed, reopen_cap)
beta_t = beta * (1 - sd)
S, E, I, R = sir(y, alpha, beta_t, gamma, nu, N)
s.append(S)
e.append(E)
i.append(I)
r.append(R)
s, e, i, r = np.array(s), np.array(e), np.array(i), np.array(r)
return s, e, i, r
# # compute X scale factor. first need to compute who X matrix across all days
# nobs = 100
# n_days = 100
# beta_spline_power = 2
# beta_spline = np.random.uniform(size = len(knots))
# X = np.stack([power_spline(day, knots, beta_spline_power, xtrim = nobs) for day in range(n_days)])
# # need to be careful with this: apply the scaling to the new X's when predicting
def power_spline(x, knots, n, xtrim):
if x > xtrim: #trim the ends of the spline to prevent nonsense extrapolation
x = xtrim + 1
spl = x - np.array(knots)
spl[spl<0] = 0
spl = spl/(xtrim**n)#scaling -- xtrim is the max number of days, so the highest value that the spline could have
return spl**n
'''
Plan:
beta_t = L/(1 + np.exp(XB))
'''
def logistic(L, k, x0, x):
return L / (1 + np.exp(-k * (x - x0)))
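# Example: the curve passes through L/2 at its midpoint x0:
#   logistic(L=1.0, k=1.0, x0=0.0, x=0.0)  # -> 0.5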
def qdraw(qvec, p_df):
"""
Function takes a vector of quantiles and returns marginals based on the parameters in the parameter data frame
It returns a bunch of parameters for inputting into SIR
It'll also return their probability under the prior
"""
assert len(qvec) == p_df.shape[0]
outdicts = []
for i in range(len(qvec)):
if p_df.distribution.iloc[i] == "constant":
out = dict(param=p_df.param.iloc[i], val=p_df.base.iloc[i], prob=1)
else:
            # Construct this differently for different distributions
if p_df.distribution.iloc[i] == "gamma":
p = (qvec[i], p_df.p1.iloc[i], 0, p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "beta":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "uniform":
p = (qvec[i], p_df.p1.iloc[i], p_df.p1.iloc[i] + p_df.p2.iloc[i])
elif p_df.distribution.iloc[i] == "norm":
p = (qvec[i], p_df.p1.iloc[i], p_df.p2.iloc[i])
out = dict(
param=p_df.param.iloc[i],
val=getattr(sps, p_df.distribution.iloc[i]).ppf(*p),
)
# does scipy not have a function to get the density from the quantile?
p_pdf = (out["val"],) + p[1:]
out.update({"prob": getattr(sps, p_df.distribution.iloc[i]).pdf(*p_pdf)})
outdicts.append(out)
return pd.DataFrame(outdicts)
def jumper(start, jump_sd):
probit = sps.norm.ppf(start)
probit += np.random.normal(size=len(probit), scale=jump_sd)
newq = sps.norm.cdf(probit)
return newq
def compute_census(projection_admits_series, mean_los):
"""Compute Census based on exponential LOS distribution."""
census = [0]
for a in projection_admits_series.values:
c = float(a) + (1 - 1 / float(mean_los)) * census[-1]
census.append(c)
return np.array(census[1:])
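# Hedged example (illustrative numbers): with mean_los=4, each day keeps
# 1 - 1/4 = 75% of the previous census and adds that day's admissions:
#   compute_census(pd.Series([10.0, 12.0, 8.0]), mean_los=4)
#   # -> array([10., 19.5, 22.625])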
def SIR_from_params(p_df):
"""
This function takes the output from the qdraw function
"""
n_hosp = int(p_df.val.loc[p_df.param == "n_hosp"])
incubation_days = float(p_df.val.loc[p_df.param == "incubation_days"])
hosp_prop = float(p_df.val.loc[p_df.param == "hosp_prop"])
ICU_prop = float(p_df.val.loc[p_df.param == "ICU_prop"])
vent_prop = float(p_df.val.loc[p_df.param == "vent_prop"])
hosp_LOS = float(p_df.val.loc[p_df.param == "hosp_LOS"])
ICU_LOS = float(p_df.val.loc[p_df.param == "ICU_LOS"])
vent_LOS = float(p_df.val.loc[p_df.param == "vent_LOS"])
recovery_days = float(p_df.val.loc[p_df.param == "recovery_days"])
mkt_share = float(p_df.val.loc[p_df.param == "mkt_share"])
region_pop = float(p_df.val.loc[p_df.param == "region_pop"])
logistic_k = float(p_df.val.loc[p_df.param == "logistic_k"])
logistic_L = float(p_df.val.loc[p_df.param == "logistic_L"])
logistic_x0 = float(p_df.val.loc[p_df.param == "logistic_x0"])
nu = float(p_df.val.loc[p_df.param == "nu"])
beta = float(
p_df.val.loc[p_df.param == "beta"]
) # get beta directly rather than via doubling time
# assemble the coefficient vector for the splines
beta_spline = np.array(p_df.val.loc[p_df.param.str.contains('beta_spline_coef')]) #this evaluates to an empty array if it's not in the params
if len(beta_spline) > 0:
b0 = float(p_df.val.loc[p_df.param == "b0"])
beta_spline_power = np.array(p_df.val.loc[p_df.param == "beta_spline_power"])
nobs = float(p_df.val.loc[p_df.param == "nobs"])
beta_k = int(p_df.loc[p_df.param == "beta_spline_dimension", 'val'])
Xmu = p_df.loc[p_df.param == "Xmu", 'val'].iloc[0]
Xsig = p_df.loc[p_df.param == "Xsig", 'val'].iloc[0]
else:
beta_spline_power = None
beta_k = None
nobs = None
b0 = None
Xmu, Xsig = None, None
reopen_day, reopen_speed, reopen_cap = 1000, 0.0, 1.0
if "reopen_day" in p_df.param.values:
reopen_day = int(p_df.val.loc[p_df.param == "reopen_day"])
if "reopen_speed" in p_df.param.values:
reopen_speed = float(p_df.val.loc[p_df.param == "reopen_speed"])
if "reopen_cap" in p_df.param.values:
reopen_cap = float(p_df.val.loc[p_df.param == "reopen_cap"])
alpha = 1 / incubation_days
gamma = 1 / recovery_days
total_infections = n_hosp / mkt_share / hosp_prop
n_days = 200
# Offset by the incubation period to start the sim
# that many days before the first hospitalization
# Estimate the number Exposed from the number hospitalized
# on the first day of non-zero covid hospitalizations.
from scipy.stats import expon
# Since incubation_days is exponential in SEIR, we start
# the time `offset` days before the first hospitalization
# We determine offset by allowing enough time for the majority
# of the initial exposures to become infected.
    offset = expon.ppf(
        0.99, scale=incubation_days
    )  # enough time for 99% of exposed to become infected
    # (scipy's expon takes the scale, i.e. the mean incubation time, not the rate)
offset = int(offset)
s, e, i, r = sim_sir(
S=region_pop - total_infections,
E=total_infections,
I=0.0, # n_infec / detection_prob,
R=0.0,
alpha=alpha,
beta=beta,
b0=b0,
beta_spline = beta_spline,
beta_k = beta_k,
beta_spline_power = beta_spline_power,
Xmu = Xmu,
Xsig = Xsig,
nobs = nobs,
gamma=gamma,
nu=nu,
n_days=n_days + offset,
logistic_L=logistic_L,
logistic_k=logistic_k,
logistic_x0=logistic_x0 + offset,
reopen_day=reopen_day,
reopen_speed=reopen_speed,
reopen_cap=reopen_cap
)
arrs = {}
for sim_type in ["mean", "stochastic"]:
if sim_type == "mean":
ds = np.diff(i) + np.diff(r) # new infections is delta i plus delta r
ds = np.array([0] + list(ds))
ds = ds[offset:]
hosp_raw = hosp_prop
ICU_raw = hosp_raw * ICU_prop # coef param
vent_raw = ICU_raw * vent_prop # coef param
hosp = ds * hosp_raw * mkt_share
icu = ds * ICU_raw * mkt_share
vent = ds * vent_raw * mkt_share
elif sim_type == "stochastic":
# Sampling Stochastic Observation
            ds = np.diff(i) + np.diff(r)
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
from datetime import date
from math import e
def calc_rate(data1, data2):
if(data2 == 0):
return data1
else:
if(data1 < data2):
return (data2 / data1) * -1
else:
return data1 / data2
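# Example of the (assumed) sign convention: growth is reported as a plain ratio,
# decline as a negated inverse ratio:
#   calc_rate(150, 100)  # -> 1.5
#   calc_rate(100, 150)  # -> -1.5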
def calc_mort_rate(data1, data2):
if(data2 == 0):
return 0
else:
return data1 / data2
def compute_data(parsed_data):
days = np.array([])
new_cases = np.array([])
cases_growth_factor = np.array([])
new_deaths = np.array([])
deaths_growth_factor = np.array([])
new_tests = np.array([])
tests_growth_factor = np.array([])
new_recovered = np.array([])
recovered_growth_factor = np.array([])
new_hospitalized = np.array([])
hospitalized_growth_factor = np.array([])
mortality_rate = np.array([])
active_cases = np.array([])
for i, entry in enumerate(parsed_data[0]):
if(i == 0):
new_cases = np.append(new_cases, parsed_data[1][i] - 0)
cases_growth_factor = np.append(cases_growth_factor, 0)
new_deaths = np.append(new_deaths, parsed_data[2][i] - 0)
deaths_growth_factor = np.append(deaths_growth_factor, 0)
new_tests = np.append(new_tests, parsed_data[3][i] - 0)
tests_growth_factor = np.append(tests_growth_factor, 0)
new_recovered = np.append(new_recovered, parsed_data[4][i] - 0)
recovered_growth_factor = np.append(recovered_growth_factor, 0)
new_hospitalized = np.append(new_hospitalized, parsed_data[5][i] - 0)
hospitalized_growth_factor = np.append(hospitalized_growth_factor, 0)
mortality_rate = np.append(mortality_rate, calc_mort_rate(parsed_data[2][i], parsed_data[1][i]))
active_cases = np.append(active_cases, (parsed_data[1][i] - parsed_data[4][i] - parsed_data[2][i]))
days = np.append(days, i)
continue
new_cases = np.append(new_cases, parsed_data[1][i] - parsed_data[1][i-1])
cases_growth_factor = np.append(cases_growth_factor, calc_rate(parsed_data[1][i], parsed_data[1][i-1]))
new_deaths = np.append(new_deaths, parsed_data[2][i] - parsed_data[2][i-1])
deaths_growth_factor = np.append(deaths_growth_factor, calc_rate(parsed_data[2][i], parsed_data[2][i-1]))
new_tests = np.append(new_tests, parsed_data[3][i] - parsed_data[3][i-1])
tests_growth_factor = np.append(tests_growth_factor, calc_rate(parsed_data[3][i], parsed_data[3][i-1]))
new_recovered = np.append(new_recovered, parsed_data[4][i] - parsed_data[4][i-1])
recovered_growth_factor = np.append(recovered_growth_factor, calc_rate(parsed_data[4][i], parsed_data[4][i-1]))
new_hospitalized = np.append(new_hospitalized, parsed_data[5][i] - parsed_data[5][i-1])
hospitalized_growth_factor = np.append(hospitalized_growth_factor, calc_rate(parsed_data[5][i], parsed_data[5][i-1]))
mortality_rate = np.append(mortality_rate, calc_mort_rate(parsed_data[2][i], parsed_data[1][i]))
active_cases = np.append(active_cases, (parsed_data[1][i] - parsed_data[4][i] - parsed_data[2][i]))
days = np.append(days, i)
parsed_data.append(days)
parsed_data.append(new_cases)
parsed_data.append(cases_growth_factor)
parsed_data.append(new_deaths)
parsed_data.append(deaths_growth_factor)
parsed_data.append(new_recovered)
parsed_data.append(recovered_growth_factor)
parsed_data.append(new_hospitalized)
parsed_data.append(hospitalized_growth_factor)
parsed_data.append(new_tests)
parsed_data.append(tests_growth_factor)
parsed_data.append(mortality_rate)
parsed_data.append(active_cases)
return parsed_data
def logistic_fn(population):
day_counter = 1
days = np.array([])
logistic = np.array([])
current_cases = 1
while (day_counter < 60):
days = np.append(days, day_counter)
log_fn = population / (1 + ((population / current_cases) - 1) * e ** (-0.38 * day_counter))
print(log_fn)
logistic = np.append(logistic, log_fn)
day_counter += 1
return (days, logistic)
def difference(parsed_data, day1, day2):
print("Data difference between:", parsed_data[0][day1], 'and', parsed_data[0][day2])
print("\u0394Days:\t", parsed_data[6][day2] - parsed_data[6][day1])
print("\u0394Cases:\t", parsed_data[1][day2] - parsed_data[1][day1])
print("\u0394Deaths: ", parsed_data[2][day2] - parsed_data[2][day1])
print("\u0394Recov.: ", parsed_data[4][day2] - parsed_data[4][day1])
print("\u0394Hospi.: ", parsed_data[5][day2] - parsed_data[5][day1])
print("\u0394Tests:\t", parsed_data[3][day2] - parsed_data[3][day1])
def projection(next_days, days_passed, parsed_data):
total_cases = float(parsed_data[1][len(parsed_data[1])-1])
total_deaths = float(parsed_data[2][len(parsed_data[2])-1])
    total_tests = float(parsed_data[3][len(parsed_data[3])-1])
total_recovered = float(parsed_data[4][len(parsed_data[4])-1])
total_hospitalized = float(parsed_data[5][len(parsed_data[5])-1])
total_active = float(parsed_data[18][len(parsed_data[18])-1])
counter = 0
avg_cases_gf = 0.0
avg_deaths_gf = 0.0
avg_tests_gf = 0.0
avg_recovered_gf = 0.0
avg_hospitalized_gf = 0.0
avg_active_gf = 0.0
while(counter < days_passed):
avg_cases_gf += parsed_data[8][len(parsed_data[8]) - 1 - counter]
avg_deaths_gf += parsed_data[10][len(parsed_data[10]) - 1 - counter]
avg_tests_gf += parsed_data[16][len(parsed_data[16]) - 1 - counter]
avg_recovered_gf += parsed_data[12][len(parsed_data[12]) - 1 - counter]
avg_hospitalized_gf += parsed_data[14][len(parsed_data[14]) - 1 - counter]
        # index 18 holds raw active-case counts, not a precomputed growth factor,
        # so derive the factor from consecutive counts (assumed intent)
        idx = len(parsed_data[18]) - 1 - counter
        avg_active_gf += calc_rate(parsed_data[18][idx], parsed_data[18][idx - 1])
counter += 1
avg_cases_gf /= days_passed
avg_deaths_gf /= days_passed
avg_tests_gf /= days_passed
avg_recovered_gf /= days_passed
avg_hospitalized_gf /= days_passed
avg_active_gf /= days_passed
print('Avg Cases Growth Factor (past', days_passed ,'days):', round(avg_cases_gf, 5))
print('Avg Deaths Growth Factor (past', days_passed ,'days):', round(avg_deaths_gf, 5))
print('Avg Tests Growth Factor (past', days_passed ,'days):', round(avg_tests_gf, 5))
print('Avg Recovered Growth Factor (past', days_passed ,'days):', round(avg_recovered_gf, 5))
print('Avg Hospitalized Growth Factor (past', days_passed ,'days):', round(avg_hospitalized_gf, 5))
print('Avg Active Cases Growth Factor (past', days_passed ,'days):', round(avg_active_gf, 5))
counter = 0
while(counter < next_days):
total_cases = total_cases * avg_cases_gf
total_deaths = total_deaths * avg_deaths_gf
total_tests = total_tests * avg_tests_gf
total_recovered = total_recovered * avg_recovered_gf
total_hospitalized = total_hospitalized * avg_hospitalized_gf
total_active = total_active * avg_active_gf
counter += 1
print("Projections for the next", next_days, "days:")
print("Cases:", round(total_cases))
print("Active:", round(total_active))
print("Deaths:", round(total_deaths))
print("Tests:", round(total_tests))
print("Recovered:", round(total_recovered))
print("Hospitalized:", round(total_hospitalized))
def linear_regression(x, y):
x_nums = [i for i in range(0, len(x))] #create list of integers given that original x are string values
n = len(x_nums) #number of elements in x axis (same as y axis)
add_x = sum(x_nums) #add all x axis elements
add_y = sum(y) #add all y axis elements
    add_x_sqr = sum([i**2 for i in x_nums]) #add all x axis elements squared
add_xy = sum([x_nums[i] * y[i] for i in range(0, n)]) #add the product of each corresponding pair from x_nums and y
slope = (n * add_xy - add_x * add_y) / (n * add_x_sqr - add_x**2) #compute slope of linear regression
y_intercept = (add_y * add_x_sqr - add_x * add_xy) / (n * add_x_sqr - add_x**2) #compute the y intercept of the linear regression
lin_reg_x = [i for i in range(0, len(x_nums))] #create list of elements from 0 to length of x_nums
lin_reg_y = [slope * i + y_intercept for i in lin_reg_x] #replace x value in equation to find the y in linear regression
return [slope, y_intercept, lin_reg_y] #return slope, y_intercept, and linear regression list for y
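# Hedged usage sketch: x can be any sequence (e.g. date strings), since only its
# length is used and the fit runs against day indices 0..n-1:
#   slope, intercept, trend = linear_regression(['d0', 'd1', 'd2'], [1.0, 2.0, 3.0])
#   # slope == 1.0, intercept == 1.0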
def plot_graph(x, y, color, x_label, y_label, chart_title, file_name='', save=False, log_view=False, trend=False):
plt.figure(figsize=(14,10))
plt.ticklabel_format(style='plain')
plt.title(chart_title, fontdict={'fontsize' : 25})
if(log_view):
plt.yscale('log')
if(trend):
lin_reg_result = linear_regression(x, y)
lin_reg_equation = str(lin_reg_result[0])[:10] + 'X '
if(lin_reg_result[1] >= 0):
lin_reg_equation += '+'
lin_reg_equation += str(lin_reg_result[1])[:10]
plt.plot(x, lin_reg_result[2], color + '--', label = lin_reg_equation)
plt.legend(loc='upper left')
plt.plot(x, y, 'ko', x, y, color)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.grid()
if(save):
warnings.filterwarnings('ignore')
plt.savefig('../export/graphs/' + file_name)
else:
plt.show()
def plot_graph_all(parsed_data, chart_title, from_day, to_day, file_name='', save=False):
plt.figure(figsize=(14,10))
plt.ticklabel_format(style='plain')
plt.title(chart_title, fontdict={'fontsize' : 25})
plt.plot(parsed_data[4][from_day:to_day], parsed_data[1][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[1][from_day:to_day], 'b', label = "Cases")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[2][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[2][from_day:to_day], 'r', label = "Deaths")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[4][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[4][from_day:to_day], 'g', label = "Recovered")
plt.plot(parsed_data[4][from_day:to_day], parsed_data[18][from_day:to_day], 'ko')
plt.plot(parsed_data[4][from_day:to_day], parsed_data[18][from_day:to_day], 'k', label = "Active Cases")
plt.legend(loc="upper left")
plt.xlabel("Days")
plt.grid()
if(save):
warnings.filterwarnings('ignore')
        plt.savefig('../export/graphs/' + file_name)
else:
plt.show()
def print_cases(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[2]), end = '')
print('%13s'%(header[3]), end = '')
print('%13s'%(header[4]), end = '')
print('%13s'%(header[18]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[1][i]), '%12s'%(data[7][i]), '%12s'%(str(data[8][i])[:8]), '%12s'%(data[18][i]))
def print_deaths(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[5]), end = '')
print('%13s'%(header[6]), end = '')
print('%13s'%(header[7]), end = '')
print('%13s'%(header[5]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[2][i]), '%12s'%(data[9][i]), '%12s'%(data[10][i]), '%12s'%(data[17][i]))
def print_tests(header, data):
np.set_printoptions(precision=3)
print('%10s'%(header[0]), end = '')
print('%9s'%(header[1]), end = '')
print('%13s'%(header[14]), end = '')
print('%13s'%(header[15]), end = '')
print('%13s'%(header[16]))
for i in range(len(data[0])):
print('%10s'%(data[0][i]), '%8s'%(data[6][i]), '%12s'%(data[3][i]), '%12s'%(data[15][i]), '%12s'%(data[16][i]))
def print_recovered(header, data):
    np.set_printoptions(precision=3)
################################################################################
# Copyright (c) 2009-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Coordinate conversions not found in PyEphem."""
from __future__ import print_function, division, absolute_import
import numpy as np
# --------------------------------------------------------------------------------------------------
# --- Geodetic coordinate transformations
# --------------------------------------------------------------------------------------------------
def lla_to_ecef(lat_rad, long_rad, alt_m):
"""Convert WGS84 spherical coordinates to ECEF cartesian coordinates.
This converts a position on the Earth specified in geodetic latitude,
longitude and altitude to earth-centered, earth-fixed (ECEF) cartesian
coordinates. This code assumes the WGS84 earth model, described in
[NIMA2004]_.
Parameters
----------
lat_rad : float or array
Latitude (customary geodetic, not geocentric), in radians
long_rad : float or array
Longitude, in radians
alt_m : float or array
Altitude, in metres above WGS84 ellipsoid
Returns
-------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
References
----------
.. [NIMA2004] National Imagery and Mapping Agency, "Department of Defense
World Geodetic System 1984," NIMA TR8350.2, Page 4-4, last updated
June, 2004.
"""
# WGS84 Defining Parameters
a = 6378137.0 # semi-major axis of Earth in m
f = 1.0 / 298.257223563 # flattening of Earth
# WGS84 derived geometric constants
e2 = 2 * f - f ** 2 # first eccentricity squared
# intermediate calculation
# (normal, or prime vertical radius of curvature)
R = a / np.sqrt(1.0 - e2 * np.sin(lat_rad) ** 2)
x_m = (R + alt_m) * np.cos(lat_rad) * np.cos(long_rad)
y_m = (R + alt_m) * np.cos(lat_rad) * np.sin(long_rad)
z_m = ((1.0 - e2) * R + alt_m) * np.sin(lat_rad)
return x_m, y_m, z_m
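# Hedged sanity check: the equator/prime-meridian point at zero altitude maps to
# (a, 0, 0), the WGS84 semi-major axis:
#   lla_to_ecef(0.0, 0.0, 0.0)  # -> (6378137.0, 0.0, 0.0)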
def ecef_to_lla(x_m, y_m, z_m):
"""Convert ECEF cartesian coordinates to WGS84 spherical coordinates.
This converts an earth-centered, earth-fixed (ECEF) cartesian position to a
position on the Earth specified in geodetic latitude, longitude and altitude.
This code assumes the WGS84 earth model.
Parameters
----------
x_m, y_m, z_m : float or array
X, Y, Z coordinates, in metres
Returns
-------
lat_rad : float or array
Latitude (customary geodetic, not geocentric), in radians
long_rad : float or array
Longitude, in radians
alt_m : float or array
Altitude, in metres above WGS84 ellipsoid
Notes
-----
Based on the most accurate algorithm according to Zhu [zhu]_, which is
summarised by Kaplan [kaplan]_ and described in the Wikipedia entry [geo]_.
.. [zhu] <NAME>, "Conversion of Earth-centered Earth-fixed coordinates to
geodetic coordinates," Aerospace and Electronic Systems, IEEE Transactions
on, vol. 30, pp. 957-961, 1994.
.. [kaplan] Kaplan, "Understanding GPS: principles and applications," 1 ed.,
Norwood, MA 02062, USA: Artech House, Inc, 1996.
.. [geo] Wikipedia entry, "Geodetic system", 2009.
"""
# WGS84 Defining Parameters
a = 6378137.0 # semi-major axis of Earth in m
f = 1.0 / 298.257223563 # flattening of Earth
# WGS84 derived geometric constants
b = a * (1.0 - f) # semi-minor axis in m
e2 = 2 * f - f ** 2 # first eccentricity squared
ep2 = f * (2.0 - f) / (1.0 - f) ** 2 # second eccentricity squared
# Define squared terms for convenience
a2, b2 = a ** 2, b ** 2
x2, y2, z2 = x_m ** 2, y_m ** 2, z_m ** 2
r = np.sqrt(x2 + y2)
E2 = a2 - b2
F = 54.0 * b2 * z2
G = r ** 2 + (1 - e2) * z2 - e2 * E2
C = (e2 ** 2 * F * r ** 2) / (G ** 3)
S = (1.0 + C + np.sqrt(C ** 2 + 2 * C)) ** (1. / 3.)
P = F / (3.0 * (S + 1.0 / S + 1.0) ** 2 * G ** 2)
    Q = np.sqrt(1.0 + 2.0 * e2 ** 2 * P)
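    # Assumed completion: the source is truncated here; the remainder below
    # follows the standard closed-form solution of Zhu/Kaplan cited above.
    r0 = -(P * e2 * r) / (1.0 + Q) + np.sqrt(
        0.5 * a2 * (1.0 + 1.0 / Q)
        - (P * (1.0 - e2) * z2) / (Q * (1.0 + Q))
        - 0.5 * P * r ** 2)
    U = np.sqrt((r - e2 * r0) ** 2 + z2)
    V = np.sqrt((r - e2 * r0) ** 2 + (1.0 - e2) * z2)
    Z0 = (b2 * z_m) / (a * V)
    alt_m = U * (1.0 - b2 / (a * V))
    lat_rad = np.arctan2(z_m + ep2 * Z0, r)
    long_rad = np.arctan2(y_m, x_m)
    return lat_rad, long_rad, alt_m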