Dataset columns (string lengths):

    prompt        19 to 879k
    completion    3 to 53.8k
    api           8 to 59

Each record below lists a prompt (source code truncated at the API call to be predicted), the completion (the call that finishes the prompt), and the api (the fully-qualified name of that call).
import rasterio as rio
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib.pyplot as plt

def min_max_scale(input_array):
    scaler = MinMaxScaler(feature_range=(0, 1))
    ascolumns = input_array.reshape(-1, 1)
    t = scaler.fit_transform(ascolumns)
    result = t.reshape(input_array.shape)
    return result

def standardization(input_array):
    return (input_array - np.mean(input_array)) /
np.std(input_array)
numpy.std
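As context for this record, a minimal sketch of the numpy.std call being completed (the sample array is illustrative, not from the dataset):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0])
    standardized = (x - np.mean(x)) / np.std(x)  # zero mean, unit variance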
import numpy as np
import openmdao.api as om

from ...grid_data import GridData

class GaussLobattoInterleaveComp(om.ExplicitComponent):
    r"""
    Provides a contiguous output at all nodes for inputs which are only known
    at state discretization or collocation nodes.
    """

    def initialize(self):
        self.vars = {}
        self.options.declare('grid_data', types=GridData, desc='Container object for grid info')

    def add_var(self, name, shape, units):
        """
        Add a variable to be interleaved.

        In general these need to be variables whose values are stored separately for state
        discretization or collocation nodes (such as states or ODE outputs).

        Parameters
        ----------
        name : str
            The name of variable as it should appear in the outputs of
            the component ('interleave_comp.all_values:{name}').
        shape : tuple
            The shape of the variable at each instance in time.
        units : str
            The units of the variable.
        """
        self.vars[name] = {'shape': shape, 'units': units}

    def setup(self):
        num_disc_nodes = self.options['grid_data'].subset_num_nodes['state_disc']
        num_col_nodes = self.options['grid_data'].subset_num_nodes['col']
        num_nodes = self.options['grid_data'].subset_num_nodes['all']

        self._varnames = {}

        for name, options in self.vars.items():
            shape = options['shape']
            units = options['units']
            size =
np.prod(shape)
numpy.prod
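A minimal sketch of the numpy.prod call being completed, which turns a per-node shape tuple into a flat element count (the shape is illustrative):

    import numpy as np

    shape = (3, 2)
    size = np.prod(shape)  # 6 elements per node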
import sys
import numpy as np
from typing import Tuple
import IMLearn.metrics
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots

def decision_surface(predict, t, xrange, yrange, density=120, dotted=False,
                     colorscale=custom, showscale=True):
    xrange, yrange = np.linspace(*xrange, density), np.linspace(*yrange, density)
    xx, yy = np.meshgrid(xrange, yrange)
    pred = predict(np.c_[xx.ravel(), yy.ravel()], t)

    if dotted:
        return go.Scatter(x=xx.ravel(), y=yy.ravel(), opacity=1, mode="markers",
                          marker=dict(color=pred, size=1, colorscale=colorscale,
                                      reversescale=False),
                          hoverinfo="skip", showlegend=False)
    return go.Contour(x=xrange, y=yrange, z=pred.reshape(xx.shape),
                      colorscale=colorscale, reversescale=False, opacity=.7,
                      connectgaps=True, hoverinfo="skip", showlegend=False,
                      showscale=showscale)

def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Parameters
    ----------
    n : int
        Number of samples to generate
    noise_ratio : float
        Ratio of labels to invert

    Returns
    -------
    X : np.ndarray of shape (n_samples, 2)
        Design matrix of samples
    y : np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    y[np.random.choice(n, int(noise_ratio * n))] *= -1
    return X, y

def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), \
        generate_data(test_size, noise)

    # Question 1: Train- and test errors of AdaBoost in noiseless case
    ada = AdaBoost(DecisionStump, n_learners)
    ada._fit(train_X, train_y)
    train_error = np.zeros(n_learners)
    test_error =
np.zeros(n_learners)
numpy.zeros
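A minimal sketch of the numpy.zeros call being completed, preallocating one error slot per boosting iteration (n_learners taken from the prompt's default):

    import numpy as np

    n_learners = 250
    test_error = np.zeros(n_learners)  # float64 array of 250 zeros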
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt, cm, colors

def distance_from_center(c, x, y):
    '''
    Distance of each 2D point from the center (xc, yc)

    Parameters
    ----------
    c : array_like
        Coordinates of the center
    x, y : array_like
        Arrays with the x, y coordinates
    '''
    xc = c[0]
    yc = c[1]
    Ri = np.sqrt((x - xc)**2 + (y - yc)**2)
    return Ri - Ri.mean()

def least_square_circle(x, y):
    '''
    Least-square determination of the center of a circle

    Parameters
    ----------
    x, y : array_like
        Arrays with the x, y coordinates of the points on/inside the circle
    '''
    # coordinates of the barycenter
    x_m = np.mean(x)
    y_m = np.mean(y)
    center_estimate = x_m, y_m
    center, ier = optimize.leastsq(distance_from_center, center_estimate, args=(x, y))
    # results
    xc, yc = center
    Ri = np.sqrt((x - xc)**2 + (y - yc)**2)
    R = Ri.mean()
    residu = np.sum((Ri - R)**2)
    return xc, yc, R, residu

def plot_data_circle(x, y, xc, yc, R):
    f = plt.figure(0)
    plt.axis('equal')
    theta_fit = np.linspace(-np.pi, np.pi, 180)
    x_fit = xc + R *
np.cos(theta_fit)
numpy.cos
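A minimal sketch of the numpy.cos call being completed, tracing the fitted circle (center and radius values illustrative):

    import numpy as np

    xc, yc, R = 0.0, 0.0, 1.0
    theta_fit = np.linspace(-np.pi, np.pi, 180)
    x_fit = xc + R * np.cos(theta_fit)
    y_fit = yc + R * np.sin(theta_fit)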
import numpy as np
from functools import reduce

class Kinematics:
    def __init__(self):
        return

    @classmethod
    def TCPFrame(cls, a, alfa, d, angles):
        z1 = zip(a, alfa, d, angles)
        matrices = map(lambda tup: cls.DHmatrix(tup[0], tup[1], tup[2], tup[3]), z1)
        finalPos = reduce(lambda m1, m2: m1 * m2, matrices)
        return finalPos

    @staticmethod
    def DHmatrix(a, alfa, d, theta):
        lam = np.cos(np.array(alfa))
        mu = np.sin(np.array(alfa))
        DH = np.matrix([
            [np.cos(theta), -lam*np.sin(theta), mu*np.sin(theta), a*np.cos(theta)],
            [np.sin(theta), lam*np.cos(theta), -mu*np.cos(theta), a*
np.sin(theta)
numpy.sin
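A minimal sketch of the numpy.sin call being completed, evaluating one entry of a Denavit-Hartenberg transform (parameter values illustrative):

    import numpy as np

    a, theta = 0.5, np.pi / 4
    entry = a * np.sin(theta)  # y component of the link offset a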
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Collection of simple 2d functions defined on [-1, 1]^2 domain."""

import functools
import numpy as np
import sklearn.datasets as skdatasets

def xor(bs):
  x = np.random.random(size=(bs, 2)) * 2 - 1
  return x,
np.sign(x[:, 0] * x[:, 1])
numpy.sign
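A minimal sketch of the numpy.sign call being completed, labeling each sample by whether its two coordinates share a sign (batch size illustrative):

    import numpy as np

    x = np.random.random(size=(4, 2)) * 2 - 1
    labels = np.sign(x[:, 0] * x[:, 1])  # +1 when both coordinates share a sign, -1 otherwise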
import numpy as np
import scipy
from scipy.stats import qmc
from scipy.stats import special_ortho_group
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import warnings

from .ssp import SSP

class SSPSpace:
    def __init__(self, domain_dim: int, ssp_dim: int, axis_matrix=None,
                 phase_matrix=None, domain_bounds=None, length_scale=1):
        self.sample_points = None
        self.sample_ssps = None
        self.domain_dim = domain_dim
        self.ssp_dim = ssp_dim
        if not isinstance(length_scale, np.ndarray) or length_scale.size == 1:
            self.length_scale = length_scale * np.ones((self.domain_dim,))
        if domain_bounds is not None:
            assert domain_bounds.shape[0] == domain_dim
        self.domain_bounds = domain_bounds
        if (axis_matrix is None) & (phase_matrix is None):
            raise RuntimeError("SSP spaces must be defined by either an axis matrix or phase matrix. Use subclasses to construct spaces with predefined axes.")
        elif (phase_matrix is None):
            assert axis_matrix.shape[0] == ssp_dim, f'Expected ssp_dim {axis_matrix.shape[0]}, got {ssp_dim}.'
            assert axis_matrix.shape[1] == domain_dim
            self.axis_matrix = axis_matrix
            self.phase_matrix = (-1.j*np.log(np.fft.fft(axis_matrix, axis=0))).real
        elif (axis_matrix is None):
            assert phase_matrix.shape[0] == ssp_dim
            assert phase_matrix.shape[1] == domain_dim
            self.phase_matrix = phase_matrix
            self.axis_matrix = np.fft.ifft(np.exp(1.j*phase_matrix), axis=0).real

    def update_lengthscale(self, scale):
        if not isinstance(scale, np.ndarray) or scale.size == 1:
            self.length_scale = scale * np.ones((self.domain_dim,))
        else:
            assert scale.size == self.domain_dim
            self.length_scale = scale
        assert self.length_scale.size == self.domain_dim

    def encode(self, x):
        assert x.shape[0] == self.domain_dim
        ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
        scaled_x = ls_mat @ x
        data = np.fft.ifft(np.exp(1.j * self.phase_matrix @ scaled_x), axis=0).real
        return data

    def encode_and_deriv(self, x):
        ls_mat = np.atleast_2d(np.diag(1 / self.length_scale))
        scaled_x = x @ ls_mat
        fdata = np.exp(1.j * self.phase_matrix @ scaled_x.T)
        data = np.fft.ifft(fdata, axis=0).real
        ddata = np.fft.ifft(1.j * np.stack([np.diag(fdata[:, j]) for j in range(x.shape[0])])
                            @ self.phase_matrix @ ls_mat, axis=0).real
        return data.T, ddata.T

    def encode_fourier(self, x):
        assert x.shape[0] == self.domain_dim
        ls_mat = np.atleast_2d(np.diag(1/self.length_scale.flatten()))
        scaled_x = ls_mat @ x
        data = np.exp(1.j * self.phase_matrix @ scaled_x)
        return data

    def encode_as_SSP(self, x):
        assert x.shape[0] == self.domain_dim
        return SSP(self.encode(x), self)

    def decode(self, ssp, method='from-set', num_sample_pts=10000,
               from_set_method='grid', num_init_pts=10):
        if method == 'least-squares':
            # problems due to complex log
            x = np.linalg.lstsq(self.phase_matrix,
                                (1.j*np.log(np.fft.fft(ssp, axis=0))).real)[0]
            # raise NotImplementedError()
            # fssp = np.fft.fft(ssp,axis=0)
            # x = np.linalg.lstsq(np.tile(self.phase_matrix,(2,1)), np.hstack([np.arccos(fssp.real), np.arcsin(fssp.imag)]))
            return x
        elif method == 'from-set':
            sample_ssps, sample_points = self.get_sample_ssps(num_sample_pts, method=from_set_method)
            sims = sample_ssps.T @ ssp
            return sample_points[:, np.argmax(sims)]
        elif method == 'direct-optim':
            x0 = self.decode(ssp, method='from-set', num_sample_pts=num_init_pts)

            def min_func(x, target=ssp):
                x_ssp = self.encode(np.atleast_2d(x))
                return -np.inner(x_ssp, target).flatten()

            soln = minimize(min_func, x0, method='L-BFGS-B')
            return soln.x
        elif method == 'grad_descent':
            x = self.decode(ssp, method='from-set', num_sample_pts=num_init_pts)
            fssp = np.fft.fft(ssp, axis=0)
            ls_mat = np.diag(1/self.length_scale.flatten())
            for j in range(10):
                scaled_x = ls_mat @ x
                x_enc = np.exp(1.j * self.phase_matrix @ scaled_x)
                grad_mat = (1.j * (self.phase_matrix @ ls_mat).T * x_enc)
                grad = (grad_mat @ fssp.T).flatten()
                x = x - 0.1*grad.real
            return x
        elif method == 'nonlin-reg':
            x = self.decode(ssp, method='from-set', num_sample_pts=num_init_pts)
            fssp = np.fft.fft(ssp, axis=0)
            dy = np.hstack([fssp.real, fssp.imag])
            ls_mat = np.diag(1/self.length_scale.flatten())
            for j in range(10):
                J = np.vstack([self.phase_matrix * np.sin(self.phase_matrix @ x @ ls_mat).reshape(1, -1),
                               -self.phase_matrix * np.cos(self.phase_matrix @ x @ ls_mat).reshape(1, -1)])
                soln = np.linalg.pinv(J.T @ J) @ J.T @ dy
                x = x + soln
            return x
        else:
            raise NotImplementedError()

    def clean_up(self, ssp, **kwargs):
        x = self.decode(ssp, **kwargs)
        return self.encode(x)

    def get_sample_points(self, num_points, method='grid'):
        if self.domain_bounds is None:
            bounds = np.vstack([-10*np.ones(self.domain_dim), 10*np.ones(self.domain_dim)]).T
        else:
            bounds = self.domain_bounds
        if method == 'grid':
            n_per_dim = int(num_points**(1/self.domain_dim))
            if n_per_dim**self.domain_dim != num_points:
                warnings.warn((f'Evenly distributing points over a '
                               f'{self.domain_dim} grid requires numbers '
                               f'of samples to be powers of {self.domain_dim}. '
                               f'Requested {num_points} samples, returning '
                               f'{n_per_dim**self.domain_dim}'), RuntimeWarning)
            ### end if
            xs = np.linspace(bounds[:, 0], bounds[:, 1], n_per_dim)
            xxs = np.meshgrid(*[xs[:, i] for i in range(self.domain_dim)])
            sample_points = np.array([x.reshape(-1) for x in xxs])
            return sample_points
        elif method == 'sobol':
            sampler = qmc.Sobol(d=self.domain_dim)
            lbounds = bounds[:, 0]
            ubounds = bounds[:, 1]
            u_sample_points = sampler.random(num_points)
            sample_points = qmc.scale(u_sample_points, lbounds, ubounds)
            return sample_points.T
        else:
            raise NotImplementedError()

    def get_sample_ssps(self, num_points, **kwargs):
        # make new if num_pts different than what's stored?
        sample_points = self.get_sample_points(num_points, **kwargs)
        sample_ssps = self.encode(sample_points)
        return sample_ssps, sample_points

    def identity(self):
        s = np.zeros(self.ssp_dim)
        s[0] = 1
        return s

    def bind(self, a, b):
        return np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real

    def invert(self, a):
        return a[-np.arange(len(a))]

    def normalize(self, ssp):
        return ssp/np.max([1e-6, np.sqrt(np.sum(ssp**2))])

    def unitary(self, ssp):
        fssp = np.fft.fft(ssp)
        fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
        return np.fft.ifft(fssp).real

    def unitary_fourier(self, fssp):
        fssp = fssp/np.sqrt(fssp.real**2 + fssp.imag**2)
        return fssp

    def decode_path(self, ssp_path, N_ma=None, n_samples=10000):
        sample_ssps, sample_points = self.get_sample_ssps(n_samples)
        path = np.zeros((ssp_path.shape[0], self.domain_dim))
        max_sims = np.zeros(ssp_path.shape[0])
        for i in range(ssp_path.shape[0]):
            sims = sample_ssps.T @ ssp_path[i, :]
            max_sims[i] = np.max(sims)
            path[i, :] = sample_points[:, np.argmax(sims)]
        return path, max_sims

    def similarity_plot(self, ssp, n_grid=100, plot_type='heatmap', cmap="YlGnBu", ax=None, **kwargs):
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        if self.domain_dim == 1:
            xs = np.linspace(self.domain_bounds[0, 0], self.domain_bounds[0, 1], n_grid)
            im = ax.plot(xs, self.encode(xs.reshape(1, -1)).T @ self.data)
            ax.set_xlim(self.domain_bounds[0, 0], self.domain_bounds[0, 1])
        elif self.domain_dim == 2:
            xs = np.linspace(self.domain_bounds[0, 0], self.domain_bounds[0, 1], n_grid)
            ys = np.linspace(self.domain_bounds[1, 0], self.domain_bounds[1, 1], n_grid)
            X, Y = np.meshgrid(xs, ys)
            sims = self.encode(np.vstack([X.reshape(-1), Y.reshape(-1)])).T @ ssp
            if plot_type == 'heatmap':
                im = ax.pcolormesh(X, Y, sims.reshape(X.shape), cmap=cmap, **kwargs)
            elif plot_type == 'contour':
                im = ax.contour(X, Y, sims.reshape(X.shape), cmap=cmap, **kwargs)
            elif plot_type == 'contourf':
                im = ax.contourf(X, Y, sims.reshape(X.shape), cmap=cmap, **kwargs)
            ax.set_xlim(self.domain_bounds[0, 0], self.domain_bounds[0, 1])
            ax.set_ylim(self.domain_bounds[1, 0], self.domain_bounds[1, 1])
        else:
            raise NotImplementedError()
        return im

class RandomSSPSpace(SSPSpace):
    def __init__(self, domain_dim: int, ssp_dim: int, domain_bounds=None,
                 length_scale=1, rng=np.random.default_rng()):
        # uniform phases in [-pi, pi); Generator.random takes a shape tuple
        partial_phases = rng.random((ssp_dim//2, domain_dim))*2*np.pi - np.pi
        axis_matrix = _constructaxisfromphases(partial_phases)
        super().__init__(domain_dim, ssp_dim, axis_matrix=axis_matrix,
                         domain_bounds=domain_bounds, length_scale=length_scale)

class HexagonalSSPSpace(SSPSpace):
    def __init__(self, domain_dim: int, ssp_dim: int = 151, n_rotates: int = 5,
                 n_scales: int = 5, scale_min=2*np.pi/np.sqrt(6) - 0.5,
                 scale_max=2*np.pi/np.sqrt(6) + 0.5, domain_bounds=None, length_scale=1):
        if (n_rotates == 5) & (n_scales == 5) & (ssp_dim != 151):
            n_rotates = int(np.max([1, np.sqrt((ssp_dim-1)/(2*(domain_dim+1)))]))
            n_scales = n_rotates
        phases_hex = np.hstack([np.sqrt(1 + 1/domain_dim)*np.identity(domain_dim)
                                - (domain_dim**(-3/2))*(np.sqrt(domain_dim+1) + 1),
                                (domain_dim**(-1/2))*np.ones((domain_dim, 1))]).T
        self.grid_basis_dim = domain_dim + 1
        self.num_grids = n_rotates*n_scales
        scales = np.linspace(scale_min, scale_max, n_scales)
        phases_scaled = np.vstack([phases_hex*i for i in scales])
        if (n_rotates == 1):
            phases_scaled_rotated = phases_scaled
        elif (domain_dim == 1):
            scales = np.linspace(scale_min, scale_max, n_scales + n_rotates)
            phases_scaled_rotated = np.vstack([phases_hex*i for i in scales])
        elif (domain_dim == 2):
            angles = np.linspace(0, 2*np.pi/3, n_rotates)
            R_mats = np.stack([np.stack([np.cos(angles), -np.sin(angles)], axis=1),
                               np.stack([np.sin(angles), np.cos(angles)], axis=1)], axis=1)
            phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0, 2, 1).reshape(-1, domain_dim)
        else:
            R_mats = special_ortho_group.rvs(domain_dim, size=n_rotates)
            phases_scaled_rotated = (R_mats @ phases_scaled.T).transpose(0, 2, 1).reshape(-1, domain_dim)
        axis_matrix = _constructaxisfromphases(phases_scaled_rotated)
        ssp_dim = axis_matrix.shape[0]
        super().__init__(domain_dim, ssp_dim, axis_matrix=axis_matrix,
                         domain_bounds=domain_bounds, length_scale=length_scale)

    def sample_grid_encoders(self, n):
        sample_pts = self.get_sample_points(n, method='sobol')
        N = self.num_grids
        if N < n:
            sorts = np.hstack([np.arange(N), np.random.randint(0, N - 1, size=n - N)])
        else:
            sorts = np.arange(n)
        encoders = np.zeros((self.ssp_dim, n))
        for i in range(n):
            sub_mat = _get_sub_SSP(sorts[i], N, sublen=self.grid_basis_dim)
            proj_mat = _proj_sub_SSP(sorts[i], N, sublen=self.grid_basis_dim)
            sub_space = SSPSpace(self.domain_dim, 2*self.grid_basis_dim + 1,
                                 axis_matrix=sub_mat @ self.axis_matrix)
            encoders[:, i] = N * proj_mat @ sub_space.encode(sample_pts[:, i])
        return encoders

def _constructaxisfromphases(K):
    d = K.shape[0]
    n = K.shape[1]
    axes = np.ones((d*2 + 1, n))
    for i in range(n):
        F = np.ones((d*2 + 1,), dtype="complex")
        F[0:d] = np.exp(1.j*K[:, i])
        F[-d:] = np.flip(np.conj(F[0:d]))
        F = np.fft.ifftshift(F)
        axes[:, i] = np.fft.ifft(F).real
    return axes

def _get_sub_FourierSSP(n, N, sublen=3):
    # Return a matrix, \bar{A}_n
    # Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
    # Then
    #   \bar{A}_n F{S_{total}} = F{S_n}
    # i.e. pick out the sub vector in the Fourier domain
    tot_len = 2*sublen*N + 1
    FA = np.zeros((2*sublen + 1, tot_len))
    FA[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)
    FA[sublen, sublen*N] = 1
    FA[sublen+1:, tot_len - np.arange(sublen*(n+1), sublen*n, -1)] = np.eye(sublen)
    return FA

def _get_sub_SSP(n, N, sublen=3):
    # Return a matrix, A_n
    # Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
    # Then
    #   A_n S_{total} = S_n
    # i.e. pick out the sub vector in the time domain
    tot_len = 2*sublen*N + 1
    FA = _get_sub_FourierSSP(n, N, sublen=sublen)
    W = np.fft.fft(np.eye(tot_len))
    invW = np.fft.ifft(np.eye(2*sublen + 1))
    A = invW @ np.fft.ifftshift(FA) @ W
    return A.real

def _proj_sub_FourierSSP(n, N, sublen=3):
    # Return a matrix, \bar{B}_n
    # Consider the multi scale representation (S_{total}) and sub vectors (S_n) described in the paper
    # Then
    #   \sum_n \bar{B}_n F{S_{n}} = F{S_{total}}
    # i.e. project the sub vector in the Fourier domain such that summing all such projections
    # gives the full vector in Fourier domain
    tot_len = 2*sublen*N + 1
    FB = np.zeros((2*sublen + 1, tot_len))
    FB[0:sublen, sublen*n:sublen*(n+1)] =
np.eye(sublen)
numpy.eye
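A minimal sketch of the numpy.eye call being completed, writing an identity block into the Fourier-domain selection matrix (n, N, sublen values illustrative):

    import numpy as np

    sublen, n, N = 3, 1, 4
    FB = np.zeros((2*sublen + 1, 2*sublen*N + 1))
    FB[0:sublen, sublen*n:sublen*(n+1)] = np.eye(sublen)  # select one sub-vector block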
import numpy as np
from .Templates import Estimator_mimo, Descriptor
from SCM3GPP.scm_helper_MIMO import scm_channel
from SCM3GPP.toeplitz_helpers import best_circulant_approximation, vec2mat, mat2vec, best_block_circulant_approximation
from training_CNN_mimo import pilot_matrix
from scipy.linalg import toeplitz, circulant

class FastMMSE(Estimator_mimo, Descriptor):
    _object_counter = 1

    def __init__(self, channel, snr, n_antennas_BS, n_antennas_MS, n_pilots, name=None):
        self.snr = snr
        self.n_pilots = n_pilots
        self.n_antennas_BS = n_antennas_BS
        self.n_antennas_MS = n_antennas_MS
        self.channel_config = channel.get_config()
        self.rho = []
        F_BS = np.fft.fft(np.eye(n_antennas_BS))
        F_MS = np.fft.fft(np.eye(n_antennas_MS))
        self.F = 1 / np.sqrt(n_antennas_BS * n_antennas_MS) * np.kron(F_MS, F_BS)
        _, t_BS, t_MS = scm_channel(np.array([0.0]), np.array([0.0]), np.array([1.0]),
                                    1, n_antennas_BS, n_antennas_MS,
                                    sigma_BS=channel.path_sigma_BS,
                                    sigma_MS=channel.path_sigma_MS)
        C_BS = toeplitz(t_BS)
        C_MS = toeplitz(t_MS)
        C =
np.kron(C_MS,C_BS)
numpy.kron
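A minimal sketch of the numpy.kron call being completed, combining per-side covariances into a joint MIMO covariance via the Kronecker product (matrix sizes illustrative):

    import numpy as np

    C_MS = np.eye(2)         # mobile-side covariance
    C_BS = np.eye(3)         # base-station-side covariance
    C = np.kron(C_MS, C_BS)  # (6, 6) joint covariance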
import numpy as np
import scipy
import cv2
from numpy.fft import fft, ifft
from scipy import signal
from lib.eco.fourier_tools import resize_dft
from .feature import extract_hog_feature
from lib.utils import cos_window
from lib.fft_tools import ifft2, fft2

class DSSTScaleEstimator:
    def __init__(self, target_sz, config):
        init_target_sz = np.array([target_sz[0], target_sz[1]])
        self.config = config
        num_scales = self.config.number_of_scales_filter
        scale_step = self.config.scale_step_filter
        scale_sigma = self.config.number_of_interp_scales * self.config.scale_sigma_factor

        scale_exp = np.arange(-np.floor(num_scales - 1)/2,
                              np.ceil(num_scales-1)/2+1,
                              dtype=np.float32) * self.config.number_of_interp_scales / num_scales
        scale_exp_shift = np.roll(scale_exp, (0, -int(np.floor((num_scales-1)/2))))

        interp_scale_exp = np.arange(-np.floor((self.config.number_of_interp_scales - 1) / 2),
                                     np.ceil((self.config.number_of_interp_scales - 1) / 2) + 1,
                                     dtype=np.float32)
        interp_scale_exp_shift = np.roll(interp_scale_exp,
                                         [0, -int(np.floor(self.config.number_of_interp_scales - 1) / 2)])

        self.scale_size_factors = scale_step ** scale_exp
        self.interp_scale_factors = scale_step ** interp_scale_exp_shift

        ys = np.exp(-0.5 * (scale_exp_shift ** 2) / (scale_sigma ** 2))
        self.yf = np.real(fft(ys))
        self.window = np.hanning(ys.shape[0]).T.astype(np.float32)

        # make sure the scale model is not too large, to save computation time
        self.num_scales = num_scales
        self.scale_step = scale_step
        if self.config.scale_model_factor ** 2 * np.prod(init_target_sz) > self.config.scale_model_max_area:
            scale_model_factor = np.sqrt(self.config.scale_model_max_area / np.prod(init_target_sz))
        else:
            scale_model_factor = self.config.scale_model_factor

        # set the scale model size
        self.scale_model_sz = np.maximum(np.floor(init_target_sz * scale_model_factor), np.array([8, 8]))
        self.max_scale_dim = self.config.s_num_compressed_dim == 'MAX'
        if self.max_scale_dim:
            self.s_num_compressed_dim = len(self.scale_size_factors)
        else:
            self.s_num_compressed_dim = self.config.s_num_compressed_dim

    def init(self, im, pos, base_target_sz, current_scale_factor):
        # self.scale_factors = np.array([1])
        scales = current_scale_factor * self.scale_size_factors
        xs = self._extract_scale_sample(im, pos, base_target_sz, scales, self.scale_model_sz)
        self.s_num = xs

        # compute projection basis
        if self.max_scale_dim:
            self.basis, _ = scipy.linalg.qr(self.s_num, mode='economic')
            scale_basis_den, _ = scipy.linalg.qr(xs, mode='economic')
        else:
            U, _, _ = np.linalg.svd(self.s_num)
            self.basis = U[:, :self.s_num_compressed_dim]
            V, _, _ = np.linalg.svd(xs)
            scale_basis_den = V[:, :self.s_num_compressed_dim]
        self.basis = self.basis.T

        # compute numerator
        feat_proj = self.basis.dot(self.s_num) * self.window
        sf_proj = np.fft.fft(feat_proj, axis=1)
        self.sf_num = self.yf * np.conj(sf_proj)

        # update denominator
        xs = scale_basis_den.T.dot(xs)*self.window
        xsf = fft(xs, axis=1)
        new_sf_den = np.sum((xsf * np.conj(xsf)), 0)
        self.sf_den = new_sf_den

    def update(self, im, pos, base_target_sz, current_scale_factor):
        base_target_sz =
np.array([base_target_sz[0],base_target_sz[1]])
numpy.array
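A minimal sketch of the numpy.array call being completed, copying the (height, width) target size into an array (values illustrative):

    import numpy as np

    base_target_sz = (48, 32)
    base_target_sz = np.array([base_target_sz[0], base_target_sz[1]])  # shape (2,)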
import tensorflow as tf
import numpy as np
from tensorflow.python.client import timeline
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
from input_pipelines.input_pipeline_cityscapes import inference_input
from model import PanopticSegmentationModel
from config import Params
from PIL import Image, ImageDraw
from utils import draw_utils, mask_utils_np
from utils.load_json_to_params import load_json_to_params
import time
import argparse
import skimage
import os
from scipy.misc import imresize
import matplotlib.pyplot as plt
import cv2

OFFSET = 1000
THICK_BORDERS = False

def get_arguments():
    parser = argparse.ArgumentParser(description="Panoptic-Slim Network")
    parser.add_argument("--json_path", type=str, default='',
                        help="The path to the json file containing the parameters")
    return parser.parse_args()

def predict(params):
    dataset = inference_input(params)
    iterator = dataset.make_one_shot_iterator()
    image, im_name, im_raw = iterator.get_next()

    model = PanopticSegmentationModel(image, None, params)
    prediction_dict = model.predict()
    prediction_dict = model.postprocess(prediction_dict)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    global_init = tf.global_variables_initializer()
    local_init = tf.local_variables_initializer()
    sess.run(global_init)
    sess.run(local_init)

    restore_var = tf.global_variables()
    ckpt = tf.train.get_checkpoint_state(params.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        loader = tf.train.Saver(var_list=restore_var)
        model.load(loader, sess, ckpt.model_checkpoint_path)
    else:
        print('No checkpoint file found.')

    # Plotting settings
    fig = plt.figure(0, [16, 8], dpi=80)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)

    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True)
    run_metadata = tf.RunMetadata()

    for i in range(params.num_steps_predict):
        start_time = time.time()
        print(i)
        if i == 1:
            prediction_dict_out, image_out, im_name_out, im_raw_out = sess.run(
                [prediction_dict, image, im_name, im_raw],
                options=options, run_metadata=run_metadata)
            fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = fetched_timeline.generate_chrome_trace_format()
            new_path = os.path.join(params.log_dir, 'timeline_predict.json')
            print(new_path)
            with open(new_path, 'w') as f:
                f.write(chrome_trace)
        else:
            prediction_dict_out, image_out, im_name_out, im_raw_out = sess.run(
                [prediction_dict, image, im_name, im_raw])
        duration = time.time() - start_time
        print('({:.3f} sec/step)'.format(duration))

        if params.apply_semantic_branch and params.apply_instance_branch:
            panoptic_out = prediction_dict_out['panoptic'][0]
            class_ids = panoptic_out[..., 0]
            max = len(params.cids2colors) - 1
            class_ids[class_ids == 255] = max
            # class_ids[class_ids > max] = max
            colorpalettes = np.array(params.cids2colors, dtype=np.uint8)
            class_colors = colorpalettes[class_ids]
            panoptic_for_edges = np.stack([class_ids, panoptic_out[..., 1],
                                           np.zeros_like(class_ids)], axis=2).astype(np.uint8)
            # print(panoptic_for_edges.shape)
            edges = cv2.Canny(panoptic_for_edges, 1, 2)
            if THICK_BORDERS:
                edges_2 = cv2.Canny(edges, 100, 200)
                edges_total = np.minimum(edges + edges_2, 255)
            else:
                edges_total = edges
            edges_bool = (edges_total / 255).astype(np.bool)
            edges_invert = np.invert(edges_bool)
            edges_invert = edges_invert.astype(np.uint8)
            class_colors = class_colors.astype(np.uint8) *
np.expand_dims(edges_invert, axis=2)
numpy.expand_dims
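A minimal sketch of the numpy.expand_dims call being completed, broadcasting a 2-D edge mask across the color channels (shapes illustrative):

    import numpy as np

    class_colors = np.full((4, 4, 3), 255, dtype=np.uint8)
    edges_invert = np.zeros((4, 4), dtype=np.uint8)
    masked = class_colors * np.expand_dims(edges_invert, axis=2)  # (4, 4, 1) broadcast over RGB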
import roslib import sys import rospy import cv2 import math import imutils import statistics import numpy as np from std_msgs.msg import String from sensor_msgs.msg import Image from std_msgs.msg import Float64MultiArray, Float64 from cv_bridge import CvBridge, CvBridgeError from scipy.spatial import distance as dist class image_converter: # Defines publisher and subscriber def __init__(self): # initialize the node named image_processing rospy.init_node('image_processing', anonymous=True) # initialize a publisher to send images from camera1 to a topic named image_topic1 self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=1) self.image_pub2 = rospy.Publisher("image_topic2", Image, queue_size=1) #Initialize a publisher to send joints angular posiion toa topic called joints_pos self.joints_pub=rospy.Publisher("joints_pos",Float64MultiArray,queue_size=10) #initialize a publisher for the robot end effector self.vision_end_effector_pub=rospy.Publisher("vision_end_effector",Float64MultiArray,queue_size=10) self.fk_end_effector_pub = rospy.Publisher("fk_end_effector", Float64MultiArray, queue_size=10) self.actual_target_trajectory_pub = rospy.Publisher("actual_target_trajectory", Float64MultiArray,queue_size=10) self.vision_target_trajectory_pub = rospy.Publisher("vision_target_trajectory", Float64MultiArray,queue_size=10) #initialize a publisher for the four angles self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10) self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10) self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10) self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10) #Initialize the publisher for t target self.target_x_pub = rospy.Publisher("/target/x_position_controller/command", Float64, queue_size=10) self.target_y_pub = rospy.Publisher("/target/y_position_controller/command", Float64, queue_size=10) self.target_z_pub = rospy.Publisher("/target/z_position_controller/command", Float64, queue_size=10) # initialize a subscriber to recieve messages rom a topic named /robot/camera1/image_raw and use callback function to recieve data self.image_sub1 = rospy.Subscriber("/camera1/robot/image_raw", Image, self.callback1) self.image_sub2 = rospy.Subscriber("/camera2/robot/image_raw", Image, self.callback2) #initialize a publisher to send desired trajectory self.time_trajectory = rospy.get_time() #initialize variables self.red = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64') self.green = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64') self.p2m = np.array([0.0], dtype='float64') self.joint1 = np.array([0.0], dtype='float64') self.joint2 = np.array([0.0], dtype='float64') self.joint3 = np.array([0.0], dtype='float64') self.joint4 = np.array([0.0], dtype='float64') # initialize errors self.time_previous_step = np.array([rospy.get_time()], dtype='float64') self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64') # initialize error and derivative of error for trajectory tracking self.error = np.array([0.0, 0.0,0.0], dtype='float64') self.error_d = np.array([0.0, 0.0,0.0], dtype='float64') # initialize the bridge between openCV and ROS self.bridge = CvBridge() # Recieve data from camera 1, process it, and publish def callback1(self, data): # Recieve the image try: self.image1 = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: 
print(e) def callback2(self, data): # Recieve the image try: self.image2 = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print(e) #Blob detection starts here------------------------------------------------------- #Same to 2_1_joint_estimation.py def detect_red(self,image1, image2): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([0, 200, 0]) higher_red1 = np.array([0, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0]) cy, cz1 = (int(x1), int(y1)) radius1 = int(radius1) image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0) hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV) lower_red2 = np.array([0, 200, 0]) higher_red2 = np.array([0, 255, 255]) red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2) res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2) red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY) canny_edge2 = cv2.Canny(red_s_gray2, 30, 70) contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0]) cx, cz2 = (int(x2), int(y2)) radius2 = int(radius2) return np.array([cx, cy, cz1, cz2]) def detect_blue(self,image1, image2): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([70, 0, 0]) higher_red1 = np.array([255, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0]) cy, cz1 = (int(x1), int(y1)) radius1 = int(radius1) image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0) hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV) lower_red2 = np.array([70, 0, 0]) higher_red2 = np.array([255, 255, 255]) red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2) res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2) red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY) canny_edge2 = cv2.Canny(red_s_gray2, 30, 70) contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0]) cx, cz2 = (int(x2), int(y2)) radius2 = int(radius2) return np.array([cx, cy, cz1, cz2]) def detect_green(self,image1, image2): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([55, 0, 0]) higher_red1 = np.array([100, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0]) cy, cz1 = (int(x1), int(y1)) 
radius1 = int(radius1) image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0) hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV) lower_red2 = np.array([55, 0, 0]) higher_red2 = np.array([100, 255, 255]) red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2) res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2) red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY) canny_edge2 = cv2.Canny(red_s_gray2, 30, 70) contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0]) cx, cz2 = (int(x2), int(y2)) radius2 = int(radius2) return np.array([cx, cy, cz1, cz2]) def detect_yellow(self,image1, image2): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([16, 244, 0]) higher_red1 = np.array([51, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0]) cy, cz1 = (int(x1), int(y1)) radius1 = int(radius1) image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0) hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV) lower_red2 = np.array([16, 244, 0]) higher_red2 = np.array([51, 255, 255]) red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2) res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2) red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY) canny_edge2 = cv2.Canny(red_s_gray2, 30, 70) contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0]) cx, cz2 = (int(x2), int(y2)) radius2 = int(radius2) return np.array([cx, cy, cz1, cz2]) def detect_blue_contours(image1): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([70, 0, 0]) higher_red1 = np.array([255, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) return np.array([contours1]) def detect_yellow_contours(image1): image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0) hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV) lower_red1 = np.array([16, 244, 0]) higher_red1 = np.array([51, 255, 255]) red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1) res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1) red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY) canny_edge1 = cv2.Canny(red_s_gray1, 30, 70) contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) (x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0]) cy,cz1 = (int(x1), int(y1)) return np.array([contours1]) def get_y1_y2(yellow_contours, blue_contours): y1 = np.min(yellow_contours, axis = 0) y1 = y1[0][1] y1 = y1[:,1] y2 = np.max(blue_contours, axis = 0) y2 = y2[0][1] y2 = y2[:,1] return y1, y2 def pixelTometer(self, image1,image2): yellow_contours = detect_yellow_contours(image2) blue_contours = 
detect_blue_contours(image2) y2 = detect_blue(self, image1, image2) y2 = y2[3] y1, y2 = get_y1_y2(yellow_contours, blue_contours) p2m = 2.5/(y1 - y2) #65 is the best number return p2m #---------------------------------------------------------------------------------------------- #Angle Detection starts here #This part is same as 2_1_joint_estimation.py def detect_angles_blob(self,image1,image2): try: p=pixelTometer(self,image1,image2) self.p2m = p except Exception as e: p = self.p2m try: green = detect_green(self, image1, image2) self.green = green except Exception as e: green = self.green try: red = detect_red(self, image1, image2) self.red = red except Exception as e: red = self.red p=pixelTometer(self,image1,image2) yellow=p*detect_yellow(self,image1,image2) blue=p*detect_blue(self,image1,image2) ja1=0.0 ja2=np.pi/2-np.arctan2((blue[2] - green[2]), (blue[1] - green[1])) ja3 = np.arctan2((blue[3] - green[3]), (blue[0] - green[0]))-np.pi/2 ja4 = np.arctan2((green[2] - red[2]), -(green[1] - red[1]))-np.pi/2-ja2 return np.array([ja1,ja2,ja3,ja4]) def angle_trajectory(self): curr_time = np.array([rospy.get_time() - self.time_trajectory]) ja1 = 0.1 ja2 = float((np.pi / 2) * np.sin((np.pi / 15) * curr_time)) ja3 = float((np.pi / 2) * np.sin((np.pi / 18) * curr_time)) ja4 = float((np.pi / 2) * np.sin((np.pi / 20) * curr_time)) return np.array([ja1, ja2, ja3, ja4]) def actual_target_position(self): curr_time = np.array([rospy.get_time() - self.time_trajectory]) x_d = float((2.5 * np.cos(curr_time * np.pi / 15))+0.5) y_d = float(2.5 * np.sin(curr_time * np.pi / 15)) z_d = float((1 * np.sin(curr_time * np.pi / 15))+7.0) return np.array([x_d,y_d,z_d]) #FK starts here-------------------------------------------------------------------------------- #This part is same as 3_1_FK.py def end_effector_position(self, image1, image2): try: p=pixelTometer(self,image1,image2) self.p2m = p except Exception as e: p = self.p2m yellow_posn = detect_yellow(self,image1, image2) red_posn = detect_red(self, image1, image2) yellow_posn[3] = 800 - yellow_posn[3] red_posn[3] = 800 - red_posn[3] cx, cy, cz1, cz2 = p * (red_posn - yellow_posn) ee_posn = np.array([cx, cy, cz2]) ee_posn = np.round(ee_posn,1) return ee_posn #Calculate the jacobian def calculate_jacobian(self,image1,image2): ja1,ja2,ja3,ja4=detect_angles_blob(self,image1,image2) jacobian=np.array([[3*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)*np.cos(ja4) +3.5*np.cos(ja1)*np.sin(ja2)*np.cos(ja3) -3*np.sin(ja1)*np.cos(ja4)*np.sin(ja3) -3.5*np.sin(ja1)*np.sin(ja3) +3*np.cos(ja1)*np.cos(ja2)*np.sin(ja4), 3*
np.sin(ja1)
numpy.sin
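A minimal sketch of the numpy.sin call being completed, evaluating one trigonometric term of the manipulator Jacobian built from the estimated joint angles (angle values illustrative):

    import numpy as np

    ja1, ja2, ja3, ja4 = 0.1, 0.4, -0.2, 0.3
    term = 3 * np.sin(ja1)  # one term of a Jacobian entry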
# -*- coding: utf-8 -*-
import numpy as np
import cv2

# landmarks line mask
def generate_mask(image, landmarks):
    '''
    generate face mask according to landmarks
    Args:
        image: numpy.ndarray
        landmarks: 68x2 numpy.ndarray
    Return:
        a mask map with three channels: line mask, region mask, wider mask
    '''
    # layer1: line
    # layer2: region without expansion
    # layer3: wider mask
    linemask = generate_line_mask(image, landmarks)
    regionmask = generate_region_mask(image, landmarks)
    widermask = generate_wider_mask(image, landmarks)
    mask = np.stack([linemask, regionmask, widermask]).transpose(1, 2, 0)
    # return channels: BGR = (linemask, regionmask, widermask)
    return mask

def generate_line_mask(image, landmarks):
    linemask = image.copy()  # np.zeros_like(image).astype(np.uint8)
    # face
    linemask = connect_line(linemask, landmarks[0:17])
    # eyebrow
    linemask = connect_line(linemask, landmarks[17:22])
    linemask = connect_line(linemask, landmarks[22:27])
    # nose
    linemask = connect_line(linemask, np.vstack([landmarks[27:31], landmarks[33]]))
    linemask = connect_line(linemask, landmarks[31:36])
    # eyes
    linemask = connect_line(linemask, np.vstack([landmarks[36:42], landmarks[36]]))
    linemask = connect_line(linemask, np.vstack([landmarks[42:48], landmarks[42]]))
    # mouth
    linemask = connect_line(linemask, np.vstack([landmarks[48:60], landmarks[48]]))
    linemask = connect_line(linemask, np.vstack([landmarks[60:68], landmarks[60]]))
    return linemask

def connect_line(input, landmarks):
    img = input.copy()
    size = len(landmarks)
    for i in range(0, size-1):
        img = cv2.line(img, (landmarks[i, 0], landmarks[i, 1]),
                       (landmarks[i+1, 0], landmarks[i+1, 1]),
                       (255, 255, 255), 1, cv2.LINE_AA)
    return img

# face landmarks origin
def generate_region_mask(image, landmarks):
    regionmask = np.zeros_like(image[:, :, 0])
    '''
    Use five landmarks
    w = (five_landmarks[0, 1] - five_landmarks[1, 1]) / (five_landmarks[0, 0] - five_landmarks[1, 0])
    b = five_landmarks[0, 1] - five_landmarks[0, 0] * w
    '''
    # ----- layer2: eye-1
    eyepoints = np.vstack([landmarks[17:22], landmarks[36:42]])
    hull = cv2.convexHull(eyepoints.astype(np.int32)).astype(np.int32)
    regionmask = cv2.drawContours(regionmask, [hull], 0, (255), -1)
    # ----- layer2: eye-2
    eyepoints = np.vstack([landmarks[22:27], landmarks[42:48]])
    hull = cv2.convexHull(eyepoints.astype(np.int32)).astype(np.int32)
    regionmask = cv2.drawContours(regionmask, [hull], 0, (255), -1)
    # ----- layer3: mouth
    mouthpoints = landmarks[48:68]
    hull = cv2.convexHull(mouthpoints.astype(np.int32)).astype(np.int32)
    regionmask = cv2.drawContours(regionmask, [hull], 0, (255), -1)
    return regionmask

# fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cuda:0', flip_input=False)
def generate_wider_mask(image, landmarks):
    '''
    generate weight mask according to landmarks
    :param image: np.ndarray
    :param landmarks: 68x2
    :return: a weight mask with weight of 0, 64, 128, 192
    '''
    # ----- get five landmarks
    # five_landmarks = convert_to_five_landmarks(landmarks)
    facemask = generate_facial_mask(image, landmarks)
    eyemask = generate_eye_mask(image, landmarks)
    mouthmask = generate_mouth_mask(image, landmarks)
    weightmask = np.zeros_like(image[:, :, 0]).astype(np.uint8)
    weightmask[facemask > 0] = 64
    weightmask[eyemask > 0] = 128
    weightmask[mouthmask > 0] = 192
    return weightmask

def generate_facial_mask(image, landmarks):
    '''
    generate weight mask according to landmarks
    :param image: np.ndarray
    :param landmarks: 68x2
    :return: a mask map
    '''
    facemask = np.zeros_like(image[:, :, 0])
    # ----- layer1: face region
    hull = cv2.convexHull(np.int32(landmarks)).astype(np.int32)
    facemask = cv2.drawContours(facemask, [hull], 0, (255), -1)
    return facemask

def generate_eye_mask(image, landmarks):
    '''
    generate weight mask according to landmarks
    :param image: np.ndarray
    :param landmarks: 68x2
    :return: a mask map
    '''
    eyemask =
np.zeros_like(image[:, :, 0])
numpy.zeros_like
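A minimal sketch of the numpy.zeros_like call being completed, allocating a single-channel mask matching the image's spatial size (image shape illustrative):

    import numpy as np

    image = np.zeros((128, 128, 3), dtype=np.uint8)
    eyemask = np.zeros_like(image[:, :, 0])  # (128, 128) uint8 zeros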
from __future__ import division import glob import numpy as NP from functools import reduce import numpy.ma as MA import progressbar as PGB import h5py import healpy as HP import warnings import copy import astropy.cosmology as CP from astropy.time import Time, TimeDelta from astropy.io import fits from astropy import units as U from astropy import constants as FCNST from scipy import interpolate from astroutils import DSP_modules as DSP from astroutils import constants as CNST from astroutils import nonmathops as NMO from astroutils import mathops as OPS from astroutils import lookup_operations as LKP import prisim from prisim import interferometry as RI from prisim import primary_beams as PB from prisim import delay_spectrum as DS try: from pyuvdata import UVBeam except ImportError: uvbeam_module_found = False else: uvbeam_module_found = True prisim_path = prisim.__path__[0]+'/' cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc ################################################################################ def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix, triads=None, bltriplet=None, hdf5file_prefix=None, infmt='npz', datakey='noisy', blltol=0.1): """ ---------------------------------------------------------------------------- Write closure phases computed in a PRISim simulation to a NPZ file with appropriate format for further analysis. Inputs: infile_prefix [string] HDF5 file or NPZ file created by a PRISim simulation or its replication respectively. If infmt is specified as 'hdf5', then hdf5file_prefix will be ignored and all the observing info will be read from here. If infmt is specified as 'npz', then hdf5file_prefix needs to be specified in order to read the observing parameters. triads [list or numpy array or None] Antenna triads given as a list of 3-element lists or a ntriads x 3 array. Each element in the inner list is an antenna label. They will be converted to strings internally. If set to None, then all triads determined by bltriplet will be used. If specified, then inputs in blltol and bltriplet will be ignored. bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline vectors. The first axis denotes the three baselines, the second axis denotes the East, North, Up coordinates of the baseline vector. Units are in m. Will be used only if triads is set to None. outfile_prefix [string] Prefix of the NPZ file. It will be appended by '_noiseless', '_noisy', and '_noise' and further by extension '.npz' infmt [string] Format of the input file containing visibilities. Accepted values are 'npz' (default), and 'hdf5'. If infmt is specified as 'npz', then hdf5file_prefix also needs to be specified for reading the observing parameters datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or 'noise' -- visibilities are to be written to the output. If set to None, and infmt is 'hdf5', then all three sets of visibilities are written. The datakey string will also be added as a suffix in the output file. blltol [scalar] Baseline length tolerance (in m) for matching baseline vectors in triads. It must be a scalar. Default = 0.1 m. Will be used only if triads is set to None and bltriplet is to be used. 
---------------------------------------------------------------------------- """ if not isinstance(infile_prefix, str): raise TypeError('Input infile_prefix must be a string') if not isinstance(outfile_prefix, str): raise TypeError('Input outfile_prefix must be a string') if (triads is None) and (bltriplet is None): raise ValueError('One of triads or bltriplet must be set') if triads is None: if not isinstance(bltriplet, NP.ndarray): raise TypeError('Input bltriplet must be a numpy array') if not isinstance(blltol, (int,float)): raise TypeError('Input blltol must be a scalar') if bltriplet.ndim != 2: raise ValueError('Input bltriplet must be a 2D numpy array') if bltriplet.shape[0] != 3: raise ValueError('Input bltriplet must contain three baseline vectors') if bltriplet.shape[1] != 3: raise ValueError('Input bltriplet must contain baseline vectors along three corrdinates in the ENU frame') else: if not isinstance(triads, (list, NP.ndarray)): raise TypeError('Input triads must be a list or numpy array') triads = NP.asarray(triads).astype(str) if not isinstance(infmt, str): raise TypeError('Input infmt must be a string') if infmt.lower() not in ['npz', 'hdf5']: raise ValueError('Input file format must be npz or hdf5') if infmt.lower() == 'npz': if not isinstance(hdf5file_prefix, str): raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information') if datakey is None: datakey = ['noisy'] if isinstance(datakey, str): datakey = [datakey] elif not isinstance(datakey, list): raise TypeError('Input datakey must be a list') for dkey in datakey: if dkey.lower() not in ['noiseless', 'noisy', 'noise']: raise ValueError('Invalid input found in datakey') if infmt.lower() == 'hdf5': fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower()) fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension] else: fullfnames_without_extension = [infile_prefix] if len(fullfnames_without_extension) == 0: raise IOError('No input files found with pattern {0}'.format(infile_prefix)) try: if infmt.lower() == 'hdf5': simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0]) else: simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix) except: raise IOError('Input PRISim file does not contain a valid PRISim output') latitude = simvis.latitude longitude = simvis.longitude location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude)) last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day last = last.reshape(-1,1) daydata = NP.asarray(simvis.timestamp[0]).ravel() if infmt.lower() == 'npz': simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower()) skyvis = simvisinfo['noiseless'][0,...] vis = simvisinfo['noisy'] noise = simvisinfo['noise'] n_realize = vis.shape[0] else: n_realize = len(fullfnames_without_extension) cpdata = {} outfile = {} for fileind in range(n_realize): if infmt.lower() == 'npz': simvis.vis_freq = vis[fileind,...] simvis.vis_noise_freq = noise[fileind,...] 
else: simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind]) if fileind == 0: if triads is None: triads, bltriplets = simvis.getThreePointCombinations(unique=False) # triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets']) triads = NP.asarray(triads).reshape(-1,3) bltriplets = NP.asarray(bltriplets) blinds = [] matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol) revind = [] for blnum in NP.arange(bltriplet.shape[0]): if len(matchinfo[0][blnum]) == 0: revind += [blnum] if len(revind) > 0: flip_factor = NP.ones(3, dtype=NP.float) flip_factor[NP.array(revind)] = -1 rev_bltriplet = bltriplet * flip_factor.reshape(-1,1) matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol) for blnum in NP.arange(bltriplet.shape[0]): if len(matchinfo[0][blnum]) == 0: raise ValueError('Some baselines in the triplet are not found in the model triads') triadinds = [] for blnum in NP.arange(bltriplet.shape[0]): triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1])) triadinds += [triadind] triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2])) if triadind_intersection.size == 0: raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.') triads = triads[triadind_intersection,:] selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3) prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(), delay_filter_info=None, specsmooth_info=None, spectral_window_info=None, unique=False) if fileind == 0: triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to accunt for any order flips) for outkey in datakey: if fileind == 0: outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey) if outkey == 'noiseless': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...] cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...] else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0) if outkey == 'noisy': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...] cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...] 
else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0) if outkey == 'noise': if fileind == 0: # cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:] cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:] else: # cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0) cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0) for outkey in datakey: cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0) flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool) NP.savez_compressed(outfile[outkey], closures=cpdata[outkey], flags=flagsdata, triads=triads, last=last+NP.zeros((1,n_realize)), days=daydata+NP.arange(n_realize)) ################################################################################ def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'): """ ---------------------------------------------------------------------------- Read an input NPZ file containing closure phase data output from CASA and return a dictionary Inputs: npzfile [string] Input NPZ file including full path containing closure phase data. It must have the following files/keys inside: 'closures' [numpy array] Closure phase (radians). It is of shape (nlst,ndays,ntriads,nchan) 'triads' [numpy array] Array of triad tuples, of shape (ntriads,3) 'flags' [numpy array] Array of flags (boolean), of shape (nlst,ndays,ntriads,nchan) 'last' [numpy array] Array of LST for each day (CASA units which is MJD+6713). Shape is (nlst,ndays) 'days' [numpy array] Array of days, shape is (ndays,) 'averaged_closures' [numpy array] optional array of closure phases averaged across days. Shape is (nlst,ntriads,nchan) 'std_dev_lst' [numpy array] optional array of standard deviation of closure phases across days. Shape is (nlst,ntriads,nchan) 'std_dev_triads' [numpy array] optional array of standard deviation of closure phases across triads. Shape is (nlst,ndays,nchan) latitude [scalar int or float] Latitude of site (in degrees). Default=0.0 deg. longitude [scalar int or float] Longitude of site (in degrees). Default=0.0 deg. lst_format [string] Specifies the format/units in which the 'last' key is to be interpreted. If set to 'hourangle', the LST is in units of hour angle. If set to 'fracday', the fractional portion of the 'last' value is the LST in units of days. 
Output: cpinfo [dictionary] Contains one top level keys, namely, 'raw' Under key 'raw' which holds a dictionary, the subkeys include 'cphase' (nlst,ndays,ntriads,nchan), 'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags' (nlst,ndays,ntriads,nchan), and some other optional keys ---------------------------------------------------------------------------- """ npzdata = NP.load(npzfile) cpdata = npzdata['closures'] triadsdata = npzdata['triads'] flagsdata = npzdata['flags'] location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude)) daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location) # lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD if lst_format.lower() == 'hourangle': lstHA = npzdata['last'] lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s) elif lst_format.lower() == 'fracday': lstfrac, lstint = NP.modf(npzdata['last']) lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD lstHA = lstfrac * 24.0 # in hours else: raise ValueError('Input lst_format invalid') cp = cpdata.astype(NP.float64) flags = flagsdata.astype(NP.bool) cpinfo = {} datapool = ['raw'] for dpool in datapool: cpinfo[dpool] = {} if dpool == 'raw': qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst'] for qty in qtys: if qty == 'cphase': cpinfo[dpool][qty] = NP.copy(cp) elif qty == 'triads': cpinfo[dpool][qty] = NP.copy(triadsdata) elif qty == 'flags': cpinfo[dpool][qty] = NP.copy(flags) elif qty == 'lst': cpinfo[dpool][qty] = NP.copy(lstHA) elif qty == 'lst-day': cpinfo[dpool][qty] = NP.copy(lstday.jd) elif qty == 'days': cpinfo[dpool][qty] = NP.copy(daydata.jd) elif qty == 'dayavg': if 'averaged_closures' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_dayavg) elif qty == 'std_triads': if 'std_dev_triad' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_std_triads) elif qty == 'std_lst': if 'std_dev_lst' in npzdata: cpinfo[dpool][qty] = NP.copy(cp_std_lst) return cpinfo ################################################################################ def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0, lst_format='fracday'): """ ---------------------------------------------------------------------------- Read an input NPZ file containing closure phase data output from CASA and save it to HDF5 format Inputs: npzfile [string] Input NPZ file including full path containing closure phase data. It must have the following files/keys inside: 'closures' [numpy array] Closure phase (radians). It is of shape (nlst,ndays,ntriads,nchan) 'triads' [numpy array] Array of triad tuples, of shape (ntriads,3) 'flags' [numpy array] Array of flags (boolean), of shape (nlst,ndays,ntriads,nchan) 'last' [numpy array] Array of LST for each day (CASA units ehich is MJD+6713). Shape is (nlst,ndays) 'days' [numpy array] Array of days, shape is (ndays,) 'averaged_closures' [numpy array] optional array of closure phases averaged across days. Shape is (nlst,ntriads,nchan) 'std_dev_lst' [numpy array] optional array of standard deviation of closure phases across days. Shape is (nlst,ntriads,nchan) 'std_dev_triads' [numpy array] optional array of standard deviation of closure phases across triads. Shape is (nlst,ndays,nchan) hdf5file [string] Output HDF5 file including full path. 
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
             lst_format='fracday'):

    """
    ----------------------------------------------------------------------------
    Read an input NPZ file containing closure phase data output from CASA and
    save it to HDF5 format

    Inputs:

    npzfile     [string] Input NPZ file including full path containing closure
                phase data. It must have the following files/keys inside:
                'closures'  [numpy array] Closure phase (radians). It is of
                            shape (nlst,ndays,ntriads,nchan)
                'triads'    [numpy array] Array of triad tuples, of shape
                            (ntriads,3)
                'flags'     [numpy array] Array of flags (boolean), of shape
                            (nlst,ndays,ntriads,nchan)
                'last'      [numpy array] Array of LST for each day (CASA units
                            which is MJD+6713). Shape is (nlst,ndays)
                'days'      [numpy array] Array of days, shape is (ndays,)
                'averaged_closures'
                            [numpy array] optional array of closure phases
                            averaged across days. Shape is (nlst,ntriads,nchan)
                'std_dev_lst'
                            [numpy array] optional array of standard deviation
                            of closure phases across days. Shape is
                            (nlst,ntriads,nchan)
                'std_dev_triads'
                            [numpy array] optional array of standard deviation
                            of closure phases across triads. Shape is
                            (nlst,ndays,nchan)

    hdf5file    [string] Output HDF5 file including full path.

    longitude   [scalar int or float] Longitude of site (in degrees).
                Default=0.0 deg.

    latitude    [scalar int or float] Latitude of site (in degrees).
                Default=0.0 deg.

    lst_format  [string] Specifies the format/units in which the 'last' key is
                to be interpreted. If set to 'hourangle', the LST is in units
                of hour angle. If set to 'fracday', the fractional portion of
                the 'last' value is the LST in units of days.
    ----------------------------------------------------------------------------
    """

    npzdata = NP.load(npzfile)
    cpdata = npzdata['closures']
    triadsdata = npzdata['triads']
    flagsdata = npzdata['flags']
    location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
    daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd',
                   location=location)
    # lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD

    if lst_format.lower() == 'hourangle':
        lstHA = npzdata['last']
        lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
    elif lst_format.lower() == 'fracday':
        lstfrac, lstint = NP.modf(npzdata['last'])
        lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
        lstHA = lstfrac * 24.0 # in hours
    else:
        raise ValueError('Input lst_format invalid')

    cp = cpdata.astype(NP.float64)
    flags = flagsdata.astype(bool) # NP.bool is a deprecated alias of the builtin bool

    if 'averaged_closures' in npzdata:
        day_avg_cpdata = npzdata['averaged_closures']
        cp_dayavg = day_avg_cpdata.astype(NP.float64)
    if 'std_dev_triad' in npzdata:
        std_triads_cpdata = npzdata['std_dev_triad']
        cp_std_triads = std_triads_cpdata.astype(NP.float64)
    if 'std_dev_lst' in npzdata:
        std_lst_cpdata = npzdata['std_dev_lst']
        cp_std_lst = std_lst_cpdata.astype(NP.float64)

    with h5py.File(hdf5file, 'w') as fobj:
        datapool = ['raw']
        for dpool in datapool:
            if dpool == 'raw':
                qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days',
                        'dayavg', 'std_triads', 'std_lst']
            for qty in qtys:
                data = None
                if qty == 'cphase':
                    data = NP.copy(cp)
                elif qty == 'triads':
                    data = NP.copy(triadsdata)
                elif qty == 'flags':
                    data = NP.copy(flags)
                elif qty == 'lst':
                    data = NP.copy(lstHA)
                elif qty == 'lst-day':
                    data = NP.copy(lstday.jd)
                elif qty == 'days':
                    data = NP.copy(daydata.jd)
                elif qty == 'dayavg':
                    if 'averaged_closures' in npzdata:
                        data = NP.copy(cp_dayavg)
                elif qty == 'std_triads':
                    if 'std_dev_triad' in npzdata:
                        data = NP.copy(cp_std_triads)
                elif qty == 'std_lst':
                    if 'std_dev_lst' in npzdata:
                        data = NP.copy(cp_std_lst)
                if data is not None:
                    dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty),
                                               data=data, compression='gzip',
                                               compression_opts=9)

################################################################################
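# Illustrative usage of npz2hdf5() -- a minimal sketch with hypothetical paths.
# After conversion, the HDF5 file holds one group 'raw' whose datasets mirror
# the NPZ keys documented above and can be inspected directly with h5py:
#
#   npz2hdf5('/path/to/closures.npz', '/path/to/closures.hdf5',
#            longitude=21.42830, latitude=-30.72152, lst_format='fracday')
#   with h5py.File('/path/to/closures.hdf5', 'r') as fobj:
#       print(list(fobj['raw']))           # ['cphase', 'days', 'flags', ...]
#       print(fobj['raw/cphase'].shape)    # (nlst, ndays, ntriads, nchan)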
def save_CPhase_cross_power_spectrum(xcpdps, outfile):

    """
    ----------------------------------------------------------------------------
    Save cross-power spectrum information in a dictionary to a HDF5 file

    Inputs:

    xcpdps      [dictionary] This dictionary is essentially an output of the
                member function compute_power_spectrum() of class
                ClosurePhaseDelaySpectrum. It has the following key-value
                structure: 'triads' ((ntriads,3) array), 'triads_ind'
                ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
                ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
                array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
                'dday' ((ndays,) array), 'oversampled' and 'resampled'
                corresponding to whether resample was set to False or True in
                call to member function FT(). Values under keys 'triads_ind'
                and 'lst_ind' are numpy arrays corresponding to triad and time
                indices used in selecting the data. Values under keys
                'oversampled' and 'resampled' each contain a dictionary with
                the following keys and values:
                'z'     [numpy array] Redshifts corresponding to the band
                        centers in 'freq_center'. It has shape=(nspw,)
                'lags'  [numpy array] Delays (in seconds). It has
                        shape=(nlags,)
                'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
                        to 'lags'. It has shape=(nspw,nlags)
                'freq_center'
                        [numpy array] contains the center frequencies (in Hz)
                        of the frequency subbands of the subband delay spectra.
                        It is of size n_win. It is roughly equivalent to
                        redshift(s)
                'freq_wts'
                        [numpy array] Contains frequency weights applied on
                        each frequency sub-band during the subband delay
                        transform. It is of size n_win x nchan.
                'bw_eff'
                        [numpy array] contains the effective bandwidths (in Hz)
                        of the subbands being delay transformed. It is of size
                        n_win. It is roughly equivalent to width in redshift or
                        along line-of-sight
                'shape' [string] shape of the frequency window function
                        applied. Usual values are 'rect' (rectangular), 'bhw'
                        (Blackman-Harris), 'bnw' (Blackman-Nuttall).
                'fftpow'
                        [scalar] the power to which the FFT of the window was
                        raised. The value must be a positive scalar with
                        default = 1.0
                'lag_corr_length'
                        [numpy array] It is the correlation timescale (in
                        pixels) of the subband delay spectra. It is
                        proportional to inverse of effective bandwidth. It is
                        of size n_win. The unit size of a pixel is determined
                        by the difference between adjacent pixels in lags under
                        key 'lags' which in turn is effectively inverse of the
                        effective bandwidth of the subband specified in bw_eff

                It further contains one or more of the following keys named
                'whole', 'submodel', 'residual', and 'errinfo' each of which is
                a dictionary. 'whole' contains power spectrum info about the
                input closure phases. 'submodel' contains power spectrum info
                about the model that will have been subtracted (as closure
                phase) from the 'whole' model. 'residual' contains power
                spectrum info about the closure phases obtained as a difference
                between 'whole' and 'submodel'. It contains the following keys
                and values:
                'mean'  [numpy array] Delay power spectrum incoherently
                        estimated over the axes specified in xinfo['axes']
                        using the 'mean' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'median'
                        [numpy array] Delay power spectrum incoherently
                        averaged over the axes specified in incohax using the
                        'median' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'diagoffsets'
                        [dictionary] Same keys corresponding to keys under
                        'collapse_axes' in input containing the diagonal
                        offsets for those axes. If 'avgcov' was set, those
                        entries will be removed from 'diagoffsets' since all
                        the leading diagonal elements have been collapsed
                        (averaged) further. Value under each key is a numpy
                        array where each element in the array corresponds to
                        the index of that leading diagonal. This should match
                        the size of the output along that axis in 'mean' or
                        'median' above.
                'diagweights'
                        [dictionary] Each key is an axis specified in
                        collapse_axes and the value is a numpy array of weights
                        corresponding to the diagonal offsets in that axis.
                'axesmap'
                        [dictionary] If covariance in cross-power is calculated
                        but is not collapsed, the number of dimensions in the
                        output will have changed. This parameter tracks where
                        the original axis is now placed. The keys are the
                        original axes that are involved in incoherent
                        cross-power, and the values are the new locations of
                        those original axes in the output.
                'nsamples_incoh'
                        [integer] Number of incoherent samples in producing the
                        power spectrum
                'nsamples_coh'
                        [integer] Number of coherent samples in producing the
                        power spectrum

    outfile     [string] Full path to the external HDF5 file where the cross-
                power spectrum information provided in xcpdps will be saved
    ----------------------------------------------------------------------------
    """

    if not isinstance(xcpdps, dict):
        raise TypeError('Input xcpdps must be a dictionary')

    with h5py.File(outfile, 'w') as fileobj:
        hdrgrp = fileobj.create_group('header')
        hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days',
                   'day_ind', 'dday']
        for key in hdrkeys:
            dset = hdrgrp.create_dataset(key, data=xcpdps[key])
        sampling = ['oversampled', 'resampled']
        sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff',
                         'shape', 'freq_wts', 'lag_corr_length']
        dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
        for smplng in sampling:
            if smplng in xcpdps:
                smplgrp = fileobj.create_group(smplng)
                for key in sampling_keys:
                    dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
                for dpool in dpool_keys:
                    if dpool in xcpdps[smplng]:
                        dpoolgrp = smplgrp.create_group(dpool)
                        keys = ['diagoffsets', 'diagweights', 'axesmap',
                                'nsamples_incoh', 'nsamples_coh']
                        for key in keys:
                            if key in xcpdps[smplng][dpool]:
                                if isinstance(xcpdps[smplng][dpool][key], dict):
                                    subgrp = dpoolgrp.create_group(key)
                                    for subkey in xcpdps[smplng][dpool][key]:
                                        dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
                                else:
                                    dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
                        for stat in ['mean', 'median']:
                            if stat in xcpdps[smplng][dpool]:
                                if isinstance(xcpdps[smplng][dpool][stat], list):
                                    for ii in range(len(xcpdps[smplng][dpool][stat])):
                                        dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
                                        dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
                                else:
                                    dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
                                    dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)

################################################################################
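# A small diagnostic sketch (not part of the original module): walk the HDF5
# hierarchy written by save_CPhase_cross_power_spectrum() and print each group
# and dataset. The helper name is illustrative; it uses only the standard
# h5py visititems() traversal.

def _print_xcpdps_hdf5_tree(fname):
    """Print the group/dataset tree of a cross-power spectrum HDF5 file"""
    def _show(name, obj):
        if isinstance(obj, h5py.Dataset):
            print('{0}  dataset{1}'.format(name, obj.shape))
        else:
            print('{0}/'.format(name))
    with h5py.File(fname, 'r') as fobj:
        fobj.visititems(_show)

# Expected top-level layout: 'header' with the coordinate arrays, and
# 'oversampled'/'resampled' groups containing 'whole', 'submodel', 'residual'
# and/or 'errinfo' subgroups whose 'mean'/'median' members are datasets (or
# 'diagcomb_*' datasets), each carrying a 'units' attribute.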
def read_CPhase_cross_power_spectrum(infile):

    """
    ----------------------------------------------------------------------------
    Read information about cross power spectrum from an external HDF5 file into
    a dictionary. This is the counterpart to
    save_CPhase_cross_power_spectrum()

    Input:

    infile      [string] Full path to the external HDF5 file that contains info
                about cross-power spectrum.

    Output:

    xcpdps      [dictionary] This dictionary has the same structure as the
                output of the member function compute_power_spectrum() of class
                ClosurePhaseDelaySpectrum. It has the following key-value
                structure: 'triads' ((ntriads,3) array), 'triads_ind'
                ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
                ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
                array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
                'dday' ((ndays,) array), 'oversampled' and 'resampled'
                corresponding to whether resample was set to False or True in
                call to member function FT(). Values under keys 'triads_ind'
                and 'lst_ind' are numpy arrays corresponding to triad and time
                indices used in selecting the data. Values under keys
                'oversampled' and 'resampled' each contain a dictionary with
                the following keys and values:
                'z'     [numpy array] Redshifts corresponding to the band
                        centers in 'freq_center'. It has shape=(nspw,)
                'lags'  [numpy array] Delays (in seconds). It has
                        shape=(nlags,)
                'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
                        to 'lags'. It has shape=(nspw,nlags)
                'freq_center'
                        [numpy array] contains the center frequencies (in Hz)
                        of the frequency subbands of the subband delay spectra.
                        It is of size n_win. It is roughly equivalent to
                        redshift(s)
                'freq_wts'
                        [numpy array] Contains frequency weights applied on
                        each frequency sub-band during the subband delay
                        transform. It is of size n_win x nchan.
                'bw_eff'
                        [numpy array] contains the effective bandwidths (in Hz)
                        of the subbands being delay transformed. It is of size
                        n_win. It is roughly equivalent to width in redshift or
                        along line-of-sight
                'shape' [string] shape of the frequency window function
                        applied. Usual values are 'rect' (rectangular), 'bhw'
                        (Blackman-Harris), 'bnw' (Blackman-Nuttall).
                'fftpow'
                        [scalar] the power to which the FFT of the window was
                        raised. The value must be a positive scalar with
                        default = 1.0
                'lag_corr_length'
                        [numpy array] It is the correlation timescale (in
                        pixels) of the subband delay spectra. It is
                        proportional to inverse of effective bandwidth. It is
                        of size n_win. The unit size of a pixel is determined
                        by the difference between adjacent pixels in lags under
                        key 'lags' which in turn is effectively inverse of the
                        effective bandwidth of the subband specified in bw_eff

                It further contains one or more of the following keys named
                'whole', 'submodel', 'residual', and 'errinfo' each of which is
                a dictionary. 'whole' contains power spectrum info about the
                input closure phases. 'submodel' contains power spectrum info
                about the model that will have been subtracted (as closure
                phase) from the 'whole' model. 'residual' contains power
                spectrum info about the closure phases obtained as a difference
                between 'whole' and 'submodel'. It contains the following keys
                and values:
                'mean'  [numpy array] Delay power spectrum incoherently
                        estimated over the axes specified in xinfo['axes']
                        using the 'mean' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'median'
                        [numpy array] Delay power spectrum incoherently
                        averaged over the axes specified in incohax using the
                        'median' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'diagoffsets'
                        [dictionary] Same keys corresponding to keys under
                        'collapse_axes' in input containing the diagonal
                        offsets for those axes. If 'avgcov' was set, those
                        entries will be removed from 'diagoffsets' since all
                        the leading diagonal elements have been collapsed
                        (averaged) further. Value under each key is a numpy
                        array where each element in the array corresponds to
                        the index of that leading diagonal. This should match
                        the size of the output along that axis in 'mean' or
                        'median' above.
                'diagweights'
                        [dictionary] Each key is an axis specified in
                        collapse_axes and the value is a numpy array of weights
                        corresponding to the diagonal offsets in that axis.
                'axesmap'
                        [dictionary] If covariance in cross-power is calculated
                        but is not collapsed, the number of dimensions in the
                        output will have changed. This parameter tracks where
                        the original axis is now placed. The keys are the
                        original axes that are involved in incoherent
                        cross-power, and the values are the new locations of
                        those original axes in the output.
                'nsamples_incoh'
                        [integer] Number of incoherent samples in producing the
                        power spectrum
                'nsamples_coh'
                        [integer] Number of coherent samples in producing the
                        power spectrum
    ----------------------------------------------------------------------------
    """

    if not isinstance(infile, str):
        raise TypeError('Input infile must be a string')

    xcpdps = {}
    with h5py.File(infile, 'r') as fileobj:
        hdrgrp = fileobj['header']
        hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days',
                   'day_ind', 'dday']
        for key in hdrkeys:
            xcpdps[key] = hdrgrp[key][()] # Dataset.value was removed in h5py 3.x; [()] reads the full dataset
        sampling = ['oversampled', 'resampled']
        sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff',
                         'shape', 'freq_wts', 'lag_corr_length']
        dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
        for smplng in sampling:
            if smplng in fileobj:
                smplgrp = fileobj[smplng]
                xcpdps[smplng] = {}
                for key in sampling_keys:
                    xcpdps[smplng][key] = smplgrp[key][()]
                for dpool in dpool_keys:
                    if dpool in smplgrp:
                        xcpdps[smplng][dpool] = {}
                        dpoolgrp = smplgrp[dpool]
                        keys = ['diagoffsets', 'diagweights', 'axesmap',
                                'nsamples_incoh', 'nsamples_coh']
                        for key in keys:
                            if key in dpoolgrp:
                                if isinstance(dpoolgrp[key], h5py.Group):
                                    xcpdps[smplng][dpool][key] = {}
                                    for subkey in dpoolgrp[key]:
                                        xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey][()]
                                elif isinstance(dpoolgrp[key], h5py.Dataset):
                                    xcpdps[smplng][dpool][key] = dpoolgrp[key][()]
                                else:
                                    raise TypeError('Invalid h5py data type encountered')
                        for stat in ['mean', 'median']:
                            if stat in dpoolgrp:
                                if isinstance(dpoolgrp[stat], h5py.Dataset):
                                    valunits = dpoolgrp[stat].attrs['units']
                                    xcpdps[smplng][dpool][stat] = dpoolgrp[stat][()] * U.Unit(valunits)
                                elif isinstance(dpoolgrp[stat], h5py.Group):
                                    xcpdps[smplng][dpool][stat] = []
                                    for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
                                        if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
                                            valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
                                            xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)][()] * U.Unit(valunits)]

    return xcpdps

################################################################################
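# Round-trip sketch (hypothetical paths, not part of the module): the reader
# is the inverse of the writer above, and the 'mean'/'median' entries come
# back as astropy Quantities because the saved 'units' attribute is
# reattached on read.
#
#   save_CPhase_cross_power_spectrum(xcpdps, '/path/to/xcpdps.hdf5')
#   xcpdps2 = read_CPhase_cross_power_spectrum('/path/to/xcpdps.hdf5')
#   ps = xcpdps2['resampled']['whole']['mean']  # Quantity, or list of them
#   print(ps[0].unit if isinstance(ps, list) else ps.unit)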
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None,
                                            diagoffsets=None):

    """
    ----------------------------------------------------------------------------
    Perform incoherent averaging of cross power spectrum along specified axes

    Inputs:

    xcpdps      [dictionary or list of dictionaries] If provided as a list of
                dictionaries, each dictionary consists of cross power spectral
                information coming possibly from different sources, and they
                will be averaged incoherently. If a single dictionary is
                provided instead of a list of dictionaries, the said averaging
                does not take place. Each dictionary is essentially an output
                of the member function compute_power_spectrum() of class
                ClosurePhaseDelaySpectrum. It has the following key-value
                structure: 'triads' ((ntriads,3) array), 'triads_ind'
                ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
                ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
                array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
                'dday' ((ndays,) array), 'oversampled' and 'resampled'
                corresponding to whether resample was set to False or True in
                call to member function FT(). Values under keys 'triads_ind'
                and 'lst_ind' are numpy arrays corresponding to triad and time
                indices used in selecting the data. Values under keys
                'oversampled' and 'resampled' each contain a dictionary with
                the following keys and values:
                'z'     [numpy array] Redshifts corresponding to the band
                        centers in 'freq_center'. It has shape=(nspw,)
                'lags'  [numpy array] Delays (in seconds). It has
                        shape=(nlags,)
                'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
                        to 'lags'. It has shape=(nspw,nlags)
                'freq_center'
                        [numpy array] contains the center frequencies (in Hz)
                        of the frequency subbands of the subband delay spectra.
                        It is of size n_win. It is roughly equivalent to
                        redshift(s)
                'freq_wts'
                        [numpy array] Contains frequency weights applied on
                        each frequency sub-band during the subband delay
                        transform. It is of size n_win x nchan.
                'bw_eff'
                        [numpy array] contains the effective bandwidths (in Hz)
                        of the subbands being delay transformed. It is of size
                        n_win. It is roughly equivalent to width in redshift or
                        along line-of-sight
                'shape' [string] shape of the frequency window function
                        applied. Usual values are 'rect' (rectangular), 'bhw'
                        (Blackman-Harris), 'bnw' (Blackman-Nuttall).
                'fftpow'
                        [scalar] the power to which the FFT of the window was
                        raised. The value must be a positive scalar with
                        default = 1.0
                'lag_corr_length'
                        [numpy array] It is the correlation timescale (in
                        pixels) of the subband delay spectra. It is
                        proportional to inverse of effective bandwidth. It is
                        of size n_win. The unit size of a pixel is determined
                        by the difference between adjacent pixels in lags under
                        key 'lags' which in turn is effectively inverse of the
                        effective bandwidth of the subband specified in bw_eff

                It further contains 3 keys named 'whole', 'submodel', and
                'residual' each of which is a dictionary. 'whole' contains
                power spectrum info about the input closure phases. 'submodel'
                contains power spectrum info about the model that will have
                been subtracted (as closure phase) from the 'whole' model.
                'residual' contains power spectrum info about the closure
                phases obtained as a difference between 'whole' and 'submodel'.
                It contains the following keys and values:
                'mean'  [numpy array] Delay power spectrum incoherently
                        estimated over the axes specified in xinfo['axes']
                        using the 'mean' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'median'
                        [numpy array] Delay power spectrum incoherently
                        averaged over the axes specified in incohax using the
                        'median' key in input cpds or attribute
                        cPhaseDS['processed']['dspec']. It has shape that
                        depends on the combination of input parameters. See
                        examples below. If both collapse_axes and avgcov are
                        not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'diagoffsets'
                        [dictionary] Same keys corresponding to keys under
                        'collapse_axes' in input containing the diagonal
                        offsets for those axes. If 'avgcov' was set, those
                        entries will be removed from 'diagoffsets' since all
                        the leading diagonal elements have been collapsed
                        (averaged) further. Value under each key is a numpy
                        array where each element in the array corresponds to
                        the index of that leading diagonal. This should match
                        the size of the output along that axis in 'mean' or
                        'median' above.
                'diagweights'
                        [dictionary] Each key is an axis specified in
                        collapse_axes and the value is a numpy array of weights
                        corresponding to the diagonal offsets in that axis.
                'axesmap'
                        [dictionary] If covariance in cross-power is calculated
                        but is not collapsed, the number of dimensions in the
                        output will have changed. This parameter tracks where
                        the original axis is now placed. The keys are the
                        original axes that are involved in incoherent
                        cross-power, and the values are the new locations of
                        those original axes in the output.
                'nsamples_incoh'
                        [integer] Number of incoherent samples in producing the
                        power spectrum
                'nsamples_coh'
                        [integer] Number of coherent samples in producing the
                        power spectrum

    excpdps     [dictionary or list of dictionaries] If provided as a list of
                dictionaries, each dictionary consists of cross power spectral
                information of subsample differences coming possibly from
                different sources, and they will be averaged incoherently.
                This is optional. If not set (default=None), no incoherent
                averaging happens. If a single dictionary is provided instead
                of a list of dictionaries, the said averaging does not take
                place. Each dictionary is essentially an output of the member
                function compute_power_spectrum_uncertainty() of class
                ClosurePhaseDelaySpectrum. It has the following key-value
                structure: 'triads' ((ntriads,3) array), 'triads_ind'
                ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
                ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
                array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
                array), 'dday' ((ndaycomb,) array), 'oversampled' and
                'resampled' corresponding to whether resample was set to False
                or True in call to member function FT(). Values under keys
                'triads_ind' and 'lst_ind' are numpy arrays corresponding to
                triad and time indices used in selecting the data. Values under
                keys 'oversampled' and 'resampled' each contain a dictionary
                with the following keys and values:
                'z'     [numpy array] Redshifts corresponding to the band
                        centers in 'freq_center'.
                        It has shape=(nspw,)
                'lags'  [numpy array] Delays (in seconds). It has
                        shape=(nlags,)
                'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
                        to 'lags'. It has shape=(nspw,nlags)
                'freq_center'
                        [numpy array] contains the center frequencies (in Hz)
                        of the frequency subbands of the subband delay spectra.
                        It is of size n_win. It is roughly equivalent to
                        redshift(s)
                'freq_wts'
                        [numpy array] Contains frequency weights applied on
                        each frequency sub-band during the subband delay
                        transform. It is of size n_win x nchan.
                'bw_eff'
                        [numpy array] contains the effective bandwidths (in Hz)
                        of the subbands being delay transformed. It is of size
                        n_win. It is roughly equivalent to width in redshift or
                        along line-of-sight
                'shape' [string] shape of the frequency window function
                        applied. Usual values are 'rect' (rectangular), 'bhw'
                        (Blackman-Harris), 'bnw' (Blackman-Nuttall).
                'fftpow'
                        [scalar] the power to which the FFT of the window was
                        raised. The value must be a positive scalar with
                        default = 1.0
                'lag_corr_length'
                        [numpy array] It is the correlation timescale (in
                        pixels) of the subband delay spectra. It is
                        proportional to inverse of effective bandwidth. It is
                        of size n_win. The unit size of a pixel is determined
                        by the difference between adjacent pixels in lags under
                        key 'lags' which in turn is effectively inverse of the
                        effective bandwidth of the subband specified in bw_eff

                It further contains a key named 'errinfo' which is a
                dictionary. It contains information about power spectrum
                uncertainties obtained from subsample differences. It contains
                the following keys and values:
                'mean'  [numpy array] Delay power spectrum uncertainties
                        incoherently estimated over the axes specified in
                        xinfo['axes'] using the 'mean' key in input cpds or
                        attribute cPhaseDS['errinfo']['dspec']. It has shape
                        that depends on the combination of input parameters.
                        See examples below. If both collapse_axes and avgcov
                        are not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'median'
                        [numpy array] Delay power spectrum uncertainties
                        incoherently averaged over the axes specified in
                        incohax using the 'median' key in input cpds or
                        attribute cPhaseDS['errinfo']['dspec']. It has shape
                        that depends on the combination of input parameters.
                        See examples below. If both collapse_axes and avgcov
                        are not set, those axes will be replaced with square
                        covariance matrices. If collapse_axes is provided but
                        avgcov is False, those axes will be of shape 2*Naxis-1.
                'diagoffsets'
                        [dictionary] Same keys corresponding to keys under
                        'collapse_axes' in input containing the diagonal
                        offsets for those axes. If 'avgcov' was set, those
                        entries will be removed from 'diagoffsets' since all
                        the leading diagonal elements have been collapsed
                        (averaged) further. Value under each key is a numpy
                        array where each element in the array corresponds to
                        the index of that leading diagonal. This should match
                        the size of the output along that axis in 'mean' or
                        'median' above.
                'diagweights'
                        [dictionary] Each key is an axis specified in
                        collapse_axes and the value is a numpy array of weights
                        corresponding to the diagonal offsets in that axis.
                'axesmap'
                        [dictionary] If covariance in cross-power is calculated
                        but is not collapsed, the number of dimensions in the
                        output will have changed. This parameter tracks where
                        the original axis is now placed.
                        The keys are the original axes that are involved in
                        incoherent cross-power, and the values are the new
                        locations of those original axes in the output.
                'nsamples_incoh'
                        [integer] Number of incoherent samples in producing the
                        power spectrum
                'nsamples_coh'
                        [integer] Number of coherent samples in producing the
                        power spectrum

    diagoffsets [NoneType or dictionary or list of dictionaries] This info is
                used for incoherent averaging along specified diagonals along
                specified axes. This incoherent averaging is performed after
                incoherently averaging multiple cross-power spectra (if any).
                If set to None, this incoherent averaging is not performed.
                Many combinations of axes and diagonals can be specified as
                individual dictionaries in a list. If only one dictionary is
                specified, then it is assumed that only one combination of axes
                and diagonals is requested. If a list of dictionaries is given,
                each dictionary in the list specifies a different combination
                for incoherent averaging. Each dictionary should have the
                following key-value pairs. The key is the axis number (allowed
                values are 1, 2, 3) that denotes the axis type (1=LST, 2=Days,
                3=Triads to be averaged), and the value under the keys is a
                list or numpy array of diagonals to be averaged incoherently.
                These axes-diagonal combinations apply to both the inputs
                xcpdps and excpdps, except axis=2 does not apply to excpdps
                (since it is made of subsample differences already) and will be
                skipped. A usage sketch follows this function.

    Outputs:

    A tuple consisting of two dictionaries. The first dictionary contains the
    incoherent averaging of xcpdps as specified by the inputs, while the second
    consists of the incoherent averaging of excpdps as specified by the inputs.
    The structure of these dictionaries is practically the same as the
    dictionary inputs xcpdps and excpdps respectively. The only differences in
    dictionary structure are:
    * Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
      /'errinfo']['mean'/'median'] is a list of numpy arrays, where each array
      in the list corresponds to the dictionary in the list in input
      diagoffsets that defines the axes-diagonal combination.
    ----------------------------------------------------------------------------
    """

    if isinstance(xcpdps, dict):
        xcpdps = [xcpdps]
    if not isinstance(xcpdps, list):
        raise TypeError('Invalid data type provided for input xcpdps')

    if excpdps is not None:
        if isinstance(excpdps, dict):
            excpdps = [excpdps]
        if not isinstance(excpdps, list):
            raise TypeError('Invalid data type provided for input excpdps')
        if len(xcpdps) != len(excpdps):
            raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')

    out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
    out_excpdps = None
    if excpdps is not None:
        out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}

    for smplng in ['oversampled', 'resampled']:
        if smplng in xcpdps[0]:
            out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
            if excpdps is not None:
                out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}

            for dpool in ['whole', 'submodel', 'residual']:
                if dpool in xcpdps[0][smplng]:
                    out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
                    for stat in ['mean', 'median']:
                        if stat in xcpdps[0][smplng][dpool]:
                            out_xcpdps[smplng][dpool][stat] = {}
                            arr = []
                            diagweights = []
                            for i in range(len(xcpdps)):
                                arr += [xcpdps[i][smplng][dpool][stat].si.value]
                                arr_units = xcpdps[i][smplng][dpool][stat].si.unit
                                if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
                                    diagwts = 1.0
                                    diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=int) # NP.int is deprecated
                                    for ax in xcpdps[i][smplng][dpool]['diagweights']:
                                        tmp_shape = NP.copy(diagwts_shape)
                                        tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
                                        diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
                                elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
                                    diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
                                else:
                                    raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
                                diagweights += [diagwts]
                            diagweights = NP.asarray(diagweights)
                            arr = NP.asarray(arr)
                            arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
                            diagweights = NP.nansum(diagweights, axis=0)
                            out_xcpdps[smplng][dpool][stat] = arr
                            out_xcpdps[smplng][dpool]['diagweights'] = diagweights
            if excpdps is not None: # guard added: the loop below dereferences excpdps[0]
                for dpool in ['errinfo']:
                    if dpool in excpdps[0][smplng]:
                        out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
                        for stat in ['mean', 'median']:
                            if stat in excpdps[0][smplng][dpool]:
                                out_excpdps[smplng][dpool][stat] = {}
                                arr = []
                                diagweights = []
                                for i in range(len(excpdps)):
                                    arr += [excpdps[i][smplng][dpool][stat].si.value]
                                    arr_units = excpdps[i][smplng][dpool][stat].si.unit
                                    if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
                                        diagwts = 1.0
                                        diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=int)
                                        for ax in excpdps[i][smplng][dpool]['diagweights']:
                                            tmp_shape = NP.copy(diagwts_shape)
                                            tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
                                            diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
                                    elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
                                        diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
                                    else:
                                        raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
                                    diagweights += [diagwts]
                                diagweights = NP.asarray(diagweights)
                                arr = NP.asarray(arr)
                                arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
                                diagweights = NP.nansum(diagweights, axis=0)
                                out_excpdps[smplng][dpool][stat] = arr
                                out_excpdps[smplng][dpool]['diagweights'] = diagweights

    if diagoffsets is not None:
        if isinstance(diagoffsets, dict):
            diagoffsets = [diagoffsets]
        if not isinstance(diagoffsets, list):
            raise TypeError('Input diagoffsets must be a list of dictionaries')
        for ind in range(len(diagoffsets)):
            for ax in diagoffsets[ind]:
                if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
                    raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
                diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])

        for smplng in ['oversampled', 'resampled']:
            if smplng in out_xcpdps:
                for dpool in ['whole', 'submodel', 'residual']:
                    if dpool in out_xcpdps[smplng]:
                        masks = []
                        for ind in range(len(diagoffsets)):
                            mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=bool) # NP.bool is deprecated
                            mask_agg = None
                            for ax in diagoffsets[ind]:
                                mltdim_slice = [slice(None)] * mask_ones.ndim
                                mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
                                mask_tmp = NP.copy(mask_ones)
                                mask_tmp[tuple(mltdim_slice)] = False
                                if mask_agg is None:
                                    mask_agg = NP.copy(mask_tmp)
                                else:
                                    mask_agg = NP.logical_or(mask_agg, mask_tmp)
                            masks += [NP.copy(mask_agg)]
                        diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
                        out_xcpdps[smplng][dpool]['diagweights'] = []
                        for stat in ['mean', 'median']:
                            if stat in out_xcpdps[smplng][dpool]:
                                arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
                                arr_units = out_xcpdps[smplng][dpool][stat].si.unit
                                out_xcpdps[smplng][dpool][stat] = []
                                for ind in range(len(diagoffsets)):
                                    masked_diagwts = MA.array(diagwts, mask=masks[ind])
                                    axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
                                    out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
                                    if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
                                        out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]

        if excpdps is not None:
            for smplng in ['oversampled', 'resampled']:
                if smplng in out_excpdps:
                    for dpool in ['errinfo']:
                        if dpool in out_excpdps[smplng]:
                            masks = []
                            for ind in range(len(diagoffsets)):
                                mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=bool)
                                mask_agg = None
                                for ax in diagoffsets[ind]:
                                    if
 ax != 2: # the day axis is skipped for errinfo (already built from subsample differences)
                                        mltdim_slice = [slice(None)] * mask_ones.ndim
                                        mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
                                        mask_tmp = NP.copy(mask_ones)
                                        mask_tmp[tuple(mltdim_slice)] = False
                                        if mask_agg is None:
                                            mask_agg = NP.copy(mask_tmp)
                                        else:
                                            mask_agg = NP.logical_or(mask_agg, mask_tmp)
                                masks += [NP.copy(mask_agg)]
                            diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
                            out_excpdps[smplng][dpool]['diagweights'] = []
                            for stat in ['mean', 'median']:
                                if stat in out_excpdps[smplng][dpool]:
                                    arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
                                    arr_units = out_excpdps[smplng][dpool][stat].si.unit
                                    out_excpdps[smplng][dpool][stat] = []
                                    for ind in range(len(diagoffsets)):
                                        masked_diagwts = MA.array(diagwts, mask=masks[ind])
                                        axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
                                        out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
                                        if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
                                            out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]

    return (out_xcpdps, out_excpdps)

################################################################################
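# Usage sketch (variable names hypothetical, not part of the module): average
# cross-power spectra from two fields incoherently, then collapse selected
# diagonals. Axis keys denote 1=LST, 2=days, 3=triads; axis 2 is skipped for
# the error info since it is already built from subsample differences.
#
#   diagoffsets = [{1: NP.arange(2), 3: NP.asarray([0])}]
#   avg_x, avg_ex = incoherent_cross_power_spectrum_average(
#       [xcpdps_field1, xcpdps_field2],
#       excpdps=[excpdps_field1, excpdps_field2], diagoffsets=diagoffsets)
#   # avg_x['resampled']['whole']['mean'] is now a list, one entry per
#   # dictionary in diagoffsets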
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None,
                              kbintype='log'):

    """
    ----------------------------------------------------------------------------
    Averages the power spectrum incoherently by binning in bins of k. Returns
    the power spectrum in units of both standard power spectrum and Delta^2

    Inputs:

    xcpdps      [dictionary] A dictionary that contains the incoherent averaged
                power spectrum along LST and/or triads axes. This dictionary is
                essentially the one(s) returned as the output of the function
                incoherent_cross_power_spectrum_average()

    kbins       [NoneType, list or numpy array] Bins in k. If set to None
                (default), it will be determined automatically based on the
                inputs in num_kbins, and kbintype. If num_kbins is None and
                kbintype='linear', the negative and positive values of k are
                folded into a one-sided power spectrum. In this case, the bins
                will approximately have the same resolution as the k-values in
                the input power spectrum for all the spectral windows.

    num_kbins   [NoneType or integer] Number of k-bins. Used only if kbins is
                set to None. If kbintype is set to 'linear', the negative and
                positive values of k are folded into a one-sided power
                spectrum. In this case, the bins will approximately have the
                same resolution as the k-values in the input power spectrum for
                all the spectral windows.

    kbintype    [string] Specifies the type of binning, used only if kbins is
                set to None. Accepted values are 'linear' and 'log' for linear
                and logarithmic bins respectively.

    Outputs:

    Dictionary containing the power spectrum information. At the top level, it
    contains keys specifying the sampling to be 'oversampled' or 'resampled'.
    Under each of these keys is another dictionary containing the following
    keys:
    'z'     [numpy array] Redshifts corresponding to the band centers in
            'freq_center'. It has shape=(nspw,)
    'lags'  [numpy array] Delays (in seconds). It has shape=(nlags,).
    'freq_center'
            [numpy array] contains the center frequencies (in Hz) of the
            frequency subbands of the subband delay spectra. It is of size
            n_win. It is roughly equivalent to redshift(s)
    'freq_wts'
            [numpy array] Contains frequency weights applied on each frequency
            sub-band during the subband delay transform. It is of size
            n_win x nchan.
    'bw_eff'
            [numpy array] contains the effective bandwidths (in Hz) of the
            subbands being delay transformed. It is of size n_win. It is
            roughly equivalent to width in redshift or along line-of-sight
    'shape' [string] shape of the frequency window function applied. Usual
            values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw'
            (Blackman-Nuttall).
    'fftpow'
            [scalar] the power to which the FFT of the window was raised. The
            value must be a positive scalar with default = 1.0
    'lag_corr_length'
            [numpy array] It is the correlation timescale (in pixels) of the
            subband delay spectra. It is proportional to inverse of effective
            bandwidth. It is of size n_win. The unit size of a pixel is
            determined by the difference between adjacent pixels in lags under
            key 'lags' which in turn is effectively inverse of the effective
            bandwidth of the subband specified in bw_eff

    It further contains 3 keys named 'whole', 'submodel', and 'residual' or one
    key named 'errinfo' each of which is a dictionary. 'whole' contains power
    spectrum info about the input closure phases. 'submodel' contains power
    spectrum info about the model that will have been subtracted (as closure
    phase) from the 'whole' model. 'residual' contains power spectrum info
    about the closure phases obtained as a difference between 'whole' and
    'submodel'. 'errinfo' contains power spectrum information about the
    subsample differences. There is also another dictionary under key
    'kbininfo' that contains information about k-bins. These dictionaries
    contain the following keys and values:
    'whole'/'submodel'/'residual'/'errinfo'
            [dictionary] It contains the following keys and values:
            'mean'  [dictionary] Delay power spectrum information under the
                    'mean' statistic incoherently obtained by averaging the
                    input power spectrum in bins of k. It contains output power
                    spectrum expressed as two quantities each of which is a
                    dictionary with the following key-value pairs:
                    'PS'    [list of numpy arrays] Standard power spectrum in
                            units of 'K2 Mpc3'. Each numpy array in the list
                            maps to a specific combination of axes and axis
                            diagonals chosen for incoherent averaging in
                            earlier processing such as in the function
                            incoherent_cross_power_spectrum_average(). The
                            numpy array has a shape similar to the input power
                            spectrum, but that last axis (k-axis) will have a
                            different size that depends on the k-bins that
                            were used in the incoherent averaging along that
                            axis.
                    'Del2'  [list of numpy arrays] power spectrum in Delta^2
                            units of 'K2'. Each numpy array in the list maps to
                            a specific combination of axes and axis diagonals
                            chosen for incoherent averaging in earlier
                            processing such as in the function
                            incoherent_cross_power_spectrum_average(). The
                            numpy array has a shape similar to the input power
                            spectrum, but that last axis (k-axis) will have a
                            different size that depends on the k-bins that
                            were used in the incoherent averaging along that
                            axis.
            'median'
                    [dictionary] Delay power spectrum information under the
                    'median' statistic incoherently obtained by averaging the
                    input power spectrum in bins of k. It contains output power
                    spectrum expressed as two quantities each of which is a
                    dictionary with the following key-value pairs:
                    'PS'    [list of numpy arrays] Standard power spectrum in
                            units of 'K2 Mpc3'.
                            Each numpy array in the list maps to a specific
                            combination of axes and axis diagonals chosen for
                            incoherent averaging in earlier processing such as
                            in the function
                            incoherent_cross_power_spectrum_average(). The
                            numpy array has a shape similar to the input power
                            spectrum, but that last axis (k-axis) will have a
                            different size that depends on the k-bins that were
                            used in the incoherent averaging along that axis.
                    'Del2'  [list of numpy arrays] power spectrum in Delta^2
                            units of 'K2'. Each numpy array in the list maps to
                            a specific combination of axes and axis diagonals
                            chosen for incoherent averaging in earlier
                            processing such as in the function
                            incoherent_cross_power_spectrum_average(). The
                            numpy array has a shape similar to the input power
                            spectrum, but that last axis (k-axis) will have a
                            different size that depends on the k-bins that were
                            used in the incoherent averaging along that axis.
    'kbininfo'
            [dictionary] Contains the k-bin information. It contains the
            following key-value pairs:
            'counts'
                    [list] List of numpy arrays where each numpy array in the
                    list stores the counts in the determined k-bins. Each numpy
                    array in the list corresponds to a spectral window
                    (redshift subband). The shape of each numpy array is
                    (nkbins,)
            'kbin_edges'
                    [list] List of numpy arrays where each numpy array contains
                    the k-bin edges. Each array in the list corresponds to a
                    spectral window (redshift subband). The shape of each array
                    is (nkbins+1,).
            'kbinnum'
                    [list] List of numpy arrays containing the bin number under
                    which the k value falls. Each array in the list corresponds
                    to a spectral window (redshift subband). The shape of each
                    array is (nlags,).
            'ri'    [list] List of numpy arrays containing the reverse indices
                    for each k-bin. Each array in the list corresponds to a
                    spectral window (redshift subband). The shape of each array
                    is (nlags+nkbins+1,).
            'whole'/'submodel'/'residual' or 'errinfo'
                    [dictionary] k-bin info estimated for the different
                    datapools under different stats and PS definitions. It has
                    the keys 'mean' and 'median' for the mean and median
                    statistic respectively. Each of them contains a dictionary
                    with the following key-value pairs:
                    'PS'    [list] List of numpy arrays where each numpy array
                            contains a standard power spectrum typically in
                            units of 'K2 Mpc3'. Its shape is the same as input
                            power spectrum except the k-axis which now has
                            nkbins number of elements.
                    'Del2'  [list] List of numpy arrays where each numpy array
                            contains a Delta^2 power spectrum typically in
                            units of 'K2'. Its shape is the same as input power
                            spectrum except the k-axis which now has nkbins
                            number of elements.
    ----------------------------------------------------------------------------
    """

    if not isinstance(xcpdps, dict):
        raise TypeError('Input xcpdps must be a dictionary')

    if kbins is not None:
        if not isinstance(kbins, (list,NP.ndarray)):
            raise TypeError('Input kbins must be a list or numpy array')
    else:
        if not isinstance(kbintype, str):
            raise TypeError('Input kbintype must be a string')
        if kbintype.lower() not in ['linear', 'log']:
            raise ValueError('Input kbintype must be set to "linear" or "log"')
        if kbintype.lower() == 'log':
            if num_kbins is None:
                num_kbins = 10

    psinfo = {}
    keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days',
            'day_ind', 'dday']
    for key in keys:
        psinfo[key] = xcpdps[key]
    sampling = ['oversampled', 'resampled']
    sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts',
                     'lag_corr_length']
    dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
    for smplng in sampling:
        if smplng in xcpdps:
            psinfo[smplng] = {}
            for key in sampling_keys:
                psinfo[smplng][key] = xcpdps[smplng][key]
            kprll = xcpdps[smplng]['kprll']
            lags = xcpdps[smplng]['lags']
            eps = 1e-10
            if kbins is None:
                dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
                if kbintype.lower() == 'linear':
                    bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]//2+1, endpoint=True) # integer division required; '/' would pass a float num to linspace and fail on Python 3
                else:
                    bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
                bins_kprll = NP.insert(bins_kprll, 0, -eps)
            else:
                bins_kprll = NP.asarray(kbins)
            num_kbins = bins_kprll.size - 1
            psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [],
                                          'kbinnum': [], 'ri': []}
            for spw in range(kprll.shape[0]):
                counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
                counts = counts.astype(int)
                psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
                psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
                psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
                psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
            for dpool in dpool_keys:
                if dpool in xcpdps[smplng]:
                    psinfo[smplng][dpool] = {}
                    psinfo[smplng]['kbininfo'][dpool] = {}
                    keys = ['diagoffsets', 'diagweights', 'axesmap']
                    for key in keys:
                        psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
                    for stat in ['mean', 'median']:
                        if stat in xcpdps[smplng][dpool]:
                            psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
                            psinfo[smplng]['kbininfo'][dpool][stat] = []
                            for combi in range(len(xcpdps[smplng][dpool][stat])):
                                outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
                                outshape[-1] = num_kbins
                                tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex128) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit) # NP.complex is deprecated
                                tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex128) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
                                tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float64) / U.Mpc # NP.float is deprecated
                                for spw in range(kprll.shape[0]):
                                    counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
                                    ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
                                    print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
                                    progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
                                    for binnum in range(num_kbins):
                                        if counts[binnum] > 0:
                                            ind_kbin = ri[ri[binnum]:ri[binnum+1]]
                                            tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
                                            k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=int)
                                            k_shape[-1] = -1
                                            tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
                                            tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
                                        progress.update(binnum+1)
                                    progress.finish()
                                psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
                                psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
                                psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]

    return psinfo

################################################################################
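# The dimensionless power spectrum formed above follows
# Delta^2(k) = k^3 P(k) / (2 pi^2), as in the tmp_Del2 computation. Below is a
# self-contained sketch of that conversion (function name and inputs are
# illustrative only, not part of the module API):

def _delta2_from_ps(ps, kprll):
    """Convert a standard power spectrum P(k) (e.g. in K^2 Mpc^3) and its
    k_parallel modes (in 1/Mpc) to the dimensionless Delta^2 (in K^2)"""
    return NP.abs(kprll)**3 * ps / (2 * NP.pi**2)

# Example with synthetic numbers:
#   ps = 1e6 * NP.ones(5) * U.K**2 * U.Mpc**3
#   kprll = NP.linspace(0.1, 0.5, 5) / U.Mpc
#   print(_delta2_from_ps(ps, kprll).to(U.K**2))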
class ClosurePhase(object):

    """
    ----------------------------------------------------------------------------
    Class to hold and operate on Closure Phase information. It has the
    following attributes and member functions.

    Attributes:

    extfile     [string] Full path to external file containing information of
                ClosurePhase instance. The file is in HDF5 format

    cpinfo      [dictionary] Contains the following top-level keys, namely,
                'raw', 'processed', and 'errinfo'

                Under key 'raw' which holds a dictionary, the subkeys include
                'cphase' (nlst,ndays,ntriads,nchan), 'triads' (ntriads,3),
                'lst' (nlst,ndays), and 'flags' (nlst,ndays,ntriads,nchan).

                Under the 'processed' key are more subkeys, namely, 'native',
                'prelim', and optionally 'submodel' and 'residual' each holding
                a dictionary.

                Under 'native' dictionary, the subsubkeys for further
                dictionaries are 'cphase' (masked array:
                (nlst,ndays,ntriads,nchan)), 'eicp' (complex masked array:
                (nlst,ndays,ntriads,nchan)), and 'wts' (masked array:
                (nlst,ndays,ntriads,nchan)).

                Under 'prelim' dictionary, the subsubkeys for further
                dictionaries are 'tbins' (numpy array of tbin centers after
                smoothing), 'dtbins' (numpy array of tbin intervals), 'wts'
                (masked array: (ntbins,ndays,ntriads,nchan)), 'eicp' and
                'cphase'. The dictionaries under 'eicp' are indexed by keys
                'mean' (complex masked array: (ntbins,ndays,ntriads,nchan)),
                and 'median' (complex masked array:
                (ntbins,ndays,ntriads,nchan)). The dictionaries under 'cphase'
                are indexed by keys 'mean' (masked array:
                (ntbins,ndays,ntriads,nchan)), 'median' (masked array:
                (ntbins,ndays,ntriads,nchan)), 'rms' (masked array:
                (ntbins,ndays,ntriads,nchan)), and 'mad' (masked array:
                (ntbins,ndays,ntriads,nchan)). The last one denotes Median
                Absolute Deviation.

                Under 'submodel' dictionary, the subsubkeys for further
                dictionaries are 'cphase' (masked array:
                (nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked array:
                (nlst,ndays,ntriads,nchan)).

                Under 'residual' dictionary, the subsubkeys for further
                dictionaries are 'cphase' and 'eicp'. These are dictionaries
                too. The dictionaries under 'eicp' are indexed by keys 'mean'
                (complex masked array: (ntbins,ndays,ntriads,nchan)), and
                'median' (complex masked array: (ntbins,ndays,ntriads,nchan)).
                The dictionaries under 'cphase' are indexed by keys 'mean'
                (masked array: (ntbins,ndays,ntriads,nchan)), and 'median'
                (masked array: (ntbins,ndays,ntriads,nchan)).

                Under key 'errinfo', it contains the following keys and values:
                'list_of_pair_of_pairs'
                        List of pair of pairs for which differences of complex
                        exponentials have been computed, where the elements are
                        bins of days. The number of elements in the list is
                        ncomb, and each element is a smaller (4-element) list
                        of pair of pairs
                'eicp_diff'
                        Difference of complex exponentials between pairs of day
                        bins. This will be used in evaluating noise properties
                        in power spectrum. It is a dictionary with two keys '0'
                        and '1' where each contains the difference from a pair
                        of subsamples. Each of these keys contains a numpy
                        array of shape (nlstbins,ncomb,2,ntriads,nchan)
                'wts'   Weights in difference of complex exponentials obtained
                        by sum of squares of weights that are associated with
                        the pair that was used in the differencing. It is a
                        dictionary with two keys '0' and '1' where each
                        contains the weights associated with the corresponding
                        subsample difference. It is of shape
                        (nlstbins,ncomb,2,ntriads,nchan)

    Member functions:

    __init__()  Initialize an instance of class ClosurePhase

    expicp()    Compute and return complex exponential of the closure phase as
                a masked array

    smooth_in_tbins()
                Smooth the complex exponentials of closure phases in LST bins.
                Both mean and median smoothing is produced.

    subtract()  Subtract complex exponential of the bispectrum phase from the
                current instance and updates the cpinfo attribute

    subsample_differencing()
                Create subsamples and differences between subsamples to
                evaluate noise properties from the data set.

    save()      Save contents of attribute cpinfo in external HDF5 file
    ----------------------------------------------------------------------------
    """

    def __init__(self, infile, freqs, infmt='npz'):

        """
        ------------------------------------------------------------------------
        Initialize an instance of class ClosurePhase

        Inputs:

        infile      [string] Input file including full path. It could be a NPZ
                    with raw data, or a HDF5 file that could contain raw or
                    processed data. The input file format is specified in the
                    input infmt. If it is a NPZ file, it must contain the
                    following keys/files:
                    'closures'  [numpy array] Closure phase (radians). It is of
                                shape (nlst,ndays,ntriads,nchan)
                    'triads'    [numpy array] Array of triad tuples, of shape
                                (ntriads,3)
                    'flags'     [numpy array] Array of flags (boolean), of
                                shape (nlst,ndays,ntriads,nchan)
                    'last'      [numpy array] Array of LST for each day (CASA
                                units which is MJD+6713). Shape is (nlst,ndays)
                    'days'      [numpy array] Array of days, shape is (ndays,)
                    'averaged_closures'
                                [numpy array] optional array of closure phases
                                averaged across days. Shape is
                                (nlst,ntriads,nchan)
                    'std_dev_lst'
                                [numpy array] optional array of standard
                                deviation of closure phases across days. Shape
                                is (nlst,ntriads,nchan)
                    'std_dev_triads'
                                [numpy array] optional array of standard
                                deviation of closure phases across triads.
                                Shape is (nlst,ndays,nchan)

        freqs       [numpy array] Frequencies (in Hz) in the input. Size is
                    nchan.

        infmt       [string] Input file format. Accepted values are 'npz'
                    (default) and 'hdf5'.
        ------------------------------------------------------------------------
        """

        if not isinstance(infile, str):
            raise TypeError('Input infile must be a string')
        if not isinstance(freqs, NP.ndarray):
            raise TypeError('Input freqs must be a numpy array')
        freqs = freqs.ravel()
        if not isinstance(infmt, str):
            raise TypeError('Input infmt must be a string')
        if infmt.lower() not in ['npz', 'hdf5']:
            raise ValueError('Input infmt must be "npz" or "hdf5"')

        if infmt.lower() == 'npz':
            infilesplit = infile.split('.npz')
            infile_noext = infilesplit[0]
            self.cpinfo = loadnpz(infile)
            # npz2hdf5(infile, infile_noext+'.hdf5')
            self.extfile = infile_noext + '.hdf5'
        else:
            # if not isinstance(infile, h5py.File):
            #     raise TypeError('Input infile is not a valid HDF5 file')
            self.extfile = infile
            self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)

        if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
            raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
        self.f = freqs
        self.df = freqs[1] - freqs[0]

        force_expicp = False
        if 'processed' not in self.cpinfo:
            force_expicp = True
        else:
            if 'native' not in self.cpinfo['processed']:
                force_expicp = True
        self.expicp(force_action=force_expicp)

        if 'prelim' not in self.cpinfo['processed']:
            self.cpinfo['processed']['prelim'] = {}

        self.cpinfo['errinfo'] = {}

    ############################################################################

    def expicp(self, force_action=False):

        """
        ------------------------------------------------------------------------
        Compute the complex exponential of the closure phase as a masked array

        Inputs:

        force_action    [boolean] If set to False (default), the complex
                        exponential is computed only if it has not been done so
                        already. Otherwise the computation is forced.
        ------------------------------------------------------------------------
        """

        if 'processed' not in self.cpinfo:
            self.cpinfo['processed'] = {}
            force_action = True
        if 'native' not in self.cpinfo['processed']:
            self.cpinfo['processed']['native'] = {}
            force_action = True
        if 'cphase' not in self.cpinfo['processed']['native']:
            self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
            force_action = True
        if not force_action:
            if 'eicp' not in self.cpinfo['processed']['native']:
                self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
                self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float64), mask=self.cpinfo['raw']['flags']) # NP.float is deprecated; use NP.float64
        else:
            self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
            self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float64), mask=self.cpinfo['raw']['flags'])

    ############################################################################
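    # Usage sketch (hypothetical file and channelization, not part of the
    # class): instantiating the class triggers expicp(), so the complex
    # exponentials and weights are available immediately afterwards.
    #
    #   freqs = 100e6 + 97656.25 * NP.arange(1024)  # Hz
    #   cpObj = ClosurePhase('/path/to/closures.npz', freqs, infmt='npz')
    #   eicp = cpObj.cpinfo['processed']['native']['eicp']  # masked complex array
    #   wts = cpObj.cpinfo['processed']['native']['wts']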
Only if daybinsize is set to None. It produces bins that roughly consist of equal number of days in each bin regardless of how much the days in each bin are separated from each other. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. lstbinsize [NoneType or scalar] LST bin size (in seconds) over which mean and median are estimated across the LST. If set to None, no smoothing is performed ------------------------------------------------------------------------ """ if (ndaybins is not None) and (daybinsize is not None): raise ValueError('Only one of daybinsize or ndaybins should be set') if (daybinsize is not None) or (ndaybins is not None): if daybinsize is not None: if not isinstance(daybinsize, (int,float)): raise TypeError('Input daybinsize must be a scalar') dres = NP.diff(self.cpinfo['raw']['days']).min() # in days dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days if daybinsize > dres: daybinsize = NP.clip(daybinsize, dres, dextent) eps = 1e-10 daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize) ndaybins = daybins.size daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps])) if ndaybins > 1: daybinintervals = daybins[1:] - daybins[:-1] daybincenters = daybins[:-1] + 0.5 * daybinintervals else: daybinintervals = NP.asarray(daybinsize).reshape(-1) daybincenters = daybins[0] + 0.5 * daybinintervals counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins) counts = counts.astype(NP.int) # if 'prelim' not in self.cpinfo['processed']: # self.cpinfo['processed']['prelim'] = {} # self.cpinfo['processed']['prelim']['eicp'] = {} # self.cpinfo['processed']['prelim']['cphase'] = {} # self.cpinfo['processed']['prelim']['daybins'] = daybincenters # self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) for binnum in xrange(counts.size): ind_daybin = ri[ri[binnum]:ri[binnum+1]] wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1) eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1))) eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1))) cp_drms[:,binnum,:,:] = 
MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data # mask = wts_daybins <= 0.0 # self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask) # self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask) # self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask) # self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask) # self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask) # self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask) # self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask) else: if not isinstance(ndaybins, int): raise TypeError('Input ndaybins must be an integer') if ndaybins <= 0: raise ValueError('Input ndaybins must be positive') days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins) daybincenters = NP.asarray([NP.mean(days) for days in days_split]) daybinintervals = NP.asarray([days.max()-days.min() for days in days_split]) counts = NP.asarray([days.size for days in days_split]) wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1) # mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1) wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1) eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1) eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean)) eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian)) eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1) cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} self.cpinfo['processed']['prelim']['eicp'] = {} self.cpinfo['processed']['prelim']['cphase'] = {} self.cpinfo['processed']['prelim']['daybins'] = daybincenters self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals mask = wts_daybins <= 0.0 self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, 
mask=mask) self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask) self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask) self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask) self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask) rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities if NP.any(rawlst > 24.0): rawlst -= 24.0 if rawlst.shape[0] > 1: # LST bin only if there are multiple LST if lstbinsize is not None: if not isinstance(lstbinsize, (int,float)): raise TypeError('Input lstbinsize must be a scalar') lstbinsize = lstbinsize / 3.6e3 # in hours tres = NP.diff(rawlst[:,0]).min() # in hours textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours eps = 1e-10 if 'prelim' not in self.cpinfo['processed']: self.cpinfo['processed']['prelim'] = {} no_change_in_lstbins = False if lstbinsize > tres: lstbinsize = NP.clip(lstbinsize, tres, textent) lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize) nlstbins = lstbins.size lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps])) if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) lstbincenters = lstbins[0] + 0.5 * lstbinintervals self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals no_change_in_lstbins = False else: # Perform no binning and keep the current LST resolution, data and weights warnings.warn('LST bin size found to be smaller than the LST resolution in the data. 
No LST binning/averaging will be performed.') lstbinsize = tres lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize) nlstbins = lstbins.size - 1 if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1] # Ensure that the LST bins are inside the min/max envelope to # error-free interpolation later self.cpinfo['processed']['prelim']['lstbins'][0] += eps self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps no_change_in_lstbins = True counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins) counts = counts.astype(NP.int) if 'wts' not in self.cpinfo['processed']['prelim']: outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) else: outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) wts_lstbins = NP.zeros(outshape) eicp_tmean = NP.zeros(outshape, dtype=NP.complex128) eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128) cp_trms = NP.zeros(outshape) cp_tmad = NP.zeros(outshape) for binnum in xrange(counts.size): if no_change_in_lstbins: ind_lstbin = [binnum] else: ind_lstbin = ri[ri[binnum]:ri[binnum+1]] if 'wts' not in self.cpinfo['processed']['prelim']: indict = self.cpinfo['processed']['native'] else: indict = self.cpinfo['processed']['prelim'] wts_lstbins[binnum,:,:,:] = NP.sum(indict['wts'][ind_lstbin,:,:,:].data, axis=0) if 'wts' not in self.cpinfo['processed']['prelim']: eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(indict['eicp'][ind_lstbin,:,:,:], axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(indict['eicp'][ind_lstbin,:,:,:].real, axis=0) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][ind_lstbin,:,:,:].imag, axis=0))) cp_trms[binnum,:,:,:] = MA.std(indict['cphase'][ind_lstbin,:,:,:], axis=0).data cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data else: eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*indict['cphase']['mean'][ind_lstbin,:,:,:]), axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0))) cp_trms[binnum,:,:,:] = MA.std(indict['cphase']['mean'][ind_lstbin,:,:,:], axis=0).data cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase']['median'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data mask = wts_lstbins <= 0.0 self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_lstbins, mask=mask) if 'eicp' not in self.cpinfo['processed']['prelim']: self.cpinfo['processed']['prelim']['eicp'] = {} if 'cphase' not in self.cpinfo['processed']['prelim']: self.cpinfo['processed']['prelim']['cphase'] = {} self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_tmean, mask=mask) self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_tmedian, mask=mask) self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_tmean), mask=mask) 
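# Note (descriptive comment): closure phases are circular quantities, so
# the statistics above are computed on complex exponentials rather than on
# the raw phases. The 'mean' phase is obtained as
# angle(mean(exp(1j*phase))) and the 'median' phase as
# angle(median(cos(phase)) + 1j*median(sin(phase))), which avoids the
# wrap-around bias near +/- pi that a direct arithmetic mean or median of
# the phases would incur.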
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_tmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_trms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_tmad, mask=mask)
# else:
#     # Perform no binning and keep the current LST resolution, data and weights
#     warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
#     lstbinsize = tres
#     lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
#     nlstbins = lstbins.size - 1
#     if nlstbins > 1:
#         lstbinintervals = lstbins[1:] - lstbins[:-1]
#         lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
#     else:
#         lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
#         lstbincenters = lstbins[0] + 0.5 * lstbinintervals
#     if 'prelim' not in self.cpinfo['processed']:
#         self.cpinfo['processed']['prelim'] = {}
#     self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
#     self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.zeros(1)
############################################################################
def subtract(self, cphase):
"""
------------------------------------------------------------------------
Subtract complex exponential of the bispectrum phase from the current instance and update the cpinfo attribute
Inputs:
cphase [masked array] Bispectrum phase array as a masked array. It must be of the same size as freqs along the frequency axis (the last axis).
Action: Updates 'submodel' and 'residual' keys under attribute cpinfo under key 'processed' ------------------------------------------------------------------------ """ if not isinstance(cphase, NP.ndarray): raise TypeError('Input cphase must be a numpy array') if not isinstance(cphase, MA.MaskedArray): cphase = MA.array(cphase, mask=NP.isnan(cphase)) if not OPS.is_broadcastable(cphase.shape, self.cpinfo['processed']['prelim']['cphase']['median'].shape): raise ValueError('Input cphase has shape incompatible with that in instance attribute') else: minshape = tuple(NP.ones(self.cpinfo['processed']['prelim']['cphase']['median'].ndim - cphase.ndim, dtype=NP.int)) + cphase.shape cphase = cphase.reshape(minshape) # cphase = NP.broadcast_to(cphase, minshape) eicp = NP.exp(1j*cphase) self.cpinfo['processed']['submodel'] = {} self.cpinfo['processed']['submodel']['cphase'] = cphase self.cpinfo['processed']['submodel']['eicp'] = eicp self.cpinfo['processed']['residual'] = {'eicp': {}, 'cphase': {}} for key in ['mean', 'median']: eicpdiff = self.cpinfo['processed']['prelim']['eicp'][key] - eicp eicpratio = self.cpinfo['processed']['prelim']['eicp'][key] / eicp self.cpinfo['processed']['residual']['eicp'][key] = eicpdiff self.cpinfo['processed']['residual']['cphase'][key] = MA.array(NP.angle(eicpratio.data), mask=self.cpinfo['processed']['residual']['eicp'][key].mask) ############################################################################ def subsample_differencing(self, daybinsize=None, ndaybins=4, lstbinsize=None): """ ------------------------------------------------------------------------ Create subsamples and differences between subsamples to evaluate noise properties from the data set. Inputs: daybinsize [Nonetype or scalar] Day bin size (in days) over which mean and median are estimated across different days for a fixed LST bin. If set to None, it will look for value in input ndaybins. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. Must yield greater than or equal to 4 bins ndaybins [NoneType or integer] Number of bins along day axis. Only if daybinsize is set to None. It produces bins that roughly consist of equal number of days in each bin regardless of how much the days in each bin are separated from each other. If both are None, no smoothing is performed. Only one of daybinsize or ndaybins must be set to non-None value. If set, it must be set to greater than or equal to 4 lstbinsize [NoneType or scalar] LST bin size (in seconds) over which mean and median are estimated across the LST. 
If set to None, no smoothing is performed ------------------------------------------------------------------------ """ if (ndaybins is not None) and (daybinsize is not None): raise ValueError('Only one of daybinsize or ndaybins should be set') if (daybinsize is not None) or (ndaybins is not None): if daybinsize is not None: if not isinstance(daybinsize, (int,float)): raise TypeError('Input daybinsize must be a scalar') dres = NP.diff(self.cpinfo['raw']['days']).min() # in days dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days if daybinsize > dres: daybinsize = NP.clip(daybinsize, dres, dextent) eps = 1e-10 daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize) ndaybins = daybins.size daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps])) if ndaybins >= 4: daybinintervals = daybins[1:] - daybins[:-1] daybincenters = daybins[:-1] + 0.5 * daybinintervals else: raise ValueError('Could not find at least 4 bins along repeating days. Adjust binning interval.') counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins) counts = counts.astype(NP.int) wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128) cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])) for binnum in xrange(counts.size): ind_daybin = ri[ri[binnum]:ri[binnum+1]] wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1) eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1))) eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1))) cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data else: if not isinstance(ndaybins, int): raise TypeError('Input ndaybins must be an integer') if ndaybins < 4: raise ValueError('Input ndaybins must be greater than or equal to 4') days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins) daybincenters = NP.asarray([NP.mean(days) for days in days_split]) daybinintervals = NP.asarray([days.max()-days.min() for days in days_split]) counts = NP.asarray([days.size for days in days_split]) wts_split = 
NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1) # mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1) wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1) eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1) eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean)) eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian)) eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1) cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan mask = wts_daybins <= 0.0 wts_daybins = MA.array(wts_daybins, mask=mask) cp_dmean = MA.array(NP.angle(eicp_dmean), mask=mask) cp_dmedian = MA.array(NP.angle(eicp_dmedian), mask=mask) self.cpinfo['errinfo']['daybins'] = daybincenters self.cpinfo['errinfo']['diff_dbins'] = daybinintervals self.cpinfo['errinfo']['wts'] = {'{0}'.format(ind): None for ind in range(2)} self.cpinfo['errinfo']['eicp_diff'] = {'{0}'.format(ind): {} for ind in range(2)} rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities if NP.any(rawlst > 24.0): rawlst -= 24.0 if rawlst.shape[0] > 1: # LST bin only if there are multiple LST if lstbinsize is not None: if not isinstance(lstbinsize, (int,float)): raise TypeError('Input lstbinsize must be a scalar') lstbinsize = lstbinsize / 3.6e3 # in hours tres = NP.diff(rawlst[:,0]).min() # in hours textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours eps = 1e-10 no_change_in_lstbins = False if lstbinsize > tres: lstbinsize = NP.clip(lstbinsize, tres, textent) lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize) nlstbins = lstbins.size lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps])) if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) lstbincenters = lstbins[0] + 0.5 * lstbinintervals self.cpinfo['errinfo']['lstbins'] = lstbincenters self.cpinfo['errinfo']['dlstbins'] = lstbinintervals no_change_in_lstbins = False else: # Perform no binning and keep the current LST resolution warnings.warn('LST bin size found to be smaller than the LST 
resolution in the data. No LST binning/averaging will be performed.') lstbinsize = tres lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize) nlstbins = lstbins.size - 1 if nlstbins > 1: lstbinintervals = lstbins[1:] - lstbins[:-1] else: lstbinintervals = NP.asarray(lstbinsize).reshape(-1) self.cpinfo['errinfo']['dlstbins'] = lstbinintervals self.cpinfo['errinfo']['lstbins'] = lstbins[:-1] # Ensure that the LST bins are inside the min/max envelope to # error-free interpolation later self.cpinfo['errinfo']['lstbins'][0] += eps self.cpinfo['errinfo']['lstbins'][-1] -= eps no_change_in_lstbins = True counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins) counts = counts.astype(NP.int) outshape = (counts.size, wts_daybins.shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) wts_lstbins = NP.zeros(outshape) eicp_tmean = NP.zeros(outshape, dtype=NP.complex128) eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128) cp_trms = NP.zeros(outshape) cp_tmad = NP.zeros(outshape) for binnum in xrange(counts.size): if no_change_in_lstbins: ind_lstbin = [binnum] else: ind_lstbin = ri[ri[binnum]:ri[binnum+1]] wts_lstbins[binnum,:,:,:] = NP.sum(wts_daybins[ind_lstbin,:,:,:].data, axis=0) eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*cp_dmean[ind_lstbin,:,:,:]), axis=0))) eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(cp_dmedian[ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(cp_dmedian[ind_lstbin,:,:,:]), axis=0))) mask = wts_lstbins <= 0.0 wts_lstbins = MA.array(wts_lstbins, mask=mask) eicp_tmean = MA.array(eicp_tmean, mask=mask) eicp_tmedian = MA.array(eicp_tmedian, mask=mask) else: wts_lstbins = MA.copy(wts_daybins) mask = wts_lstbins.mask eicp_tmean = MA.array(NP.exp(1j*NP.angle(NP.exp(1j*cp_dmean))), mask=mask) eicp_tmedian = MA.array(NP.exp(1j*NP.angle(NP.cos(cp_dmedian) + 1j * NP.sin(cp_dmedian))), mask=mask) if (rawlst.shape[0] <= 1) or (lstbinsize is None): nlstbins = rawlst.shape[0] lstbins = NP.mean(rawlst, axis=1) self.cpinfo['errinfo']['lstbins'] = lstbins if lstbinsize is not None: self.cpinfo['errinfo']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1) else: self.cpinfo['errinfo']['dlstbins'] = NP.zeros(1) ncomb = NP.sum(NP.asarray([(ndaybins-i-1)*(ndaybins-i-2)*(ndaybins-i-3)/2 for i in range(ndaybins-3)])).astype(int) diff_outshape = (nlstbins, ncomb, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]) for diffind in range(2): self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['mean'] = MA.empty(diff_outshape, dtype=NP.complex) self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['median'] = MA.empty(diff_outshape, dtype=NP.complex) self.cpinfo['errinfo']['wts']['{0}'.format(diffind)] = MA.empty(diff_outshape, dtype=NP.float) ind = -1 self.cpinfo['errinfo']['list_of_pair_of_pairs'] = [] list_of_pair_of_pairs = [] for i in range(ndaybins-1): for j in range(i+1,ndaybins): for k in range(ndaybins-1): if (k != i) and (k != j): for m in range(k+1,ndaybins): if (m != i) and (m != j): pair_of_pairs = [set([i,j]), set([k,m])] if (pair_of_pairs not in list_of_pair_of_pairs) and (pair_of_pairs[::-1] not in list_of_pair_of_pairs): ind += 1 list_of_pair_of_pairs += [copy.deepcopy(pair_of_pairs)] self.cpinfo['errinfo']['list_of_pair_of_pairs'] += [[i,j,k,m]] for stat in ['mean', 'median']: if stat == 'mean': 
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,j,:,:].data - eicp_tmean[:,i,:,:].data), mask=NP.logical_or(eicp_tmean[:,j,:,:].mask, eicp_tmean[:,i,:,:].mask)) self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,m,:,:].data - eicp_tmean[:,k,:,:].data), mask=NP.logical_or(eicp_tmean[:,m,:,:].mask, eicp_tmean[:,k,:,:].mask)) self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,j,:,:].data**2 + wts_lstbins[:,i,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,j,:,:].mask, wts_lstbins[:,i,:,:].mask)) self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,m,:,:].data**2 + wts_lstbins[:,k,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,m,:,:].mask, wts_lstbins[:,k,:,:].mask)) # self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,j,:,:] - eicp_tmean[:,i,:,:]) # self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,m,:,:] - eicp_tmean[:,k,:,:]) # self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,j,:,:]**2 + wts_lstbins[:,i,:,:]**2) # self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,m,:,:]**2 + wts_lstbins[:,k,:,:]**2) else: self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,j,:,:].data - eicp_tmedian[:,i,:,:].data), mask=NP.logical_or(eicp_tmedian[:,j,:,:].mask, eicp_tmedian[:,i,:,:].mask)) self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,m,:,:].data - eicp_tmedian[:,k,:,:].data), mask=NP.logical_or(eicp_tmedian[:,m,:,:].mask, eicp_tmedian[:,k,:,:].mask)) # self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,j,:,:] - eicp_tmedian[:,i,:,:]) # self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,m,:,:] - eicp_tmedian[:,k,:,:]) mask0 = self.cpinfo['errinfo']['wts']['0'] <= 0.0 mask1 = self.cpinfo['errinfo']['wts']['1'] <= 0.0 self.cpinfo['errinfo']['eicp_diff']['0'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['0'][stat], mask=mask0) self.cpinfo['errinfo']['eicp_diff']['1'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['1'][stat], mask=mask1) self.cpinfo['errinfo']['wts']['0'] = MA.array(self.cpinfo['errinfo']['wts']['0'], mask=mask0) self.cpinfo['errinfo']['wts']['1'] = MA.array(self.cpinfo['errinfo']['wts']['1'], mask=mask1) ############################################################################ def save(self, outfile=None): """ ------------------------------------------------------------------------ Save contents of attribute cpinfo in external HDF5 file Inputs: outfile [NoneType or string] Output file (HDF5) to save contents to. If set to None (default), it will be saved in the file pointed to by the extfile attribute of class ClosurePhase ------------------------------------------------------------------------ """ if outfile is None: outfile = self.extfile NMO.save_dict_to_hdf5(self.cpinfo, outfile, compressinfo={'compress_fmt': 'gzip', 'compress_opts': 9}) ################################################################################ class ClosurePhaseDelaySpectrum(object): """ ---------------------------------------------------------------------------- Class to hold and operate on Closure Phase information. It has the following attributes and member functions. 
Attributes: cPhase [instance of class ClosurePhase] Instance of class ClosurePhase f [numpy array] Frequencies (in Hz) in closure phase spectra df [float] Frequency resolution (in Hz) in closure phase spectra cPhaseDS [dictionary] Possibly oversampled Closure Phase Delay Spectrum information. cPhaseDS_resampled [dictionary] Resampled Closure Phase Delay Spectrum information. Member functions: __init__() Initialize instance of class ClosurePhaseDelaySpectrum FT() Fourier transform of complex closure phase spectra mapping from frequency axis to delay axis. subset() Return triad and time indices to select a subset of processed data compute_power_spectrum() Compute power spectrum of closure phase data. It is in units of Mpc/h. rescale_power_spectrum() Rescale power spectrum to dimensional quantity by converting the ratio given visibility amplitude information average_rescaled_power_spectrum() Average the rescaled power spectrum with physical units along certain axes with inverse variance or regular averaging beam3Dvol() Compute three-dimensional volume of the antenna power pattern along two transverse axes and one LOS axis. ---------------------------------------------------------------------------- """ def __init__(self, cPhase): """ ------------------------------------------------------------------------ Initialize instance of class ClosurePhaseDelaySpectrum Inputs: cPhase [class ClosurePhase] Instance of class ClosurePhase ------------------------------------------------------------------------ """ if not isinstance(cPhase, ClosurePhase): raise TypeError('Input cPhase must be an instance of class ClosurePhase') self.cPhase = cPhase self.f = self.cPhase.f self.df = self.cPhase.df self.cPhaseDS = None self.cPhaseDS_resampled = None ############################################################################ def FT(self, bw_eff, freq_center=None, shape=None, fftpow=None, pad=None, datapool='prelim', visscaleinfo=None, method='fft', resample=True, apply_flags=True): """ ------------------------------------------------------------------------ Fourier transform of complex closure phase spectra mapping from frequency axis to delay axis. Inputs: bw_eff [scalar or numpy array] effective bandwidths (in Hz) on the selected frequency windows for subband delay transform of closure phases. If a scalar value is provided, the same will be applied to all frequency windows freq_center [scalar, list or numpy array] frequency centers (in Hz) of the selected frequency windows for subband delay transform of closure phases. The value can be a scalar, list or numpy array. If a scalar is provided, the same will be applied to all frequency windows. Default=None uses the center frequency from the class attribute named channels shape [string] frequency window shape for subband delay transform of closure phases. Accepted values for the string are 'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW' (for Blackman-Nuttall), and 'bhw' or 'BHW' (for Blackman-Harris). Default=None sets it to 'rect' (rectangular window) fftpow [scalar] the power to which the FFT of the window will be raised. The value must be a positive scalar. Default = 1.0 pad [scalar] padding fraction relative to the number of frequency channels for closure phases. Value must be a non-negative scalar. For e.g., a pad of 1.0 pads the frequency axis with zeros of the same width as the number of channels. After the delay transform, the transformed closure phases are downsampled by a factor of 1+pad. 
If a negative value is specified, delay transform will be performed with no padding. Default=None sets the padding factor to 1.0
datapool [string] Specifies which data set is to be Fourier transformed
visscaleinfo [dictionary] Dictionary containing reference visibilities based on which the closure phases will be scaled to units of visibilities. It contains the following keys and values:
'vis' [numpy array or instance of class InterferometerArray] Reference visibilities from the baselines that form the triad. It can be an instance of class RI.InterferometerArray or a numpy array. If an instance of class InterferometerArray, the baseline triplet must be set in key 'bltriplet' and value in key 'lst' will be ignored. If the value under this key 'vis' is set to a numpy array, it must be of shape (nbl=3, nlst_vis, nchan). In this case the value under key 'bltriplet' will be ignored. The nearest LST will be looked up and applied after smoothing along LST based on the smoothing parameter 'smooth'
'bltriplet' [Numpy array] Will be used in searching for matches to these three baseline vectors if the value under key 'vis' is set to an instance of class InterferometerArray. However, if value under key 'vis' is a numpy array, this key 'bltriplet' will be ignored.
'lst' [numpy array] Reference LST (in hours). It is of shape (nlst_vis,). It will be used only if value under key 'vis' is a numpy array, otherwise it will be ignored and read from the instance of class InterferometerArray passed under key 'vis'. If the specified LST range does not cover the data LST range, those LST will contain NaN in the delay spectrum
'smoothinfo' [dictionary] Dictionary specifying smoothing and/or interpolation parameters. It has the following keys and values:
'op_type' [string] Specifies the interpolating operation. Must be specified (no default). Accepted values are 'interp1d' (scipy.interpolate), 'median' (skimage.filters), 'tophat' (astropy.convolution) and 'gaussian' (astropy.convolution)
'interp_kind' [string (optional)] Specifies the interpolation kind (if 'op_type' is set to 'interp1d'). For accepted values, see scipy.interpolate.interp1d()
'window_size' [integer (optional)] Specifies the size of the interpolating/smoothing kernel. Only applies when 'op_type' is set to 'median', 'tophat' or 'gaussian'. The kernel is a tophat function when 'op_type' is set to 'median' or 'tophat'. It refers to the FWHM when 'op_type' is set to 'gaussian'
resample [boolean] If set to True (default), resample the delay spectrum axis to independent samples along the delay axis. If set to False, return the results as is even if they may be oversampled and not all samples may be independent
method [string] Specifies the Fourier transform method to be used. Accepted values are 'fft' (default) for FFT and 'nufft' for non-uniform FFT
apply_flags [boolean] If set to True (default), weights determined from flags will be applied. If False, no weights from flagging will be applied, and thus even flagged data will be included
Outputs:
A dictionary that contains the oversampled (if resample=False) or resampled (if resample=True) delay spectrum information. It has the following keys and values:
'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the window function applied. Accepted values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window was raised. The value is a positive scalar with default = 1.0
'npad' [scalar] Number of zero-padded channels before performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra after padding in frequency during the transform. It is of size nlags=nchan+npad if resample=False, where npad is the number of frequency channels padded as specified under the key 'npad'. If resample=True, nlags = number of lags after resampling to only independent delays. The lags roughly correspond to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency weights under the key 'freq_wts'. It is of size n_win x nlst x ndays x ntriads x nlags. nlags=nchan+npad if resample=False, where npad is the number of frequency channels padded as specified under the key 'npad'. If resample=True, nlags = number of lags after resampling to only independent delays.
'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to the inverse of the effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively the inverse of the effective bandwidth of the subband specified in bw_eff
'whole' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'prelim' key of attribute cpinfo. Contains the following keys and values:
'dspec' [dictionary] Contains the following keys and values:
'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags)
'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags)
'submodel' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'submodel' key of attribute cpinfo. Contains the following keys and values:
'dspec' [numpy array] Delay spectrum of closure phases. Shape=(nspw,nlst,ndays,ntriads,nlags)
'residual' [dictionary] Delay spectrum results corresponding to bispectrum phase in 'residual' key of attribute cpinfo after subtracting 'submodel' bispectrum phase from that of 'prelim'. It contains the following keys and values:
'dspec' [dictionary] Contains the following keys and values:
'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags)
'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,nlst,ndays,ntriads,nlags)
'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each of which is a dictionary with the following keys and values:
'twts' [numpy array] Weights for the subsample difference. It is of shape (nlst, ndays, ntriads, nchan)
'mean' [numpy array] Delay spectrum of the subsample difference obtained by using the mean statistic.
It is of shape (nspw, nlst, ndays, ntriads, nlags)
'median' [numpy array] Delay spectrum of the subsample difference obtained by using the median statistic. It is of shape (nspw, nlst, ndays, ntriads, nlags)
------------------------------------------------------------------------
"""
try: bw_eff
except NameError: raise NameError('Effective bandwidth must be specified')
else:
if not isinstance(bw_eff, (int, float, list, NP.ndarray)): raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array')
bw_eff = NP.asarray(bw_eff).reshape(-1)
if NP.any(bw_eff <= 0.0): raise ValueError('All values in effective bandwidth must be strictly positive')
if freq_center is None: freq_center = NP.asarray(self.f[self.f.size//2]).reshape(-1)
elif isinstance(freq_center, (int, float, list, NP.ndarray)):
freq_center = NP.asarray(freq_center).reshape(-1)
if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())): raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band')
else: raise TypeError('Value(s) of frequency center must be a scalar, list or numpy array')
if (bw_eff.size == 1) and (freq_center.size > 1): bw_eff = NP.repeat(bw_eff, freq_center.size)
elif (bw_eff.size > 1) and (freq_center.size == 1): freq_center = NP.repeat(freq_center, bw_eff.size)
elif bw_eff.size != freq_center.size: raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')
if shape is not None:
if not isinstance(shape, str): raise TypeError('Window shape must be a string')
if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']: raise ValueError('Invalid value for window shape specified.')
else: shape = 'rect'
if fftpow is None: fftpow = 1.0
else:
if not isinstance(fftpow, (int, float)): raise TypeError('Power to raise window FFT by must be a scalar value.')
if fftpow < 0.0: raise ValueError('Power for raising FFT of window by must be non-negative.')
if pad is None: pad = 1.0
else:
if not isinstance(pad, (int, float)): raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
warnings.warn('Pad fraction found to be negative.
Resetting to 0.0 (no padding will be applied).') if not isinstance(datapool, str): raise TypeError('Input datapool must be a string') if datapool.lower() not in ['prelim']: raise ValueError('Specified datapool not supported') if visscaleinfo is not None: if not isinstance(visscaleinfo, dict): raise TypeError('Input visscaleinfo must be a dictionary') if 'vis' not in visscaleinfo: raise KeyError('Input visscaleinfo does not contain key "vis"') if not isinstance(visscaleinfo['vis'], RI.InterferometerArray): if 'lst' not in visscaleinfo: raise KeyError('Input visscaleinfo does not contain key "lst"') lst_vis = visscaleinfo['lst'] * 15.0 if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)): raise TypeError('Input visibilities must be a numpy or a masked array') if not isinstance(visscaleinfo['vis'], MA.MaskedArray): visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis'])) vistriad = MA.copy(visscaleinfo['vis']) else: if 'bltriplet' not in visscaleinfo: raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"') blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True) if blrefind.size != 3: blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True) blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True) if blind_next.size + blind.size != 3: raise ValueError('Exactly three baselines were not found in the reference baselines') else: blind = NP.append(blind, blind_missing[blind_next]) blrefind = NP.append(blrefind, blrefind_next) else: blind_missing = [] vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1)) if len(blind_missing) > 0: vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj() vistriad = MA.array(vistriad, mask=NP.isnan(vistriad)) lst_vis = visscaleinfo['vis'].lst viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float) lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0 if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1) wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1) else: vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out) if not isinstance(method, str): raise TypeError('Input method must be a string') if method.lower() not in ['fft', 'nufft']: raise ValueError('Specified FFT method not supported') if not isinstance(apply_flags, bool): raise TypeError('Input apply_flags must be boolean') flagwts = 1.0 visscale = 1.0 if datapool.lower() == 'prelim': if method.lower() == 'fft': freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True) window_loss_factor = 1 / frac_width n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int) ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True) sortind = NP.argsort(ind_channels) ind_freq_center = ind_freq_center[sortind] ind_channels = ind_channels[sortind] dfrequency = dfrequency[sortind] n_window = n_window[sortind] for i,ind_chan in enumerate(ind_channels): window = 
NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True) window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2)) ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True) sind = NP.argsort(ind_window_chans) ind_window_chans = ind_window_chans[sind] ind_chans = ind_chans[sind] dfreq = dfreq[sind] window = window[ind_window_chans] window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0))) freq_wts[i,:] = window npad = int(self.f.size * pad) lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True) result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}} if visscaleinfo is not None: visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1) visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1) for dpool in ['errinfo', 'prelim', 'submodel', 'residual']: if dpool.lower() == 'errinfo': for diffind in range(2): if apply_flags: flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data) flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]: eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored # eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: if dpool in self.cPhase.cpinfo['processed']: if apply_flags: flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data) flagwts = flagwts[NP.newaxis,...] 
# nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan if dpool == 'submodel': eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored # eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: for key in self.cPhase.cpinfo['processed'][dpool]['eicp']: eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data) # eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0)) eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)] if dpool == 'prelim': result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df else: result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df self.cPhaseDS = result if resample: result_resampled = copy.deepcopy(result) downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff) result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear') result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear') for dpool in ['errinfo', 'prelim', 'submodel', 'residual']: if dpool.lower() == 'errinfo': for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']: for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]: result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT') if dpool in self.cPhase.cpinfo['processed']: if dpool == 'submodel': result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT') else: for key in self.cPhase.cpinfo['processed'][datapool]['eicp']: if dpool == 'prelim': result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT') else: result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT') self.cPhaseDS_resampled = result_resampled return result_resampled else: return result 
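# Illustrative usage of FT() (a minimal sketch, given here only as a
# comment; the object names, file name and parameter values below are
# hypothetical and not part of this module):
#
#     cpObj = ClosurePhase('my_closures.npz', freqs, infmt='npz')
#     cpObj.smooth_in_tbins(ndaybins=4, lstbinsize=60.0)
#     cpObj.subsample_differencing(ndaybins=4, lstbinsize=60.0)
#     cpDSobj = ClosurePhaseDelaySpectrum(cpObj)
#     ds = cpDSobj.FT(bw_eff=10e6, freq_center=150e6, shape='bhw',
#                     fftpow=2.0, pad=1.0, datapool='prelim',
#                     method='fft', resample=True, apply_flags=True)
#
# The returned dictionary then carries, for instance,
# ds['whole']['dspec']['median'] of shape (nspw,nlst,ndays,ntriads,nlags)
# as described in the docstring of FT() above.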
############################################################################
def subset(self, selection=None):
"""
------------------------------------------------------------------------
Return triad and time indices to select a subset of processed data
Inputs:
selection [NoneType or dictionary] Selection parameters based on which triad, LST, and day indices will be returned. If set to None (default), all triad, LST, and day indices will be returned. Otherwise it must be a dictionary with the following keys and values:
'triads' [NoneType or list of 3-element tuples] If set to None (default), indices of all triads are returned. Otherwise, the specific triads must be specified such as [(1,2,3), (1,2,4), ...] and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None (default), indices of all LST are returned. Otherwise must be a list or numpy array containing indices to LST.
'days' [NoneType, list or numpy array] If set to None (default), indices of all days are returned. Otherwise must be a list or numpy array containing indices to days.
Outputs:
Tuple (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) containing the triad, LST, day, and day-pair (for subsample differences) indices, each as a numpy array
------------------------------------------------------------------------
"""
if selection is None: selection = {}
else:
if not isinstance(selection, dict): raise TypeError('Input selection must be a dictionary')
triads = map(tuple, self.cPhase.cpinfo['raw']['triads'])
if 'triads' not in selection: selection['triads'] = triads
if selection['triads'] is None: selection['triads'] = triads
triad_ind = [triads.index(triad) for triad in selection['triads']]
triad_ind = NP.asarray(triad_ind)
lst_ind = None
if 'lst' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']: lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
else:
if selection['lst'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']: lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
elif isinstance(selection['lst'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = selection['lst']
if NP.any(NP.logical_or(lst_ind < 0, lst_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])): raise ValueError('Input processed lst indices out of bounds')
else: raise TypeError('Wrong type for processed lst indices')
if lst_ind is None: raise ValueError('LST index selection could not be performed')
day_ind = None
day_ind_eicpdiff = None
if 'days' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']: day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo: day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
else:
if selection['days'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']: day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo: day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
elif isinstance(selection['days'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = selection['days']
if NP.any(NP.logical_or(day_ind < 0, day_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])): raise ValueError('Input processed day indices out of bounds')
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = [i for i,item in
enumerate(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']) if len(set(item)-set(selection['days']))==0] else: raise TypeError('Wrong type for processed day indices') if day_ind is None: raise ValueError('Day index selection could not be performed') return (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) ############################################################################ def compute_power_spectrum(self, cpds=None, selection=None, autoinfo=None, xinfo=None, cosmo=cosmo100, units='K', beamparms=None): """ ------------------------------------------------------------------------ Compute power spectrum of closure phase data. It is in units of Mpc/h Inputs: cpds [dictionary] A dictionary that contains the 'oversampled' (if resample=False) and/or 'resampled' (if resample=True) delay spectrum information. If it is not specified the attributes cPhaseDS['processed'] and cPhaseDS_resampled['processed'] are used. Under each of these keys, it holds a dictionary that has the following keys and values: 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win. It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the window function applied. Accepted values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value must be a positive scalar with default = 1.0 'npad' [scalar] Number of zero-padded channels before performing the subband delay transform. 'lags' [numpy array] lags of the subband delay spectra after padding in frequency during the transform. It is of size nlags. The lags roughly correspond to k_parallel. 'lag_kernel' [numpy array] delay transform of the frequency weights under the key 'freq_wts'. It is of size n_bl x n_win x nlags x n_t. 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff 'processed' [dictionary] Contains the following keys and values: 'dspec' [dictionary] Contains the following keys and values: 'twts' [numpy array] Weights from time-based flags that went into time-averaging. Shape=(ntriads,npol,nchan,nt) 'mean' [numpy array] Delay spectrum of closure phases based on their mean across time intervals. Shape=(nspw,npol,nt,ntriads,nlags) 'median' [numpy array] Delay spectrum of closure phases based on their median across time intervals. Shape=(nspw,npol,nt,ntriads,nlags) selection [NoneType or dictionary] Selection parameters based on which triad, LST, and day indices will be returned. If set to None (default), all triad, LST, and day indices will be returned. Otherwise it must be a dictionary with the following keys and values: 'triads' [NoneType or list of 3-element tuples] If set to None (default), indices of all triads are returned.
Otherwise, the specific triads must be specified such as [(1,2,3), (1,2,4), ...] and their indices will be returned 'lst' [NoneType, list or numpy array] If set to None (default), indices of all LST are returned. Otherwise must be a list or numpy array containing indices to LST. 'days' [NoneType, list or numpy array] If set to None (default), indices of all days are returned. Otherwise must be a list or numpy array containing indices to days. autoinfo [NoneType or dictionary] Specifies parameters for processing before power spectrum in auto or cross modes. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes that will be averaged coherently before squaring (for auto) or cross-multiplying (for cross) power spectrum. If set to None (default), no axes are averaged coherently. If set to int, list, tuple or numpy array, those axes will be averaged coherently after applying the weights specified under key 'wts' along those axes. 1=lst, 2=days, 3=triads. 'wts' [NoneType/list/numpy array] If not provided (equivalent to setting it to None) or set to None (default), it is set to a one element list which is a one element numpy array of unity. Otherwise, it must be a list of same number of elements as in key 'axes' and each of these must be a numpy broadcast compatible array corresponding to each of the axis specified in 'axes' xinfo [NoneType or dictionary] Specifies parameters for processing cross power spectrum. If set to None, a dictionary will be created with the default values as described below. The dictionary must have the following keys and values: 'axes' [NoneType/int/list/tuple/numpy array] Axes over which power spectrum will be computed incoherently by cross- multiplication. If set to None (default), no cross- power spectrum is computed. If set to int, list, tuple or numpy array, cross-power over those axes will be computed incoherently by cross-multiplication. The cross-spectrum over these axes will be computed after applying the pre- and post- cross-multiplication weights specified in key 'wts'. 1=lst, 2=days, 3=triads. 'collapse_axes' [list] The axes that will be collapsed after the cross-power matrix is produced by cross-multiplication. If this key is not set, it will be initialized to an empty list (default), in which case none of the axes is collapsed and the full cross-power matrix will be output. It must be a subset of values under key 'axes'. This will reduce it from a square matrix along that axis to collapsed values along each of the leading diagonals. 1=lst, 2=days, 3=triads. 'dlst' [scalar] LST interval (in mins) or difference between LST pairs which will be determined and used for cross-power spectrum. Will only apply if values under 'axes' contains the LST axis(=1). 'dlst_range' [scalar, numpy array, or NoneType] Specifies the LST difference(s) in minutes that are to be used in the computation of cross-power spectra. If a scalar, only the diagonal consisting of pairs with that LST difference will be computed. If a numpy array, those diagonals consisting of pairs with that LST difference will be computed. If set to None (default), the main diagonal (LST difference of 0) and the first off-main diagonal (LST difference of 1 unit) corresponding to pairs with 0 and 1 unit LST difference are computed. Applies only if key 'axes' contains LST axis (=1).
'avgcov' [boolean] It specifies if the collapsed square covariance matrix is to be collapsed further to a single number after applying 'postX' weights. If not set or set to False (default), this late stage collapse will not be performed. Otherwise, it will be averaged in a weighted average sense where the 'postX' weights would have already been applied during the collapsing operation 'wts' [NoneType or Dictionary] If not set, a default dictionary (see default values below) will be created. It must have the following keys and values: 'preX' [list of numpy arrays] It contains pre-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'preX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'preXnorm' [boolean] If False (default), no normalization is done after the application of 'preX' weights. If set to True, the delay spectrum will be normalized by the sum of the weights. 'postX' [list of numpy arrays] It contains post-cross- multiplication weights. It is a list where each element in the list is a numpy array, and the number of elements in the list must match the number of entries in key 'axes'. If 'axes' is set None, 'postX' may be set to a list with one element which is a numpy array of ones. The number of elements in each of the numpy arrays must be numpy broadcastable into the number of elements along that axis in the delay spectrum. 'postXnorm' [boolean] If False (default), no normalization is done after the application of 'postX' weights. If set to True, the delay cross power spectrum will be normalized by the sum of the weights. cosmo [instance of cosmology class from astropy] An instance of class FLRW or default_cosmology of astropy cosmology module. Default uses Planck 2015 cosmology, with H0=100 h km/s/Mpc units [string] Specifies the units of output power spectrum. Accepted values are 'Jy' and 'K' (default) and the power spectrum will be in corresponding squared units. Output: Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind' ((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst' ((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday' ((ndays,) array), 'oversampled' and 'resampled' corresponding to whether resample was set to False or True in call to member function FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array corresponding to triad and time indices used in selecting the data. Values under keys 'oversampled' and 'resampled' each contain a dictionary with the following keys and values: 'z' [numpy array] Redshifts corresponding to the band centers in 'freq_center'. It has shape=(nspw,) 'lags' [numpy array] Delays (in seconds). It has shape=(nlags,). 'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to 'lags'. It has shape=(nspw,nlags) 'freq_center' [numpy array] contains the center frequencies (in Hz) of the frequency subbands of the subband delay spectra. It is of size n_win.
It is roughly equivalent to redshift(s) 'freq_wts' [numpy array] Contains frequency weights applied on each frequency sub-band during the subband delay transform. It is of size n_win x nchan. 'bw_eff' [numpy array] contains the effective bandwidths (in Hz) of the subbands being delay transformed. It is of size n_win. It is roughly equivalent to width in redshift or along line-of-sight 'shape' [string] shape of the frequency window function applied. Usual values are 'rect' (rectangular), 'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall). 'fftpow' [scalar] the power to which the FFT of the window was raised. The value must be a positive scalar with default = 1.0 'lag_corr_length' [numpy array] It is the correlation timescale (in pixels) of the subband delay spectra. It is proportional to inverse of effective bandwidth. It is of size n_win. The unit size of a pixel is determined by the difference between adjacent pixels in lags under key 'lags' which in turn is effectively inverse of the effective bandwidth of the subband specified in bw_eff It further contains 3 keys named 'whole', 'submodel', and 'residual' each of which is a dictionary. 'whole' contains power spectrum info about the input closure phases. 'submodel' contains power spectrum info about the model that will have been subtracted (as closure phase) from the 'whole' model. 'residual' contains power spectrum info about the closure phases obtained as a difference between 'whole' and 'submodel'. It contains the following keys and values: 'mean' [numpy array] Delay power spectrum incoherently estimated over the axes specified in xinfo['axes'] using the 'mean' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'median' [numpy array] Delay power spectrum incoherently averaged over the axes specified in incohax using the 'median' key in input cpds or attribute cPhaseDS['processed']['dspec']. It has shape that depends on the combination of input parameters. See examples below. If both collapse_axes and avgcov are not set, those axes will be replaced with square covariance matrices. If collapse_axes is provided but avgcov is False, those axes will be of shape 2*Naxis-1. 'diagoffsets' [dictionary] Same keys corresponding to keys under 'collapse_axes' in input containing the diagonal offsets for those axes. If 'avgcov' was set, those entries will be removed from 'diagoffsets' since all the leading diagonal elements have been collapsed (averaged) further. Value under each key is a numpy array where each element in the array corresponds to the index of that leading diagonal. This should match the size of the output along that axis in 'mean' or 'median' above. 'diagweights' [dictionary] Each key is an axis specified in collapse_axes and the value is a numpy array of weights corresponding to the diagonal offsets in that axis. 'axesmap' [dictionary] If covariance in cross-power is calculated but is not collapsed, the number of dimensions in the output will have changed. This parameter tracks where the original axis is now placed. The keys are the original axes that are involved in incoherent cross-power, and the values are the new locations of those original axes in the output.
'nsamples_incoh' [integer] Number of incoherent samples in producing the power spectrum 'nsamples_coh' [integer] Number of coherent samples in producing the power spectrum Examples: (1) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}} Output delay power spectrum has shape (Nspw, Nlst, 1, Ntriads, Nlags) (2) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [], 'wts':{'preX': None, 'preXnorm': False, 'postX': None, 'postXnorm': False}, 'dlst_range': None} Output delay power spectrum has shape (Nspw, 2, Nlst, 1, Ntriads, Ntriads, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4,5]} (3) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': 2, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3], 'dlst_range': [0.0, 1.0, 2.0]} Output delay power spectrum has shape (Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1,2], 3: [4]} (4) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3], 'dlst_range': [1.0, 2.0, 3.0, 4.0]} Output delay power spectrum has shape (Nspw, 4, Ndays, 2*Ntriads-1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range), 3: NP.arange(-Ntriads,Ntriads)}, axesmap = {1: [1], 3: [3]} (5) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3], 'dlst_range': None} Output delay power spectrum has shape (Nspw, 2, Nlst, Ndays, 1, Nlags) diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]} (6) Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags) autoinfo = {'axes': None, 'wts': None} xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []} Output delay power spectrum has shape (Nspw, 1, Ndays, 1, Nlags) diagoffsets = {}, axesmap = {1: [1], 3: [3]} ------------------------------------------------------------------------ """ if not isinstance(units,str): raise TypeError('Input parameter units must be a string') if units.lower() == 'k': if not isinstance(beamparms, dict): raise TypeError('Input beamparms must be a dictionary') if 'freqs' not in beamparms: beamparms['freqs'] = self.f beamparms_orig = copy.deepcopy(beamparms) if autoinfo is None: autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]} elif not isinstance(autoinfo, dict): raise TypeError('Input autoinfo must be a dictionary') if 'axes' not in autoinfo: autoinfo['axes'] = None else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array') else: autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1) if 'wts' not in autoinfo: if autoinfo['axes'] is not None: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes']) else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] else: if autoinfo['axes'] is not None: if not isinstance(autoinfo['wts'], list): raise TypeError('wts in input autoinfo must be a list of numpy arrays') else: if len(autoinfo['wts']) !=
len(autoinfo['axes']): raise ValueError('Input list of wts must be same as length of autoinfo axes') else: autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] if xinfo is None: xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}} elif not isinstance(xinfo, dict): raise TypeError('Input xinfo must be a dictionary') if 'axes' not in xinfo: xinfo['axes'] = None else: if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)): raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array') else: xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1) if 'wts' not in xinfo: xinfo['wts'] = {} for xkey in ['preX', 'postX']: if xinfo['axes'] is not None: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes']) else: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] xinfo['wts']['preXnorm'] = False xinfo['wts']['postXnorm'] = False else: if xinfo['axes'] is not None: if not isinstance(xinfo['wts'], dict): raise TypeError('wts in input xinfo must be a dictionary') for xkey in ['preX', 'postX']: if not isinstance(xinfo['wts'][xkey], list): raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey)) else: if len(xinfo['wts'][xkey]) != len(xinfo['axes']): raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey)) else: for xkey in ['preX', 'postX']: xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] if 'preXnorm' not in xinfo['wts']: xinfo['wts']['preXnorm'] = False if 'postXnorm' not in xinfo['wts']: xinfo['wts']['postXnorm'] = False if not isinstance(xinfo['wts']['preXnorm'], NP.bool): raise TypeError('preXnorm in input xinfo must be a boolean') if not isinstance(xinfo['wts']['postXnorm'], NP.bool): raise TypeError('postXnorm in input xinfo must be a boolean') if 'avgcov' not in xinfo: xinfo['avgcov'] = False if not isinstance(xinfo['avgcov'], NP.bool): raise TypeError('avgcov under input xinfo must be boolean') if 'collapse_axes' not in xinfo: xinfo['collapse_axes'] = [] if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)): raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array') else: xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1) if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None): if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0: raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection") cohax = autoinfo['axes'] if cohax is None: cohax = [] incohax = xinfo['axes'] if incohax is None: incohax = [] if selection is None: selection = {'triads': None, 'lst': None, 'days': None} else: if not isinstance(selection, dict): raise TypeError('Input selection must be a dictionary') if cpds is None: cpds = {} sampling = ['oversampled', 'resampled'] for smplng in sampling: if smplng == 'oversampled': cpds[smplng] = copy.deepcopy(self.cPhaseDS) else: cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled) triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection) result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['processed']['prelim']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['processed']['prelim']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['processed']['prelim']['daybins'][day_ind], 'day_ind': day_ind, 'dday':
self.cPhase.cpinfo['processed']['prelim']['diff_dbins'][day_ind]} dlstbin = NP.mean(self.cPhase.cpinfo['processed']['prelim']['dlstbins']) if 'dlst_range' in xinfo: if xinfo['dlst_range'] is None: dlst_range = None lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated else: dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours) if dlst_range.size == 1: dlst_range = NP.insert(dlst_range, 0, 0.0) lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size])) else: dlst_range = None lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance for smplng in sampling: result[smplng] = {} wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz) z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1 dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff'] dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo) kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags'] rz_los = cosmo.comoving_distance(z) # in Mpc/h drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h if units == 'Jy': jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz) jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz) temperature_from_fluxdensity = 1.0 elif units == 'K': beamparms = copy.deepcopy(beamparms_orig) omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts']) jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz) temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B) else: raise ValueError('Input value for units invalid') factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2 result[smplng]['z'] = z result[smplng]['kprll'] = kprll result[smplng]['lags'] = NP.copy(cpds[smplng]['lags']) result[smplng]['freq_center'] = cpds[smplng]['freq_center'] result[smplng]['bw_eff'] = cpds[smplng]['bw_eff'] result[smplng]['shape'] = cpds[smplng]['shape'] result[smplng]['freq_wts'] = cpds[smplng]['freq_wts'] result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length'] for dpool in ['whole', 'submodel', 'residual']: if dpool in cpds[smplng]: result[smplng][dpool] = {} inpshape = list(cpds[smplng]['whole']['dspec']['mean'].shape) inpshape[1] = lst_ind.size inpshape[2] = day_ind.size inpshape[3] = triad_ind.size if len(cohax) > 0: nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)]) else: nsamples_coh = 1 if len(incohax) > 0: nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)]) nsamples_incoh = nsamples * (nsamples - 1) else: nsamples_incoh = 1 twts_multidim_idx = NP.ix_(lst_ind,day_ind,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1) dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan) max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2))) select_chan = NP.argmax(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2))) twts = NP.copy(cpds[smplng]['whole']['dspec']['twts'].data[:,:,:,[select_chan]]) # shape=(nlst,ndays,ntriads,nlags=1) if nsamples_coh > 1: awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int)) awts = 
NP.ones(awts_shape, dtype=NP.complex) awts_shape = NP.asarray(awts_shape) for caxind,caxis in enumerate(cohax): curr_awts_shape = NP.copy(awts_shape) curr_awts_shape[caxis] = -1 awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape)) for stat in ['mean', 'median']: if dpool == 'submodel': dspec =
NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx])
numpy.copy
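A minimal sketch (not from the source) of why the completion uses NP.copy here: the selected delay-spectrum block is copied into an independent buffer, so the in-place weighting applied afterwards cannot mutate the cached cpds dictionary. Names below are illustrative.
import numpy as np

cache = {'dspec': np.arange(5.0)}   # stands in for cpds[smplng][dpool]['dspec'][...]
dspec = np.copy(cache['dspec'])     # independent buffer, not a view
dspec *= 2.0                        # safe: the cached array is untouched
assert cache['dspec'][1] == 1.0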
#!/usr/bin/env python # coding: utf-8 from evidently.analyzers.base_analyzer import Analyzer import pandas as pd from pandas.api.types import is_numeric_dtype import numpy as np from scipy.stats import ks_2samp, chisquare, probplot from sklearn import metrics class RegressionPerformanceAnalyzer(Analyzer): def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping): result = dict() if column_mapping: date_column = column_mapping.get('datetime') id_column = column_mapping.get('id') target_column = column_mapping.get('target') prediction_column = column_mapping.get('prediction') num_feature_names = column_mapping.get('numerical_features') target_names = column_mapping.get('target_names') if num_feature_names is None: num_feature_names = [] else: num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])] cat_feature_names = column_mapping.get('categorical_features') if cat_feature_names is None: cat_feature_names = [] else: cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])] else: date_column = 'datetime' if 'datetime' in reference_data.columns else None id_column = None target_column = 'target' if 'target' in reference_data.columns else None prediction_column = 'prediction' if 'prediction' in reference_data.columns else None utility_columns = [date_column, id_column, target_column, prediction_column] num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns)) cat_feature_names = list(set(reference_data.select_dtypes([np.object]).columns) - set(utility_columns)) target_names = None result["utility_columns"] = {'date':date_column, 'id':id_column, 'target':target_column, 'prediction':prediction_column} result["cat_feature_names"] = cat_feature_names result["num_feature_names"] = num_feature_names result['metrics'] = {} if target_column is not None and prediction_column is not None: reference_data.replace([np.inf, -np.inf], np.nan, inplace=True) reference_data.dropna(axis=0, how='any', inplace=True) #calculate quality metrics me = np.mean(reference_data[prediction_column] - reference_data[target_column]) sde =
np.std(reference_data[prediction_column] - reference_data[target_column], ddof = 1)
numpy.std
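Illustrative sketch of the completed metric: the mean error (me) and its sample standard deviation (sde), where ddof=1 gives the unbiased n-1 normalisation. The toy frame below is an assumption, not data from the source.
import numpy as np
import pandas as pd

df = pd.DataFrame({'target': [1.0, 2.0, 3.0], 'prediction': [1.1, 1.9, 3.3]})
residuals = df['prediction'] - df['target']
me = np.mean(residuals)             # mean error
sde = np.std(residuals, ddof=1)     # sample std of the error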
# # Copyright (c) 2015, <NAME> # All rights reserved. # import numpy as np from triangulum.utils import aabb from triangulum.third_party import transformations def norm2(a): return (a * a).sum(-1) def norm(a): return np.sqrt(norm2(a)) def normalize(a): return a / norm(a) def homogenize(a, w=1.0): """ Example: a=[ [a00, a01], [a10, a11], [a20, a21] ], w=1 -> result=[ [a00, a01, w], [a10, a11, w], [a20, a21, w] ] """ return np.hstack([a, np.full((len(a), 1), w, a.dtype)]) def homo_translate(matrix, points): points = np.array(points) points_list = np.atleast_2d(points) if points_list.shape != points.shape: single_input = True else: single_input = False points = points_list if points.shape[-1] < matrix.shape[1]: points = homogenize(points) p = np.dot(points, matrix.T) p = p[:, :-1] / p[:, -1, np.newaxis] if single_input: return p[0] else: return p def scale_matrix(s, d=2): if np.isscalar(s): s = np.array([s]*d) return np.diag(np.hstack([s, 1.0])) def rotate_matrix2d(alpha): return np.array([[np.cos(alpha), -np.sin(alpha), 0], [np.sin(alpha), np.cos(alpha), 0], [ 0, 0, 1]]) # def apply_matrix_to(matrix, indicies, dim): TODO: implement # n, m = matrix.shape # assert n == m # # indicies = list(indicies) # for i in range(n): # if i not in indicies: # indicies.append(i) # # pre_permutation = np.zeros((n, n), np.int32) # for i, j in enumerate(indicies): # pre_permutation[i, j] = 1 # # return np.dot(np.linalg.inv(pre_permutation), np.dot(matrix, pre_permutation)) def look_at_matrix(eye, target, up=(0, 0, 1), right=None): """ Camera frustum looks along -Z axis. See gluLookAt. """ # TODO: review forward = np.float64(target) - eye forward = normalize(forward) if np.allclose(target[:2], eye[:2]) and up[2] == 1: if right is not None: right = normalize(right) else: right = normalize(np.array([1, 0, 0])) else: right = normalize(np.cross(forward, up)) down = np.cross(forward, right) R = np.float64([right, -down, -forward]) tvec = -np.dot(R, eye) return np.float32(np.vstack([np.column_stack([R, tvec]), [0, 0, 0, 1]])) def ortho_matrix(aspect, near, far, width): """ Camera frustum looks along -Z axis. Result frustum camera looks along -Z axis, like in OpenGL. """ height = aspect * width P = transformations.clip_matrix(-width/2, width/2, -height/2, height/2, near, far, perspective=False) P = np.dot(P, scale_matrix([1, 1, -1])) return np.float32(P) def perspective_matrix(aspect, near, far, fov_h=45): """ Camera frustum looks along -Z axis. Result frustum camera looks along -Z axis, like in OpenGL. 
""" tan = np.tan(np.radians(fov_h) / 2) right = tan * near left = -right bottom, top = aspect * left, aspect * right P = transformations.clip_matrix(left, right, bottom, top, near, far, perspective=True) P = np.dot(P, scale_matrix([1, 1, -1])) return np.float32(-P) def create_frustum_points(rt_mtx, k_mtx, ratio, frustums_depth=1.0): rt_inv = np.linalg.inv(rt_mtx) camera_corners = homo_translate(np.linalg.inv(k_mtx), aabb.rect_to_quad([[-1.0, -1.0 * ratio], [1.0, 1.0 * ratio]])) corners = np.hstack([camera_corners, [[-1]] * 4]) * frustums_depth frustum_points = homo_translate(rt_inv, np.vstack([[[0, 0, 0]], corners])) return frustum_points def create_points_in_frustum(ps, frustum_points, ratio=1.0): camera, ur, ul, ll, lr = frustum_points result = ul + (ur - ul) * ps[:, 0].reshape(-1, 1) + (ll - ul) * (ps[:, 1].reshape(-1, 1) / ratio) return result def vdot(a, b): """ >>> vdot([1, 0, 0], [0, 1, 0]) array([0, 0, 1]) >>> vdot([1, 0, 0], [0, 0, 1]) array([ 0, -1, 0]) >>> vdot([1, 1, 0], [0, 0, 1]) array([ 1, -1, 0]) """ return np.array([a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]]) def plane_by_points(points): """ >>> plane_by_points([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) array([ 1, 1, 1, -1]) """ a, b, c = np.array(points) ab, ac = b - a, c - a n = vdot(ab, ac) return np.hstack([n, -np.dot(n, points[0])]) def intersect_plane_line(plane, line_v, line_p): """ >>> intersect_plane_line([1, 0, 0, -1], [1, 1, 1], [-1, -1, -1]) array([ 1., 1., 1.]) >>> intersect_plane_line([0, 1, 0, -1], [1, 1, 1], [-1, 0, -1]) array([ 0., 1., 0.]) >>> intersect_plane_line([0, 0, 10, -10], [1, 1, 2], [-1, 0, -1]) array([ 0., 1., 1.]) """ assert len(plane) == 4 assert len(line_v) == len(line_p) == 3 t = - (np.dot(plane,
np.hstack([line_p, 1])
numpy.hstack
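Sketch of the completed step under the function's own convention (plane = [a, b, c, d]): np.hstack([line_p, 1]) homogenises the point so the dot product picks up the plane offset d. The values below are an illustrative case consistent with the docstring examples.
import numpy as np

plane = np.array([0, 0, 1, -1])                               # the plane z = 1
line_v, line_p = np.array([0, 0, 1]), np.array([0, 0, -1])
t = -np.dot(plane, np.hstack([line_p, 1])) / np.dot(plane[:3], line_v)
point = line_p + t * line_v                                   # -> [0., 0., 1.]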
import itertools import numpy as np """ MAUCpy ~~~~~~ Contains two equations from Hand and Till's 2001 paper on a multi-class approach to the AUC. The a_value() function is the probabilistic approximation of the AUC found in equation 3, while MAUC() is the pairwise averaging of this value for each of the classes. This is equation 7 in their paper. Source of script: https://gist.github.com/stulacy/672114792371dc13b247 """ def a_value(probabilities, zero_label=0, one_label=1): """ Approximates the AUC by the method described in Hand and Till 2001, equation 3. NB: The class labels should be in the set [0,n-1] where n = # of classes. The class probability should be at the index of its label in the probability list. I.e. With 3 classes the labels should be 0, 1, 2. The class probability for class '1' will be found in index 1 in the class probability list wrapped inside the zipped list with the labels. Args: probabilities (list): A zipped list of the labels and the class probabilities in the form (m = # data instances): [(label1, [p(x1c1), p(x1c2), ... p(x1cn)]), (label2, [p(x2c1), p(x2c2), ... p(x2cn)]) ... (labelm, [p(xmc1), p(xmc2), ... (pxmcn)]) ] zero_label (optional, int): The label to use as the class '0'. Must be an integer, see above for details. one_label (optional, int): The label to use as the class '1'. Must be an integer, see above for details. Returns: The A-value as a floating point. """ # Obtain a list of the probabilities for the specified zero label class expanded_points = [(instance[0], instance[1][zero_label]) for instance in probabilities if instance[0] == zero_label or instance[0] == one_label] sorted_ranks = sorted(expanded_points, key=lambda x: x[1]) n0 = sum(1 for point in sorted_ranks if point[0] == zero_label) n1 = sum(1 for point in sorted_ranks if point[0] == one_label) sum_ranks = sum(index+1 for index, point in enumerate(sorted_ranks) if point[0] == zero_label) # Add 1 as ranks are one-based return (sum_ranks - n0*(n0+1) / 2.0) / float(n0 * n1) # Eqn 3 def MAUC(data, num_classes=None): """ Calculates the MAUC over a set of multi-class probabilities and their labels. This is equation 7 in Hand and Till's 2001 paper. NB: The class labels should be in the set [0,n-1] where n = # of classes. The class probability should be at the index of its label in the probability list. I.e. With 3 classes the labels should be 0, 1, 2. The class probability for class '1' will be found in index 1 in the class probability list wrapped inside the zipped list with the labels. Args: data (list): A zipped list (NOT A GENERATOR) of the labels and the class probabilities in the form (m = # data instances): [(label1, [p(x1c1), p(x1c2), ... p(x1cn)]), (label2, [p(x2c1), p(x2c2), ... p(x2cn)]) ... (labelm, [p(xmc1), p(xmc2), ... (pxmcn)]) ] num_classes (int): The number of classes in the dataset - 1. Returns: The MAUC as a floating point value. 
""" if num_classes is None: num_classes = len(data[0][1]) - 1 # Have to take average of A value with both classes acting as label 0 as this # gives different outputs for more than 2 classes sum_avals = sum((a_value(data, zero_label=pairing[0], one_label=pairing[1]) for pairing in itertools.permutations(range(num_classes), r=2))) return sum_avals / float(num_classes * (num_classes-1)) # Eqn 7 def calcBCA(estimLabels, trueLabels, nrClasses): # Balanced Classification Accuracy bcaAll = [] for c0 in range(nrClasses): for c1 in range(c0+1, nrClasses): # c0 = positive class & c1 = negative class TP = np.sum((estimLabels == c0) & (trueLabels == c0)) TN = np.sum((estimLabels == c1) & (trueLabels == c1)) FP = np.sum((estimLabels == c1) & (trueLabels == c0)) FN = np.sum((estimLabels == c0) & (trueLabels == c1)) # sometimes the sensitivity of specificity can be NaN, if the user doesn't forecast one of the classes. # In this case we assume a default value for sensitivity/specificity if (TP+FN) == 0: sensitivity = 0.5 else: sensitivity = TP/(TP+FN) if (TN+FP) == 0: specificity = 0.5 else: specificity = TN/(TN+FP) bcaCurr = 0.5*(sensitivity+specificity) bcaAll += [bcaCurr] # print('bcaCurr %f TP %f TN %f FP %f FN %f' % (bcaCurr, TP, TN, FP, FN)) return np.mean(bcaAll) def calculate_WES(estimates, lowers, uppers, trues): """Weighted Error Score""" coefs = 1 / (uppers - lowers) return np.sum(coefs * np.abs(estimates - trues)) / np.sum(coefs) def calculate_CPA(estimates, lowers, uppers, trues): """Coverage Probability Accuracy for 50% Confidence Interval""" cov_prob =
np.sum((lowers < trues) & (uppers > trues))
numpy.sum
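Sketch of the completed coverage count: the boolean AND marks samples whose truth lies strictly inside the interval, and np.sum counts the True entries. Dividing by the sample count (an assumption about how the truncated function continues) turns it into a rate to compare against the nominal 0.5 coverage.
import numpy as np

lowers = np.array([0.0, 1.0, 2.0])
uppers = np.array([1.0, 2.0, 3.0])
trues = np.array([0.5, 2.5, 2.5])
cov_prob = np.sum((lowers < trues) & (uppers > trues)) / trues.size   # -> 2/3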
import functools import typing as tp import unittest from dataclasses import dataclass import einops import hypothesis as hp import jax import jax.numpy as jnp import numpy as np from flax import linen from hypothesis import strategies as st import treex as tx BIAS_INITS = ( tx.initializers.zeros, tx.initializers.ones, tx.initializers.normal(), tx.initializers.uniform(), ) KERNEL_INITS = BIAS_INITS + ( tx.initializers.xavier_uniform(), tx.initializers.xavier_normal(), tx.initializers.lecun_uniform(), tx.initializers.lecun_normal(), tx.initializers.kaiming_uniform(), tx.initializers.kaiming_normal(), ) A = tp.TypeVar("A") class TestEmbed: @hp.given( batch_size=st.integers(min_value=1, max_value=32), length=st.integers(min_value=1, max_value=32), num_embeddings=st.integers(min_value=1, max_value=32), training=st.booleans(), features_out=st.integers(min_value=1, max_value=32), # embedding_init=st.sampled_from(KERNEL_INITS), ) @hp.settings(deadline=None, max_examples=20) def test_equivalence( self, batch_size, length, num_embeddings, training, features_out, embedding_init, ): shape = (batch_size, length, length) x =
np.random.randint(num_embeddings, size=shape)
numpy.random.randint
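Sketch of the completed draw: integer token ids in [0, num_embeddings) with the requested shape, which is what an embedding-layer equivalence test needs as input. Sizes below are arbitrary.
import numpy as np

num_embeddings, shape = 32, (4, 8, 8)
x = np.random.randint(num_embeddings, size=shape)   # ints in 0..num_embeddings-1
assert x.min() >= 0 and x.max() < num_embeddings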
import sys sys.path.append('../') from assignment2.layers import softmax, cross_entropy_loss import numpy as np def l2_regularization(W, reg_strength): ''' Computes L2 regularization loss on weights and its gradient Arguments: W, np array - weights reg_strength - float value Returns: loss, single value - l2 regularization loss gradient, np.array same shape as W - gradient of weight by l2 loss ''' loss = reg_strength * np.trace(np.matmul(W.T, W)) # L2(W) = λ * tr(W.T * W) grad = 2 * reg_strength * W # dL2(W)/dW = 2 * λ * W return loss, grad # L2(W), dL2(W)/dW def softmax_with_cross_entropy(predictions, target_index): ''' Computes softmax and cross-entropy loss for model predictions, including the gradient Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output target_index: np array of int, shape is (1) or (batch_size) - index of the true class for given sample(s) Returns: loss, single value - cross-entropy loss dprediction, np array same shape as predictions - gradient of predictions by loss value ''' probs = softmax(predictions) loss = cross_entropy_loss(probs, target_index) if len(probs.shape) == 1: subtr = np.zeros(probs.shape) subtr[target_index] = 1 dprediction = probs - subtr else: subtr = np.zeros(probs.shape) subtr[range(len(target_index)), target_index] = 1 dprediction = (probs - subtr) / (predictions.shape[0]) return loss, dprediction class Param: ''' Trainable parameter of the model Captures both parameter value and the gradient ''' def __init__(self, value): self.value = value self.grad = np.zeros_like(value) class ReLULayer: def __init__(self): self.X = None def forward(self, X): self.X = X.copy() layer_X = X.copy() layer_X[layer_X < 0] = 0 return layer_X def backward(self, d_out): X_back = self.X.copy() X_back[X_back > 0] = 1 X_back[X_back <= 0] = 0 d_result = X_back * d_out return d_result def params(self): return {} def reset_grad(self): pass class FullyConnectedLayer: def __init__(self, n_input, n_output): self.W = Param(0.001 * np.random.randn(n_input, n_output)) self.B = Param(0.001 * np.random.randn(1, n_output)) self.X = None def forward(self, X): self.X = Param(X.copy()) output = np.dot(self.X.value, self.W.value) + self.B.value return output def backward(self, d_out): self.W.grad = np.dot(self.X.value.T, d_out) self.B.grad = np.array([np.sum(d_out, axis=0)]) d_input = np.dot(d_out, self.W.value.T) return d_input def params(self): return { 'W': self.W, 'B': self.B } def reset_grad(self): self.W.grad = np.zeros_like(self.W.value) self.B.grad = np.zeros_like(self.B.value) class ConvolutionalLayer: def __init__(self, in_channels, out_channels, filter_size, padding): ''' Initializes the layer Arguments: in_channels, int - number of input channels out_channels, int - number of output channels filter_size, int - size of the conv filter padding, int - number of 'pixels' to pad on each side ''' self.filter_size = filter_size self.in_channels = in_channels self.out_channels = out_channels self.W = Param( np.random.randn(filter_size, filter_size, in_channels, out_channels) ) self.B = Param(np.zeros(out_channels)) self.padding = padding self.X = None def forward(self, X): batch_size, height, width, channels = X.shape self.X = X if self.padding: self.X = np.zeros((batch_size, height + 2 * self.padding, width + 2 * self.padding, channels), dtype=X.dtype) self.X[:, self.padding: -self.padding, self.padding: -self.padding, :] = X _, height, width, channels = self.X.shape out_height = height - self.filter_size + 1 out_width = width - 
self.filter_size + 1 output = [] for y in range(out_height): row = [] for x in range(out_width): x_window = self.X[:, y: y + self.filter_size, x: x + self.filter_size, :] x_window = np.transpose(x_window, axes=[0, 3, 2, 1]) x_window = x_window.reshape((batch_size, self.filter_size ** 2 * channels)) w_window = np.transpose(self.W.value, axes=[2, 0, 1, 3]) w_reshape = w_window.reshape((self.filter_size ** 2 * self.in_channels, self.out_channels)) out = np.dot(x_window, w_reshape) row.append(np.array([out], dtype=self.W.value.dtype).reshape((batch_size, 1, 1, self.out_channels))) output.append(np.dstack(row)) output = np.hstack(output) + self.B.value return output def backward(self, d_out): batch_size, height, width, channels = self.X.shape _, out_height, out_width, out_channels = d_out.shape d_inp = np.zeros(self.X.shape) for y in range(out_height): for x in range(out_width): d_window = d_out[:, y, x, :] x_window = self.X[:, y: y + self.filter_size, x: x + self.filter_size, :] x_window = np.transpose(x_window, axes=[0, 3, 1, 2]) x_window = x_window.reshape((batch_size, self.filter_size ** 2 * channels)) x_transpose = x_window.transpose() w_window = np.transpose(self.W.value, axes=[2, 0, 1, 3]) w_window = w_window.reshape((self.filter_size ** 2 * self.in_channels, self.out_channels)) w_transpose = w_window.transpose() d_w_window = np.dot(x_transpose, d_window) d_w_window = d_w_window.reshape(self.in_channels, self.filter_size, self.filter_size, self.out_channels) d_w_transpose = np.transpose(d_w_window, axes=[2, 1, 0, 3]) self.W.grad += d_w_transpose E = np.ones(shape=(1, batch_size)) B = np.dot(E, d_window) B = B.reshape((d_window.shape[1])) self.B.grad += B d_inp_xy = np.dot(d_window, w_transpose)#d_window.dot(w_window.transpose()) d_inp_xy = d_inp_xy.reshape((batch_size, channels, self.filter_size, self.filter_size)) d_inp_xy = np.transpose(d_inp_xy, axes=[0, 3, 2, 1]) d_inp[:, y: y + self.filter_size, x: x + self.filter_size, :] += d_inp_xy if self.padding: d_inp = d_inp[:, self.padding: -self.padding, self.padding: -self.padding, :] return d_inp def params(self): return {'W': self.W, 'B': self.B } def reset_grad(self): self.W.grad = np.zeros_like(self.W.value) self.B.grad = np.zeros_like(self.B.value) class MaxPoolingLayer: def __init__(self, pool_size, stride): ''' Initializes the max pool Arguments: pool_size, int - area to pool stride, int - step size between pooling windows ''' self.pool_size = pool_size self.stride = stride self.X = None def forward(self, X): batch_size, height, width, channels = X.shape self.X = X out_height = (height - self.pool_size) / self.stride + 1 out_width = (width - self.pool_size) / self.stride + 1 if (not float(out_height).is_integer() and not float(out_width).is_integer()): raise Exception(f"Stride and pool size aren't consistent for {height}, {width}") out = np.zeros([int(batch_size), int(out_height), int(out_width), int(channels)]) # TODO: Implement maxpool forward pass # Hint: Similarly to Conv layer, loop on # output x/y dimension y_1 = 0 for y in range(int(out_height)): x_1 = 0 for x in range(int(out_width)): out[:, y, x, :] += np.amax(self.X[:, y_1:y_1 + self.pool_size, x_1:x_1 + self.pool_size, :], axis=(1, 2)) x_1 += self.stride y_1 += self.stride return out def backward(self, d_out): batch_size, height, width, channels = self.X.shape _, out_height, out_width, channels = d_out.shape in_l =
np.zeros_like(self.X)
numpy.zeros_like
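Sketch of the completed initialisation: the max-pool backward pass starts from a zero buffer with the forward input's shape and dtype, then scatters upstream gradients back into the pooled positions; the single-window update below is only a stand-in for that routing loop.
import numpy as np

X = np.random.randn(2, 4, 4, 3)    # batch x height x width x channels
d_input = np.zeros_like(X)         # same shape/dtype as the forward input
d_input[:, 0:2, 0:2, :] += 1.0     # stand-in: gradient routed into one window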
# This simulates determinantally-thinned point processes that have been # fitted to thinned-point process based on the method outlined in the paper # by Blaszczyszyn and Keeler[1], which is essentially the method developed # by Kulesza and Taskar[2]. # # This is the third file (of three files) to run to reproduce results similar # to those presented in the paper by Blaszczyszyn and Keeler[1]. # # The data used for fitting (or training) is stored in the file Subset.mat, # which is generated with the MATLAB file SubsetGenerate.m; see # # https://github.com/hpaulkeeler/DetPoisson_MATLAB # # The fitting parameters are stored locally in the file SubsetFitParam.npz # # This code was originally written by <NAME> in MATLAB; see # https://github.com/hpaulkeeler/DetPoisson_MATLAB # # REQUIREMENTS: # Uses Statistics (and Machine learning) Toolbox for random variable. # # Author: <NAME>, Inria/ENS, Paris, and University of Melbourne, # Melbourne, 2019 # # References: # [1] Blaszczyszyn and Keeler, Determinantal thinning of point processes # with network learning applications, 2018. # [2] Kulesza and Taskar, "Determinantal point processes for machine # learning", Now Publishers, 2012 # [3] <NAME>, Kendall, and Mecke, "Stochastic geometry and its # applications", Wiley. # [4] Baddeley, Rubak and Turner, "Spatial point patterns: Methodology and # applications with R", 2016. # [5] Shirai and Takahashi, "Random point fields associated with certain # Fredholm determinants I -- fermion, poisson and boson point", 2003.12 import numpy as np; #NumPy package for arrays, random number generation, etc import matplotlib.pyplot as plt #for plotting from matplotlib import collections as mc #for plotting line segments from scipy.io import loadmat #for reading mat files from scipy.optimize import minimize #For optimizing from scipy.stats import poisson #for the Poisson probability mass function from funNeighbourL import funNeighbourL from funSimSimpleLDPP import funSimSimpleLDPP plt.close("all"); # close all figures numbSim=10**3; ###START Load up values from MATLAB .mat file START### dataMATLAB=loadmat('Subset.mat'); lambda0=np.double(dataMATLAB['lambda']);#intensity of underlying Poisson PP xx0=np.double(dataMATLAB['xx0']); yy0=np.double(dataMATLAB['yy0']); areaSample=np.double(dataMATLAB['areaSample']); #area of sample window rSub=np.double(dataMATLAB['rSub']); #radius of matern or triangular process lambdaSub=np.double(dataMATLAB['lambdaSub']); #intensity of subset PP windowSample=dataMATLAB['windowSample'][0]; #vector describing window dims choiceModel=np.int(dataMATLAB['choiceModel']); #model number (ie 1,2 or 3) labelModel=str(dataMATLAB['labelModel'][0]); #name/label of model booleDisk=np.int(dataMATLAB['booleDisk'])!=0; #if simulation window is disk #x/y values of all underlying Poisson PPs ppStructTemp=dataMATLAB['ppStructPoisson']; numbSimTotal=ppStructTemp.size; #total number of simulations #extract data for underlying Poisson point processes xxList=[np.concatenate(ppStructTemp[ss][0][0]) for ss in range(numbSimTotal)]; yyList=[np.concatenate(ppStructTemp[ss][0][1]) for ss in range(numbSimTotal)]; ppXYPoisson=[(xxList[ss],yyList[ss])for ss in range(numbSimTotal)]; nList=[np.int(ppStructTemp[ss][0][2]) for ss in range(numbSimTotal)]; nArray=
np.array(nList)
numpy.array
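Sketch of the completed conversion: the per-simulation point counts become one numpy array so that summary statistics of the Poisson counts can be computed in vectorised form. Counts below are illustrative.
import numpy as np

nList = [12, 9, 15, 11]            # counts per simulation (made up)
nArray = np.array(nList)
lambda_hat = nArray.mean()         # empirical mean count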
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np import paddle.fluid.core as core from op_test import OpTest import paddle.fluid as fluid SIGMOID_THRESHOLD_MIN = -40 SIGMOID_THRESHOLD_MAX = 13 EXP_MAX_INPUT = 40 np.set_printoptions(threshold=1e6, suppress=True) def sigmoid(x): y = np.copy(x) y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX return 1. / (1. + np.exp(-y)) def tanh(x): y = -2. * x y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT return (2. / (1. + np.exp(y))) - 1. def gru_reference(input, w, num_direction, num_layers): '''Compute bidirectional GRU for reference.''' seq_len, batch_size, hidden_size = input.shape def step(step_in, pre_h, wu, wr, wc, ru, rr, rc, bx_u, bx_r, bx_c, bh_u, bh_r, bh_c): reset_gate = sigmoid( np.matmul(step_in, wu) + np.matmul(pre_h, ru) + bx_u + bh_u) update_gate = sigmoid(
np.matmul(step_in, wr)
numpy.matmul
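Sketch of the gate pattern the completion continues: each GRU gate is sigmoid(x W + h R + b), with one np.matmul for the input projection and one for the recurrent projection. Shapes and names here are assumptions, not the test's actual parameters.
import numpy as np

batch, hidden = 2, 4
step_in = np.random.randn(batch, hidden)
pre_h = np.random.randn(batch, hidden)
wr, rr = np.random.randn(hidden, hidden), np.random.randn(hidden, hidden)
b = np.zeros(hidden)
update_gate = 1.0 / (1.0 + np.exp(-(np.matmul(step_in, wr) + np.matmul(pre_h, rr) + b)))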
# -*- coding: utf-8 -*- """ Created on Tue Jun 12 18:07:05 2018 @author: Denis """ import time import numpy as np def borders_m(data_cube, infos): print('%s : Creating borders masks - start' % time.asctime()) masks = 0*data_cube nan_masks = np.isnan(masks) ag = infos.height bg = 1 cg = infos.large dg = 1 for i in range(infos.n_steps): a = 0 ca = 1 b = infos.height-1 cb = 1 c = 0 cc = 1 d = infos.large-1 cd = 1 cont = 1 testnan = sum(sum(
np.logical_not(nan_masks[i])
numpy.logical_not
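Sketch of the completed count: np.logical_not flips the NaN mask, so summing it counts the valid (non-NaN) pixels of slice i.
import numpy as np

mask = np.array([[True, False], [False, False]])   # True where the pixel is NaN
n_valid = np.logical_not(mask).sum()               # -> 3 valid pixels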
from maxcutpy import RandomMaxCut import numpy as np def test_random_initialization(): matrix =
np.array([[0,1,1],[1,0,5],[1,5,0]])
numpy.array
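Sketch of the completed fixture: np.array builds the weighted adjacency matrix of the max-cut instance from nested lists; a symmetric matrix with a zero diagonal is the natural sanity check for such an input.
import numpy as np

matrix = np.array([[0, 1, 1], [1, 0, 5], [1, 5, 0]])
assert (matrix == matrix.T).all()          # undirected graph
assert (np.diag(matrix) == 0).all()        # no self-loops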
# # EOSManager.py # # SEE ALSO # - util_WriteXMLWithEOS # - gwemlightcurves.KNTable # SERIOUS LIMITATIONS # - EOSFromFile : File i/o for each EOS creation will slow things donw. This command is VERY trivial, so we should be able # to directly create the structure ourselves, using eos_alloc_tabular # https://github.com/lscsoft/lalsuite/blob/master/lalsimulation/src/LALSimNeutronStarEOSTabular.c rosDebug=False import numpy as np import os import sys import lal import lalsimulation as lalsim from scipy.integrate import quad import scipy.interpolate as interp import scipy #import gwemlightcurves.table as gw_eos_table from . import MonotonicSpline as ms C_CGS=2.997925*10**10 # Argh, Monica! DENSITY_CGS_IN_MSQUARED=7.42591549e-25 # g/cm^3 m^2 //GRUnits. Multiply by this to convert from CGS -> 1/m^2 units (_geom) ### ### SERVICE 0: General EOS structure ### class EOSConcrete: """ Class characterizing a specific EOS solution. This structure *SHOULD* - auto-build the mass-radius via a TOV solve - provides ability to query the lambda(m) relationship and (in the future) higher-order multipole moments; etc As many of these features are already provided by lalsimulation, """ def __init__(self,name=None): self.name=name self.eos = None self.eos_fam = None return None def lambda_from_m(self, m): eos_fam = self.eos_fam if m<10**15: m=m*lal.MSUN_SI k2=lalsim.SimNeutronStarLoveNumberK2(m, eos_fam) r=lalsim.SimNeutronStarRadius(m, eos_fam) m=m*lal.G_SI/lal.C_SI**2 lam=2./(3*lal.G_SI)*k2*r**5 dimensionless_lam=lal.G_SI*lam*(1/m)**5 return dimensionless_lam def pressure_density_on_grid_alternate(self,logrho_grid,enforce_causal=False): """ pressure_density_on_grid. Input and output grid units are in SI (rho: kg/m^3; p = N/m^2) Pressure provided by lalsuite (=EOM integration) Density computed by m*n = (epsilon+p)/c^2mn exp(-h), which does NOT rely on lalsuite implementation """ dat_out = np.zeros(len(logrho_grid)) fam = self.eos_fam eos = self.eos npts_internal = 10000 p_internal = np.zeros(npts_internal) rho_internal = np.zeros(npts_internal) epsilon_internal = np.zeros(npts_internal) hmax = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos) if enforce_causal: # strip out everything except the causal part. hmax = lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos) h = np.linspace(0.0001,hmax,npts_internal) for indx in np.arange(npts_internal): p_internal[indx] = lalsim.SimNeutronStarEOSPressureOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10 to get CGS epsilon_internal[indx] =lalsim.SimNeutronStarEOSEnergyDensityOfPseudoEnthalpy(h[indx],eos) # SI. Note factor of C^2 needed to get mass density rho_internal[indx] =np.exp(-h[indx])* (epsilon_internal[indx]+p_internal[indx])/(lal.C_SI**2) # # print epsilon_internal[10],rho_internal[10], p_internal[10], h[10] logp_of_logrho = interp.interp1d(np.log10(rho_internal),np.log10(p_internal),kind='linear',bounds_error=False,fill_value=np.inf) # should change to Monica's spline # print logrho_grid, return logp_of_logrho(logrho_grid) def pressure_density_on_grid(self,logrho_grid,reference_pair=None,enforce_causal=False): """ pressure_density_on_grid. Input and output grid units are in SI (rho: kg/m^3; p = N/m^2) POTENTIAL PROBLEMS OF USING LALSUITE - lalinference_o2 / master: Unless patched, the *rest mass* density is not reliable. To test with the unpatched LI version, use reference_pair to specify a low-density EOS. 
This matching is highly suboptimal, so preferably test either (a) a patched code or (b) the alternative code below """ dat_out = np.zeros(len(logrho_grid)) fam = self.eos_fam eos = self.eos npts_internal = 10000 p_internal = np.zeros(npts_internal) rho_internal = np.zeros(npts_internal) hmax = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos) if enforce_causal: # strip out everything except the causal part. hmax = lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos) h = np.linspace(0.0001,hmax,npts_internal) for indx in np.arange(npts_internal): rho_internal[indx] = lalsim.SimNeutronStarEOSRestMassDensityOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10^(-3) to get CGS p_internal[indx] = lalsim.SimNeutronStarEOSPressureOfPseudoEnthalpy(h[indx],eos) # SI. Multiply by 10 to get CGS if not (reference_pair is None): indx_match = np.argmin( np.abs(np.log10(p_internal) - np.log10(reference_pair[1]))) # force agreement of densities at target pressure, if requested! Addresses bug /ambiguity in scaling of rest mass estimate; intend to apply in highly nonrelativistic regime delta_rho = np.log10(reference_pair[0]) -np.log10(rho_internal[indx_match]) rho_internal *= np.power(10, delta_rho) # print np.log10(np.c_[rho_internal,p_internal]) logp_of_logrho = interp.interp1d(np.log10(rho_internal),np.log10(p_internal),kind='linear',bounds_error=False,fill_value=np.inf) # should change to Monica's spline # print logrho_grid, return logp_of_logrho(logrho_grid) def test_speed_of_sound_causal(self, test_only_under_mmax=True,fast_test=True): """ Test if EOS satisfies speed of sound. Relies on low-level lalsimulation interpolation routines to get v(h) and as such is not very reliable By DEFAULT, we are testing the part of the EOS that is - at the largest pressure (assuming monotonic sound speed) - associated with the maximum mass NS that is stable We can also test the full table that is provided to us. 
https://git.ligo.org/lscsoft/lalsuite/blob/lalinference_o2/lalinference/src/LALInference.c#L2513 """ npts_internal = 1000 eos = self.eos fam = self.eos_fam # Largest NS provides largest attained central pressure m_max_SI = self.mMaxMsun*lal.MSUN_SI if not test_only_under_mmax: hmax = lalsim.SimNeutronStarEOSMaxPseudoEnthalpy(eos) else: try: pmax = lalsim.SimNeutronStarCentralPressure(m_max_SI,fam) hmax = lalsim.SimNeutronStarEOSPseudoEnthalpyOfPressure(pmax,eos) except: # gatch gsl interpolation errors for example return False if fast_test: # https://git.ligo.org/lscsoft/lalsuite/blob/lalinference_o2/lalinference/src/LALInference.c#L2513 try: vsmax = lalsim.SimNeutronStarEOSSpeedOfSoundGeometerized(hmax, eos) return vsmax <1.1 except: # catch gsl interpolation errors for example return False else: if rosDebug: print(" performing comprehensive test ") h = np.linspace(0.0001,hmax,npts_internal) # h = np.linspace(0.0001,lalsim.SimNeutronStarEOSMinAcausalPseudoEnthalpy(eos),npts_internal) vs_internal = np.zeros(npts_internal) for indx in np.arange(npts_internal): vs_internal[indx] = lalsim.SimNeutronStarEOSSpeedOfSoundGeometerized(h[indx],eos) if rosDebug: print(h[indx], vs_internal[indx]) return not np.any(vs_internal>1.1) # allow buffer, so we have some threshold ### ### SERVICE 1: lalsimutils structure ### # See https://github.com/lscsoft/lalsuite/tree/master/lalsimulation/src for available types class EOSLALSimulation(EOSConcrete): def __init__(self,name): self.name=name self.eos = None self.eos_fam = None self.mMaxMsun=None eos = lalsim.SimNeutronStarEOSByName(name) fam = lalsim.CreateSimNeutronStarFamily(eos) mmass = lalsim.SimNeutronStarMaximumMass(fam) / lal.MSUN_SI self.eos = eos self.eos_fam = fam self.mMaxMsun = mmass return None ### ### SERVICE 2: EOSFromFile ### # Example directory: EOS_Tables #dirEOSTablesBase = os.environ["EOS_TABLES"] dirLALSimulationBase = os.environ["LALSIMULATION_DATADIR"] # LAL table data ## Add routines to find, parse standard directory of EOS files and load suitable metadata into memory ## Follow framework of NRWaveformCatalogManager3 class EOSFromDataFile(EOSConcrete): """ FromDataFileEquationOfState (just accepts filename...not attempting to parse a catalog) """ def __init__(self,name=None,fname=None): self.name=name self.fname=fname self.eos = None self.eos_fam = None self.mMax = None self.eos, self.eos_fam = self.eos_ls() return None def eos_ls(self): # From Monica, but using code from GWEMLightcurves # https://gwemlightcurves.github.io/_modules/gwemlightcurves/KNModels/table.html """ EOS tables described by Ozel `here <https://arxiv.org/pdf/1603.02698.pdf>`_ and downloadable `here <http://xtreme.as.arizona.edu/NeutronStars/data/eos_tables.tar>`_. LALSim utilizes this tables, but needs some interfacing (i.e. conversion to SI units, and conversion from non monotonic to monotonic pressure density tables) """ obs_max_mass = 2.01 - 0.04 # used print("Checking %s" % self.name) eos_fname = "" if os.path.exists(self.fname): # NOTE: Adapted from code by <NAME> print("Loading from %s" % self.fname) bdens, press, edens = np.loadtxt(self.fname, unpack=True) press *= DENSITY_CGS_IN_MSQUARED edens *= DENSITY_CGS_IN_MSQUARED eos_name = self.name if not np.all(np.diff(press) > 0): keep_idx = np.where(np.diff(press) > 0)[0] + 1 keep_idx =
np.concatenate(([0], keep_idx))
numpy.concatenate
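A quick illustration of the pattern the completion above finishes, as a hedged sketch on toy data (not part of the dataset row):
# Keep row 0 plus every row whose pressure increased relative to its
# immediate predecessor, turning a non-monotonic table into one that
# interpolators can accept. Single pass, as in the source; it does not
# guarantee global monotonicity for arbitrary input.
import numpy as np

press = np.array([1.0, 2.0, 1.5, 3.0, 3.0, 4.0])
edens = np.arange(6, dtype=float)

if not np.all(np.diff(press) > 0):
    keep_idx = np.where(np.diff(press) > 0)[0] + 1  # rows that increased
    keep_idx = np.concatenate(([0], keep_idx))      # always keep the first row
    press, edens = press[keep_idx], edens[keep_idx]

print(press)  # [1. 2. 3. 4.]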
""" Testing array utilities """ import sys import numpy as np from ..arrfuncs import as_native_array, pinv, eigh from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import assert_true, assert_false, assert_equal, assert_raises NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>' SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<' def test_as_native(): arr = np.arange(5) # native assert_equal(arr.dtype.byteorder, '=') narr = as_native_array(arr) assert_true(arr is narr) sdt = arr.dtype.newbyteorder('s') barr = arr.astype(sdt) assert_equal(barr.dtype.byteorder, SWAPPED_ORDER) narr = as_native_array(barr) assert_false(barr is narr)
assert_array_equal(barr, narr)
numpy.testing.assert_array_equal
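The test above pins down the expected behaviour of as_native_array; a minimal stand-in makes the contract explicit. This is an illustration under stated assumptions, not dipy's actual implementation:
# Return the input unchanged if it is already in native byte order,
# otherwise return a native-order copy with identical values.
import numpy as np

def as_native_array_sketch(arr):
    if arr.dtype.isnative:
        return arr
    return arr.astype(arr.dtype.newbyteorder('='))  # '=' means native order

arr = np.arange(5)
swapped = arr.astype(arr.dtype.newbyteorder('S'))   # 'S' swaps the byte order
native = as_native_array_sketch(swapped)
assert as_native_array_sketch(arr) is arr           # native input: no copy
assert native is not swapped                        # swapped input: new array
np.testing.assert_array_equal(swapped, native)      # values survive the swap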
# -*- coding: utf-8 -*- """ Copyright (c) 2017 by <NAME> This file is part of Statistical Parameter Estimation Tool (SPOTPY). :author: <NAME>, <NAME> This code tests the likelihood framework and presents all existing functions. """ import numpy as np try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy import unittest from spotpy.likelihoods import LikelihoodError # We use all available likelihood functions. The pydoc of every function tells whether we can add a # parameter `param` to the function, which includes model parameters. The `param` must be None or a tuple with values # and names. If `param` is None, the needed values are calculated by the function itself. class TestLikelihood(unittest.TestCase): def setUp(self): np.random.seed(12) self.normal_data, self.normal_comparedata = np.random.normal(1500, 2530, 20), np.random.normal(15, 25, 20) self.binom_data, self.binom_comparedata = np.random.binomial(20, 0.1, 20), np.random.binomial(20, 0.1, 20) self.do_print = True def test_logLikelihood(self): l_normal = spotpy.likelihoods.logLikelihood(self.normal_data, self.normal_comparedata) self.assertGreaterEqual(np.abs(l_normal), 900) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("logLikelihood: " + str(l_normal)) l_binom = spotpy.likelihoods.logLikelihood(self.binom_data, self.binom_comparedata) self.assertGreaterEqual(np.abs(l_binom), 900) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("logLikelihood: " + str(l_binom)) def test_gaussianLikelihoodMeasErrorOut(self): l_normal = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.normal_data, self.normal_comparedata) self.assertGreaterEqual(-40, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("gaussianLikelihoodMeasErrorOut: " + str(l_normal)) l_binom = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(self.binom_data, self.binom_comparedata) self.assertGreaterEqual(-40, l_binom) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("gaussianLikelihoodMeasErrorOut: " + str(l_binom)) def test_gaussianLikelihoodHomoHeteroDataError(self): l_normal = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.normal_data, self.normal_comparedata) self.assertGreaterEqual(5, np.abs(l_normal)) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("gaussianLikelihoodHomoHeteroDataError: " + str(l_normal)) l_binom = spotpy.likelihoods.gaussianLikelihoodHomoHeteroDataError(self.binom_data, self.binom_comparedata) self.assertGreaterEqual(10, np.abs(l_binom)) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("gaussianLikelihoodHomoHeteroDataError: " + str(l_binom)) def test_LikelihoodAR1NoC(self): l_list = [] l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata, params=([0.98], ["likelihood_phi"]))) try: l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata, params=([], []))) except LikelihoodError as e: print("LikelihoodError occurred: " + str(e)) l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.normal_data, self.normal_comparedata, params=([1.1], ["likelihood_phi"]))) l_list.append(spotpy.likelihoods.LikelihoodAR1NoC(self.binom_data, self.binom_data)) for l in l_list: self.assertNotEqual(None, l) if self.do_print: print("LikelihoodAR1NoC: " + str(l)) def test_LikelihoodAR1WithC(self): l_normal_list = [] try:
l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata, params=([], []))) except LikelihoodError as e: print("Likelihood Error occurred " + str(e)) l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata, params=([0.98], ["likelihood_phi"]))) l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.normal_data, self.normal_comparedata, params=([1.1], ["likelihood_phi"]))) l_normal_list.append(spotpy.likelihoods.LikelihoodAR1WithC(self.binom_data, self.binom_comparedata)) for l_normal in l_normal_list: self.assertNotEqual(None, l_normal) if self.do_print: print("LikelihoodAR1WithC: " + str(l_normal)) def test_generalizedLikelihoodFunction(self): size = 1000 data, comparedata = np.random.normal(1500, 2530, size), np.random.normal(355, 25, size) param_list = ["likelihood_beta", "likelihood_xi", "likelihood_sigma0", "likelihood_sigma1", "likelihood_phi1", "likelihood_muh"] l_normal_list = [] l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 1, 0.5, 0.567, 0.98, 57.32], param_list))) try: l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([], []))) except LikelihoodError as e: print("Likelihood Error occurred " + str(e)) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([2, 1, 0.5, 0.567, 0.98, 57.32], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 11, 0.5, 0.567, 0.98, 57.32], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 1, 1.5, 0.567, 0.98, 57.32], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 1, 0.5, 1.567, 0.98, 57.32], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 1, 0.5, 0.567, 2.98, 57.32], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 1, 0.5, 0.567, 0.98, 101], param_list))) l_normal_list.append(spotpy.likelihoods.generalizedLikelihoodFunction(data, comparedata, params= ([-0.09, 0.0, 0.5, 0.567, 0.98, 101], param_list))) for l_normal in l_normal_list: self.assertNotEqual(None, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("generalizedLikelihoodFunction: " + str(l_normal)) l_binom = spotpy.likelihoods.generalizedLikelihoodFunction(self.binom_data, self.binom_comparedata) self.assertNotEqual(None, l_binom) self.assertGreaterEqual(-10000, l_binom) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("generalizedLikelihoodFunction: " + str(l_binom)) def test_LaplacianLikelihood(self): l_normal = spotpy.likelihoods.LaplacianLikelihood(self.normal_data, self.normal_comparedata) self.assertNotEqual(None, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("LaplacianLikelihood: " + str(l_normal)) l_binom = spotpy.likelihoods.LaplacianLikelihood(self.binom_data, self.binom_comparedata) self.assertNotEqual(None, l_normal) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("LaplacianLikelihood: " + str(l_binom)) def test_SkewedStudentLikelihoodHomoscedastic(self): l_normal = 
spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.normal_data, self.normal_comparedata) self.assertGreaterEqual(12, np.abs(l_normal)) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHomoscedastic: " + str(l_normal)) l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHomoscedastic(self.binom_data, self.binom_comparedata) self.assertGreaterEqual(17, np.abs(l_binom)) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHomoscedastic: " + str(l_binom)) def test_SkewedStudentLikelihoodHeteroscedastic(self): l_normal_list = [] paramDependencies = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"] l_normal_list.append( spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata, params=([2.4, 0.15, 0.87], paramDependencies))) try: l_normal_list.append( spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata, params=([], []))) except LikelihoodError as e: print("An error occurred: " + str(e)) l_normal_list.append( spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata, params=([1, 0.15, 1.87], paramDependencies))) l_normal_list.append( spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata, params=([1, 0.15, 0.87], paramDependencies))) l_normal_list.append( spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.normal_data, self.normal_comparedata, params=([1, -0.15, 0.87], paramDependencies))) for l_normal in l_normal_list: if not np.isnan(l_normal): self.assertGreaterEqual(-100, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHeteroscedastic: " + str(l_normal)) l_binom = spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedastic(self.binom_data, self.binom_comparedata) if not np.isnan(l_binom): self.assertGreaterEqual(-100, l_binom) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHeteroscedastic: " + str(l_binom)) def test_SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(self): l_normal_list = [] params = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"] l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata, params=([4, 43, 0.4], params))) try: l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata, params=([], []))) except LikelihoodError as e: print("Likelihood Error occurred " + str(e)) l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata, params=([4, 43, 2.4], params))) l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata, params=([1, 43, 0.4], params))) l_normal_list.append(spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata, params=([4, -3, 0.4], params))) for l_normal in l_normal_list: self.assertNotEqual(None, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel: " + str(l_normal)) l_binom = 
spotpy.likelihoods.SkewedStudentLikelihoodHeteroscedasticAdvancedARModel( self.normal_data, self.normal_comparedata) self.assertNotEqual(None, l_binom) self.assertEqual(type(np.float(l_binom)), type(np.float(1))) if self.do_print: print("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel: " + str(l_binom)) def test_NoisyABCGaussianLikelihood(self): l_normal = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.normal_data, self.normal_comparedata) self.assertNotEqual(None, l_normal) self.assertEqual(type(np.float(l_normal)), type(np.float(1))) if self.do_print: print("NoisyABCGaussianLikelihood: " + str(l_normal)) l_binom = spotpy.likelihoods.NoisyABCGaussianLikelihood(self.binom_data, self.binom_data, measerror=[0.0]) self.assertNotEqual(None, l_binom) self.assertEqual(type(np.float(l_binom)), type(
np.float(1)
numpy.float
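One caveat worth noting about the completed API, with a hedged sketch of the equivalent modern assertion pattern:
# numpy.float was simply an alias for the builtin float; it was deprecated
# in NumPy 1.20 and removed in 1.24, so on current NumPy the same type-check
# pattern is written with the builtin directly.
import numpy as np

l_normal = np.float64(-1234.5)                  # a likelihood value, toy number
assert type(float(l_normal)) == type(float(1))  # the pattern used in the tests
assert isinstance(float(l_normal), float)       # equivalent and more idiomatic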
import copy import pdb import numpy as np from scipy import signal from sklearn.preprocessing import normalize from wfdb.processing.basic import get_filter_gain from wfdb.processing.peaks import find_local_peaks from wfdb.io.record import Record class XQRS(object): """ The QRS detector class for the XQRS algorithm. The `XQRS.Conf` class is the configuration class that stores initial parameters for the detection. The `XQRS.detect` method runs the detection algorithm. The process works as follows: - Load the signal and configuration parameters. - Bandpass filter the signal between 5 and 20 Hz, to get the filtered signal. - Apply moving wave integration (MWI) with a Ricker (Mexican hat) wavelet onto the filtered signal, and save the square of the integrated signal. - Conduct learning if specified, to initialize running parameters of noise and QRS amplitudes, the QRS detection threshold, and recent R-R intervals. If learning is unspecified or fails, use default parameters. See the docstring for the `_learn_init_params` method of this class for details. - Run the main detection. Iterate through the local maxima of the MWI signal. For each local maximum: - Check if it is a QRS complex. To be classified as a QRS, it must come after the refractory period, cross the QRS detection threshold, and not be classified as a T-wave if it comes close enough to the previous QRS. If successfully classified, update running detection threshold and heart rate parameters. - If not a QRS, classify it as a noise peak and update running parameters. - Before continuing to the next local maximum, if no QRS was detected within 1.66 times the recent R-R interval, perform backsearch QRS detection. This checks previous peaks using a lower QRS detection threshold. Attributes ---------- sig : 1d ndarray The input ECG signal to apply the QRS detection on. fs : int, float The sampling frequency of the input signal. conf : XQRS.Conf object, optional The configuration object specifying signal configuration parameters. See the docstring of the XQRS.Conf class. Examples -------- >>> import wfdb >>> from wfdb import processing >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0]) >>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs']) >>> xqrs.detect() >>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds]) """ def __init__(self, sig, fs, conf=None): if sig.ndim != 1: raise ValueError('sig must be a 1d numpy array') self.sig = sig self.fs = fs self.sig_len = len(sig) self.conf = conf or XQRS.Conf() self._set_conf() class Conf(object): """ Initial signal configuration object for this QRS detector. Attributes ---------- hr_init : int, float, optional Initial heart rate in beats per minute. Used for calculating recent R-R intervals. hr_max : int, float, optional Hard maximum heart rate between two beats, in beats per minute. Used for refractory period. hr_min : int, float, optional Hard minimum heart rate between two beats, in beats per minute. Used for calculating recent R-R intervals. qrs_width : int, float, optional Expected QRS width in seconds. Used for filter widths and the indirect refractory period. qrs_thr_init : int, float, optional Initial QRS detection threshold in mV. Used when learning is False, or if learning fails. qrs_thr_min : int, float, string, optional Hard minimum detection threshold of QRS wave. Leave as 0 for no minimum. ref_period : int, float, optional The QRS refractory period. t_inspect_period : int, float, optional The period below which a potential QRS complex is inspected to see if it is a T-wave.
""" def __init__(self, hr_init=75, hr_max=200, hr_min=25, qrs_width=0.1, qrs_thr_init=0.13, qrs_thr_min=0, ref_period=0.2, t_inspect_period=0.36): if hr_min < 0: raise ValueError("'hr_min' must be >= 0") if not hr_min < hr_init < hr_max: raise ValueError("'hr_min' < 'hr_init' < 'hr_max' must be True") if qrs_thr_init < qrs_thr_min: raise ValueError("qrs_thr_min must be <= qrs_thr_init") self.hr_init = hr_init self.hr_max = hr_max self.hr_min = hr_min self.qrs_width = qrs_width self.qrs_radius = self.qrs_width / 2 self.qrs_thr_init = qrs_thr_init self.qrs_thr_min = qrs_thr_min self.ref_period = ref_period self.t_inspect_period = t_inspect_period def _set_conf(self): """ Set configuration parameters from the Conf object into the detector object. Time values are converted to samples, and amplitude values are in mV. Parameters ---------- N/A Returns ------- N/A """ self.rr_init = 60 * self.fs / self.conf.hr_init self.rr_max = 60 * self.fs / self.conf.hr_min self.rr_min = 60 * self.fs / self.conf.hr_max # Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1 self.qrs_width = int(self.conf.qrs_width * self.fs) self.qrs_radius = int(self.conf.qrs_radius * self.fs) self.qrs_thr_init = self.conf.qrs_thr_init self.qrs_thr_min = self.conf.qrs_thr_min self.ref_period = int(self.conf.ref_period * self.fs) self.t_inspect_period = int(self.conf.t_inspect_period * self.fs) def _bandpass(self, fc_low=5, fc_high=20): """ Apply a bandpass filter onto the signal, and save the filtered signal. Parameters ---------- fc_low : int, float The low frequency cutoff for the filter. fc_high : int, float The high frequency cutoff for the filter. Returns ------- N/A """ self.fc_low = fc_low self.fc_high = fc_high b, a = signal.butter(2, [float(fc_low) * 2 / self.fs, float(fc_high) * 2 / self.fs], 'pass') self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto], axis=0) # Save the passband gain (x2 due to double filtering) self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]), self.fs) * 2 def _mwi(self): """ Apply moving wave integration (MWI) with a Ricker (Mexican hat) wavelet onto the filtered signal, and save the square of the integrated signal. The width of the hat is equal to the QRS width. After integration, find all local peaks in the MWI signal. Parameters ---------- N/A Returns ------- N/A """ wavelet_filter = signal.ricker(self.qrs_width, 4) self.sig_i = signal.filtfilt(wavelet_filter, [1], self.sig_f, axis=0) ** 2 # Save the MWI gain (x2 due to double filtering) and the total # gain from raw to MWI self.mwi_gain = get_filter_gain(wavelet_filter, [1], np.mean([self.fc_low, self.fc_high]), self.fs) * 2 self.transform_gain = self.filter_gain * self.mwi_gain self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius) self.n_peaks_i = len(self.peak_inds_i) def _learn_init_params(self, n_calib_beats=8): """ Find a number of consecutive beats and use them to initialize: - recent QRS amplitude - recent noise amplitude - recent R-R interval - QRS detection threshold The learning works as follows: - Find all local maxima (largest sample within `qrs_radius` samples) of the filtered signal. - Inspect the local maxima until `n_calib_beats` beats are found: - Calculate the cross-correlation between a Ricker wavelet of length `qrs_width`, and the filtered signal segment centered around the local maximum. - If the cross-correlation exceeds 0.6, classify it as a beat. - Use the beats to initialize the previously described parameters. 
- If the system fails to find enough beats, the default parameters will be used instead. See the docstring of `XQRS._set_default_init_params` for details. Parameters ---------- n_calib_beats : int, optional Number of calibration beats to detect for learning Returns ------- N/A """ if self.verbose: print('Learning initial signal parameters...') last_qrs_ind = -self.rr_max qrs_inds = [] qrs_amps = [] noise_amps = [] ricker_wavelet = signal.ricker(self.qrs_radius * 2, 4).reshape(-1,1) # Find the local peaks of the signal. peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius) # Peak numbers at least qrs_width away from signal boundaries peak_nums_r = np.where(peak_inds_f > self.qrs_width)[0] peak_nums_l = np.where(peak_inds_f <= self.sig_len - self.qrs_width)[0] # Skip if no peaks in range if (not peak_inds_f.size or not peak_nums_r.size or not peak_nums_l.size): if self.verbose: print('Failed to find %d beats during learning.' % n_calib_beats) self._set_default_init_params() return # Go through the peaks and find QRS peaks and noise peaks. # only inspect peaks with at least qrs_radius around either side for peak_num in range(peak_nums_r[0], peak_nums_l[-1]): i = peak_inds_f[peak_num] # Calculate cross-correlation between the filtered signal # segment and a Ricker wavelet # Question: should the signal be squared? Case for inverse QRS # complexes sig_segment = normalize((self.sig_f[i - self.qrs_radius: i + self.qrs_radius]).reshape(-1, 1), axis=0) xcorr = np.correlate(sig_segment[:, 0], ricker_wavelet[:,0]) # Classify as QRS if xcorr is large enough if xcorr > 0.6 and i-last_qrs_ind > self.rr_min: last_qrs_ind = i qrs_inds.append(i) qrs_amps.append(self.sig_i[i]) else: noise_amps.append(self.sig_i[i]) if len(qrs_inds) == n_calib_beats: break # Found enough calibration beats to initialize parameters if len(qrs_inds) == n_calib_beats: if self.verbose: print('Found %d beats during learning.' % n_calib_beats + ' Initializing using learned parameters') # QRS amplitude is most important. qrs_amp = np.mean(qrs_amps) # Set noise amplitude if found if noise_amps: noise_amp = np.mean(noise_amps) else: # Set default of 1/10 of QRS amplitude noise_amp = qrs_amp / 10 # Get R-R intervals of consecutive beats, if any. rr_intervals =
np.diff(qrs_inds)
numpy.diff
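The completed line is the hinge of the learning step; a hedged sketch of what it yields downstream, where fs and the QRS indices are assumed toy values, not taken from the code above:
# np.diff over detected QRS sample indices gives R-R intervals in samples,
# which the Conf relations above (rr = 60 * fs / hr) map to heart rate.
import numpy as np

fs = 250                                    # sampling frequency in Hz (assumed)
qrs_inds = np.array([110, 312, 508, 705])   # hypothetical QRS sample indices
rr_intervals = np.diff(qrs_inds)            # R-R intervals in samples
hr_bpm = 60 * fs / rr_intervals             # instantaneous heart rate in bpm
print(rr_intervals, hr_bpm.round(1))        # [202 196 197] [74.3 76.5 76.1]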
#!/usr/bin/env python # # Copyright (C) 2019 # <NAME> # Centre of Excellence Cognitive Interaction Technology (CITEC) # Bielefeld University # # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions # and the following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg16 import preprocess_input from tensorflow.keras.models import Model from tensorflow.keras import backend as K from sklearn.decomposition import PCA import os import numpy as np import pickle as pkl from PIL import Image from a2vq.src.helper import get_files_of_type def preprocess_input(x, dim_ordering='default'): if dim_ordering == 'default': dim_ordering = K.image_dim_ordering() assert dim_ordering in {'tf', 'th'} if dim_ordering == 'th': x[:, 0, :, :] -= 103.939 x[:, 1, :, :] -= 116.779 x[:, 2, :, :] -= 123.68 # 'RGB'->'BGR' x = x[:, ::-1, :, :] else: x[:, :, :, 0] -= 103.939 x[:, :, :, 1] -= 116.779 x[:, :, :, 2] -= 123.68 # 'RGB'->'BGR' x = x[:, :, :, ::-1] return x def create_arbitrary_image_ds(image_dir, output_dir, image_list=None): deep_feats, image_list, img_tuple_resized = feature_extraction_of_arbitrary_image_ds(image_dir,image_list) with open(os.path.join(output_dir,'features.pkl'),'wb') as f: pkl.dump(deep_feats,f) with open(os.path.join(output_dir,'filenames.pkl'),'wb') as f: pkl.dump(image_list,f) with open(os.path.join(output_dir,'images.pkl'),'wb') as f: pkl.dump(img_tuple_resized,f) print('done') def feature_extraction_of_arbitrary_image_ds(image_dir, image_list=None): if image_list is None: image_list = get_files_of_type(image_dir,'jpg') image_paths = [] for i in image_list: image_paths.append(os.path.join(image_dir, i)) img_tuple = read_images(image_paths) img_tuple_rgb = convert_image_tuple_to_rgb(img_tuple) #img_tuple_resized = resize_image_tuple(img_tuple_rgb,size=(299,299,3)) img_tuple_resized = resize_image_tuple_to_max_edge_length(img_tuple_rgb, 80) deep_feats = get_deep_feats(img_tuple_rgb,'vgg16') return deep_feats, image_list, img_tuple_resized def get_deep_feats(imgs, 
use_model='vgg16', pca_dim=None): model, imgsize = get_model(use_model) if pca_dim != None: pca = PCA(pca_dim) rtn_feats = [] for img in imgs: img_data = preprocess_image(img,imgsize) features = model.predict(img_data).flatten() rtn_feats.append(features) rtn_feats = np.array(rtn_feats) if pca_dim != None: rtn_feats = pca.fit_transform(rtn_feats) return rtn_feats def preprocess_image(img, imgsize): img_load = np.array(Image.fromarray(img).resize(imgsize[:2])) img_data = image.img_to_array(img_load) img_data =
np.expand_dims(img_data, axis=0)
numpy.expand_dims
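A hedged sketch of why the completion adds a leading axis; the 224x224 size is an assumption for illustration, not taken from the code above:
# Keras models predict on batches shaped (n, height, width, channels), so a
# single image of shape (H, W, 3) needs a batch axis before model.predict.
import numpy as np

img_data = np.zeros((224, 224, 3), dtype=np.float32)  # one preprocessed image
batch = np.expand_dims(img_data, axis=0)              # -> (1, 224, 224, 3)
assert batch.shape == (1, 224, 224, 3)
# equivalent spelling: img_data[np.newaxis, ...]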
# -*- coding: utf-8 -*- """ Created on Wed Jan 16 11:27:05 2019 @author: <NAME> """ """ Quick Start In order to use this program, you will need to do these things: * Specify a value for the variable 'server' to indicate whether local files will be input for, perhaps, debugging mode or file paths on a remote server will be used. * Specify appropriate values for the variables 'path1' and 'files1' for input file paths. * Determine whether the variable 'files1Short' is desired. This was based on the author's file-naming conventions and will not be appropriate in all circumstances. Other parts of the program will need to be revised if this variable is not used for a shorter graph title. * Ensure that input data is in the format indicated in comments below. """ """ This Python 3 code performs the following tasks: * Performs statistical tests on hit rate data: - Tests whether the distribution of hits across the four categories is different from a random allocation of hits across categories in proportion to the number of articles in each category in a statistically significant way. - Tests whether categorizing articles along the dimensions of novelty and conventionality, individually, has explanatory power. - Tests whether the number of hits in each category differs in a statistically significant way from a random distribution of hit articles among the categories by binning the remaining three categories together. This mitigates issues that arise in some circumstances when an insufficient expected number of hits prevents a valid analysis in the case of the test outlined in the first bullet point above. * Performs the Spearman Rank Correlation Test between citation_count and all other data columns * Outputs JSON files to be used by a subsequent program to graph the data * Outputs data in a format amenable to inclusion in LaTeX file tables """ """ This program requires all of the Python packages below, which are all included with the Anaconda distribution of Python """ import pandas as pd import numpy as np from scipy.stats import spearmanr from scipy.stats import chisquare from scipy.stats import binom import json import re server = True """ This function formats data for output in LaTeX format to a specified number of decimal places """ def formFloat (num,places): fStr = '{:.'+str(places)+'f}' num = float(int(float(fStr.format(num))*10**places+0.5))/10**places if num <= 0.025: # or num >= 0.975: return '\\textbf{'+fStr.format(num)+'}' elif num <= 0.05: # or num >= .95: return '\\textit{'+fStr.format(num)+'}' else: return fStr.format(num) """ This function formats data for output in LaTeX format. It also includes code for a dagger symbol where the number of expected hits was less than the minimum required for a valid statistical test """ def formFloatDagger (num,places): fStr = '{:.'+str(places)+'f}' num[0] = float(int(float(fStr.format(num[0]))*10**places+0.5))/10**places if num[0] <= 0.025: # or num[0] >= 0.975: if num[1] >= 5.0: return '\\textbf{'+fStr.format(num[0])+'}' else: return '$\dagger$ \\textbf{'+fStr.format(num[0])+'} ' elif num[0] <= 0.05: # or num[0] >= .95: if num[1] >= 5.0: return '\\textit{'+fStr.format(num[0])+'}' else: return '$\dagger$ \\textit{'+fStr.format(num[0])+'} ' else: return fStr.format(num[0]) """ This function formats data for output in LaTeX format. It also permits output of the string 'NA' when a numerical value is not passed to the function.
""" def formFloatDaggerNA (num,places): try: fStr = '{:.'+str(places)+'f}' num = float(int(float(fStr.format(num))*10**places+0.5))/10**places if num <= 0.025: # or num >= 0.975: return '\\textbf{'+fStr.format(num)+'}' elif num <= 0.05: # or num >= .95: return '\\textit{'+fStr.format(num)+'}' else: return fStr.format(num) except: return str(num) """ Calculates hit rate except returns 0.0 when the total number of articles in a category is zero to avoid dividing by zero """ def percent(row): if row['total']> 0: return row['hit'] / row['total'] else: return 0.0 """ This if-else block permits an alternate, local file to be input during debugging. server is a Boolean variable that, if True, indicates that the path and files in the if block are input and, otherwise, the path and files in the else block are input. """ """ Input file format """ """ Input requires the first line to have field names, with comma-delimited data on subsequent lines Data dictionary: * source_id: a unique identifier for an article. We used IDs from the Web of Science under license from Clarivate Analytics, which we cannot disclose. These can be string values (do not enclose in quotes in data file if this is the case). * med: the median z-score of all the citations in the source article * ten: the 10th percentile z-score (left tail) of the citation z-scores * one: the 1st percentile z-score (left tail) of the citation z-scores * citation_count: the number of times the source article was cited Example: source_id,med,ten,one,citation_count 0,4.37535958641463,-0.368176148773802,-1.84767079802106,1 1,8.94701613716861,0.695385836097657,-1.0789085501296,6 2,17.9740470024929,-8.85622661474813,-10.3102229485467,14 """ """ The Boolean variable 'server' controls which paths and files are input """ if server: # settings for production runs on server path1 = '/path_to_remote_data_folder/' files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv','data_1995/imm95_pubwise_zsc_med.csv','data_1995/metab95_pubwise_zsc_med.csv', 'data_1995/ap95_pubwise_zsc_med.csv', \ 'data_1985/d1000_85_pubwise_zsc_med.csv','data_1985/imm85_pubwise_zsc_med.csv','data_1985/metab85_pubwise_zsc_med.csv', 'data_1985/ap85_pubwise_zsc_med.csv', \ 'data_2005/d1000_2005_pubwise_zsc_med.csv', 'data_2005/imm2005_pubwise_zsc_med.csv','data_2005/metab2005_pubwise_zsc_med.csv', 'data_2005/ap2005_pubwise_zsc_med.csv'] else: # settings for local debugging path1 = '/path_to_local_data_folder/' files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv'] """ This statement extracts the filename from the path for succinct identification of the filename """ """ This statement may not be appropriate for alternate file naming conventions """ files1Short = [x.split('/')[-1] for x in files1] """ Extract year and data set topic from filename """ years = [re.search('data_\d{4}',x).group(0).replace('data_','') for x in files1] datasets = [re.sub('\d+','',re.search('/\w+_',x).group(0).split('_')[0].replace('/','')) for x in files1] transDataset = {'imm':'Immunology', 'd':'Web of Science', 'metab':'Metabolism', 'ap':'Applied Physics'} """ These lists are used for coding results in LaTeX files, outputting JSON files, and creating pandas DataFrames within the program """ cols = ['med','ten','one'] catConven = ['LC','HC'] catNovel = ['HN','LN'] catHit = ['hit','non-hit'] countRows = ['LNHC','HNLC','LNLC','HNHC'] countCols = catHit countRowsBin = ['LN','HN','LC','HC'] """ Iterate through the input files """ for i in range(len(files1)): """ These statements create empty dictionaries for storing results"""
binomRes = {} # dictionary for results of 2-category tests Fig2Res = {} # dictionary for results of 4-category tests for data in the form of Uzzi's Fig. 2 Fig2IndRes = {} # dictionary for results of testing each of the 4 categories in Uzzi's Fig. 2 individually graphDic = {} # Dictionary to store visualization data df = pd.read_csv(path1+files1[i]) # read file jsonCounts = json.loads('{}') # JSON string to store the results #dicNewRow = {'file':files1[i]} """ Compute Spearman Rank Correlation Tests on citation_count column with other columns """ dfRes = pd.DataFrame(columns=['file']+cols) # DataFrame for correlation results newRow = [files1[i]] for col in cols: print('Spearman Rank Correlation for '+files1[i]+': Columns '+col+' and '+'citation_count') result = spearmanr(df['citation_count'], df[col]) print(result,'\n\n') newRow.append('Col. '+col+': corr = '+str(result[0]) + ', p = ' + str(result[1])) #dicNewRow[col] = str(result[0]) + ',' + str(result[1]) dfRes.loc[files1[i]] = newRow #['1',1,2,3] #pd.DataFrame.from_dict(dicNewRow, orient='index') #dfRes = pd.concat([dfRes,pd.DataFrame.from_dict(dicNewRow, orient='index')]) """ Set Hits and Novelty thresholds and create new columns in the df DataFrame to store the categorical labels """ citPerc10 = df['citation_count'].quantile(0.9) citPerc5 = df['citation_count'].quantile(0.95) citPerc2 = df['citation_count'].quantile(0.98) citPerc1 = df['citation_count'].quantile(0.99) median = df['med'].median() """ Create DataFrame columns for categorical variables """ df['conven'] =
np.where(df['med']<=median,catConven[0],catConven[1])
numpy.where
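A hedged sketch of the completed pattern on a toy frame with hypothetical values:
# np.where acts as a vectorised if/else, turning the median split into the
# 'LC'/'HC' labels assigned to df['conven'] above.
import numpy as np
import pandas as pd

df = pd.DataFrame({'med': [0.2, 1.7, 0.9, 3.1]})
median = df['med'].median()                               # 1.3 for this toy data
df['conven'] = np.where(df['med'] <= median, 'LC', 'HC')
print(df['conven'].tolist())                              # ['LC', 'HC', 'LC', 'HC']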
#!/usr/bin/env python # -*- coding: utf-8 -*- '''lcproc.py - <NAME> (<EMAIL>) - May 2017 This contains functions that serve as examples for running large batch jobs processing HAT light curves. ''' ############# ## LOGGING ## ############# import logging from datetime import datetime from traceback import format_exc # setup a logger LOGGER = None LOGMOD = __name__ DEBUG = False def set_logger_parent(parent_name): globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD)) def LOGDEBUG(message): if LOGGER: LOGGER.debug(message) elif DEBUG: print('[%s - DBUG] %s' % ( datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), message) ) def LOGINFO(message): if LOGGER: LOGGER.info(message) else: print('[%s - INFO] %s' % ( datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), message) ) def LOGERROR(message): if LOGGER: LOGGER.error(message) else: print('[%s - ERR!] %s' % ( datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), message) ) def LOGWARNING(message): if LOGGER: LOGGER.warning(message) else: print('[%s - WRN!] %s' % ( datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), message) ) def LOGEXCEPTION(message): if LOGGER: LOGGER.exception(message) else: print( '[%s - EXC!] %s\nexception was: %s' % ( datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), message, format_exc() ) ) ############# ## IMPORTS ## ############# import os import os.path import sys try: import cPickle as pickle from cStringIO import StringIO as strio except: import pickle from io import BytesIO as strio import gzip import glob import shutil import multiprocessing as mp from concurrent.futures import ProcessPoolExecutor import base64 import numpy as np import scipy.spatial as sps import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt try: from tqdm import tqdm TQDM = True except: TQDM = False pass # to turn a list of keys into a dict address # from https://stackoverflow.com/a/14692747 from functools import reduce from operator import getitem def dict_get(datadict, keylist): return reduce(getitem, keylist, datadict) ################### ## LOCAL IMPORTS ## ################### # LC reading functions from astrobase.hatsurveys.hatlc import read_and_filter_sqlitecurve, \ read_csvlc, normalize_lcdict_byinst from astrobase.hatsurveys.hplc import read_hatpi_textlc, read_hatpi_pklc from astrobase.astrokep import read_kepler_fitslc, read_kepler_pklc from astrobase import periodbase, checkplot from astrobase.varclass import varfeatures, starfeatures, periodicfeatures from astrobase.lcmath import normalize_magseries, \ time_bin_magseries_with_errs, sigclip_magseries from astrobase.periodbase.kbls import bls_snr from astrobase.checkplot import _pkl_magseries_plot, \ _pkl_phased_magseries_plot, xmatch_external_catalogs, \ _read_checkplot_picklefile, _write_checkplot_picklefile from astrobase.magnitudes import jhk_to_sdssr ############################################# ## MAPS FOR LCFORMAT TO LCREADER FUNCTIONS ## ############################################# def read_pklc(lcfile): ''' This just reads a pickle. 
''' try: with open(lcfile,'rb') as infd: lcdict = pickle.load(infd) except UnicodeDecodeError: with open(lcfile,'rb') as infd: lcdict = pickle.load(infd, encoding='latin1') return lcdict # these translate filter operators given as strings to Python operators FILTEROPS = {'eq':'==', 'gt':'>', 'ge':'>=', 'lt':'<', 'le':'<=', 'ne':'!='} # used to figure out which period finder to run given a list of methods PFMETHODS = {'bls':periodbase.bls_parallel_pfind, 'gls':periodbase.pgen_lsp, 'aov':periodbase.aov_periodfind, 'mav':periodbase.aovhm_periodfind, 'pdm':periodbase.stellingwerf_pdm, 'acf':periodbase.macf_period_find, 'win':periodbase.specwindow_lsp} # LC format -> [default fileglob, function to read LC format] LCFORM = { 'hat-sql':[ '*-hatlc.sqlite*', # default fileglob read_and_filter_sqlitecurve, # function to read this LC ['rjd','rjd'], # default timecols to use for period/var ['aep_000','atf_000'], # default magcols to use for period/var ['aie_000','aie_000'], # default errcols to use for period/var False, # default magsarefluxes = False normalize_lcdict_byinst, # default special normalize function ], 'hat-csv':[ '*-hatlc.csv*', read_csvlc, ['rjd','rjd'], ['aep_000','atf_000'], ['aie_000','aie_000'], False, normalize_lcdict_byinst, ], 'hp-txt':[ 'HAT-*tfalc.TF1*', read_hatpi_textlc, ['rjd','rjd'], ['iep1','itf1'], ['ire1','ire1'], False, None, ], 'hp-pkl':[ '*-pklc.pkl*', read_hatpi_pklc, ['rjd','rjd'], ['iep1','itf1'], ['ire1','ire1'], False, None, ], 'kep-fits':[ '*_llc.fits', read_kepler_fitslc, ['time','time'], ['sap.sap_flux','pdc.pdc_sapflux'], ['sap.sap_flux_err','pdc.pdc_sapflux_err'], True, None, ], 'kep-pkl':[ '-keplc.pkl', read_kepler_pklc, ['time','time'], ['sap.sap_flux','pdc.pdc_sapflux'], ['sap.sap_flux_err','pdc.pdc_sapflux_err'], True, None, ], # binned light curve format 'binned-hat':[ '*binned*hat*.pkl', read_pklc, ['binned.aep_000.times','binned.atf_000.times'], ['binned.aep_000.mags','binned.atf_000.mags'], ['binned.aep_000.errs','binned.atf_000.errs'], False, None, ], 'binned-hp':[ '*binned*hp*.pkl', read_pklc, ['binned.iep1.times','binned.itf1.times'], ['binned.iep1.mags','binned.itf1.mags'], ['binned.iep1.errs','binned.itf1.errs'], False, None, ], 'binned-kep':[ '*binned*kep*.pkl', read_pklc, ['binned.sap_flux.times','binned.pdc_sapflux.times'], ['binned.sap_flux.mags','binned.pdc_sapflux.mags'], ['binned.sap_flux.errs','binned.pdc_sapflux.errs'], True, None, ], } def register_custom_lcformat(formatkey, fileglob, readerfunc, timecols, magcols, errcols, magsarefluxes=False, specialnormfunc=None): '''This adds a custom format LC to the dict above. Allows handling of custom format light curves for astrobase lcproc drivers. Once the format is successfully registered, light curves should work transparently with all of the functions below, by simply calling them with the formatkey in the lcformat keyword argument. Args ---- formatkey: <string>: what to use as the key for your light curve format fileglob: <string>: the default fileglob to use to search for light curve files in this custom format. This is a string like '*-whatever-???-*.*??-.lc'. readerfunc: <function>: this is the function to use to read light curves in the custom format. 
This should return a dictionary (the 'lcdict') with the following signature (the keys listed below are required, but others are allowed): {'objectid':'<this object's name>', 'objectinfo':{'ra':<this object's right ascension> 'decl':<this object's declination>}, ...time columns, mag columns, etc.} timecols, magcols, errcols: <list>: these are all lists of strings indicating which keys in the lcdict to use for processing. The lists must all have the same dimensions, e.g. if timecols = ['timecol1','timecol2'], then magcols must be something like ['magcol1','magcol2'] and errcols must be something like ['errcol1', 'errcol2']. This allows you to process multiple apertures or multiple types of measurements in one go. Each element in these lists can be a simple key, e.g. 'time' (which would correspond to lcdict['time']), or a composite key, e.g. 'aperture1.times.rjd' (which would correspond to lcdict['aperture1']['times']['rjd']). See the LCFORM dict above for examples. magsarefluxes: <boolean>: if this is True, then all functions will treat the magnitude columns as flux instead, so things like default normalization and sigma-clipping will be done correctly. If this is False, magnitudes will be treated as magnitudes. specialnormfunc: <function>: if you intend to use a special normalization function for your lightcurves, indicate it here. If None, the default normalization method used by lcproc is to find gaps in the time-series, normalize measurements grouped by these gaps to zero, then normalize the entire magnitude time series to global time series median using the astrobase.lcmath.normalize_magseries function. The function should take and return an lcdict of the same form as that produced by readerfunc above. For an example of a special normalization function, see normalize_lcdict_by_inst in the astrobase.hatlc module. ''' globals()['LCFORM'][formatkey] = [ fileglob, readerfunc, timecols, magcols, errcols, magsarefluxes, specialnormfunc ] LOGINFO('added %s to registry' % formatkey) ####################### ## UTILITY FUNCTIONS ## ####################### def lclist_parallel_worker(task): ''' This is a parallel worker for makelclist. task[0] = lcf task[1] = columns task[2] = readerfunc task[3] = lcndetkey ''' lcf, columns, readerfunc, lcndetkey = task # we store the full path of the light curve lcobjdict = {'lcfname':lcf} try: # read the light curve in lcdict = readerfunc(lcf) if len(lcdict) == 2: lcdict = lcdict[0] # insert all of the columns for colkey in columns: if '.' in colkey: getkey = colkey.split('.') else: getkey = [colkey] try: thiscolval = dict_get(lcdict, getkey) except: LOGWARNING('column %s does not exist for %s' % (colkey, lcf)) thiscolval = np.nan # update the lcobjdict with this value lcobjdict[getkey[-1]] = thiscolval except Exception as e: LOGEXCEPTION('could not figure out columns for %s' % lcf) # insert all of the columns as nans for colkey in columns: if '.' in colkey: getkey = colkey.split('.') else: getkey = [colkey] thiscolval = np.nan # update the lclistdict with this value lcobjdict[getkey[-1]] = thiscolval # now get the actual ndets; this excludes nans and infs for dk in lcndetkey: try: if '.' 
in dk: getdk = dk.split('.') else: getdk = [dk] ndetcol = dict_get(lcdict, getdk) actualndets = ndetcol[np.isfinite(ndetcol)].size lcobjdict['ndet_%s' % getdk[-1]] = actualndets except: lcobjdict['ndet_%s' % getdk[-1]] = np.nan return lcobjdict def make_lclist(basedir, outfile, lcformat='hat-sql', fileglob=None, recursive=True, columns=['objectid', 'objectinfo.ra','objectinfo.decl', 'objectinfo.ndet','objectinfo.sdssr'], makecoordindex=['objectinfo.ra','objectinfo.decl'], maxlcs=None, nworkers=20): '''This generates a list file compatible with filter_lclist below. Given a base directory where all the files are, and a light curve format, this will find all light curves, pull out the keys in each lcdict requested in the columns kwarg for each object, and write them to the requested output pickle file. These keys should be pointers to scalar values (i.e. something like objectinfo.ra is OK, but something like rjd won't work because it's a vector). If basedir is a list of directories, all of these will be searched recursively to find the matching light curve files. All of the keys in the columns kwarg should be present in the lcdict generated by the reader function for the specified lcformat. fileglob is a shell glob to use to select the filenames. If None, then the default one for the provided lcformat will be used. If recursive is True, then the function will search recursively in basedir for any light curves matching the specified criteria. This may take a while, especially on network filesystems. If makecoordindex is not None, it must be a two-element list of the lcdict keys for the right ascension and declination for each object. These will be used to make a kdtree for fast look-up by position later by filter_lclist. This returns a pickle file. ''' if lcformat not in LCFORM or lcformat is None: LOGERROR("can't figure out the light curve format") return if not fileglob: fileglob = LCFORM[lcformat][0] readerfunc = LCFORM[lcformat][1] # this is to get the actual ndet # set to the magnitudes column lcndetkey = LCFORM[lcformat][3] # handle the case where basedir is a list of directories if isinstance(basedir, list): matching = [] for bdir in basedir: # now find the files LOGINFO('searching for %s light curves in %s ...' % (lcformat, bdir)) if recursive == False: matching.extend(glob.glob(os.path.join(bdir, fileglob))) else: # use recursive glob for Python 3.5+ if sys.version_info[:2] > (3,4): matching.extend(glob.glob(os.path.join(bdir, '**', fileglob), recursive=True)) # otherwise, use os.walk and glob else: # use os.walk to go through the directories walker = os.walk(bdir) for root, dirs, files in walker: for sdir in dirs: searchpath = os.path.join(root, sdir, fileglob) foundfiles = glob.glob(searchpath) if foundfiles: matching.extend(foundfiles) # otherwise, handle the usual case of one basedir to search in else: # now find the files LOGINFO('searching for %s light curves in %s ...' 
% (lcformat, basedir)) if recursive == False: matching = glob.glob(os.path.join(basedir, fileglob)) else: # use recursive glob for Python 3.5+ if sys.version_info[:2] > (3,4): matching = glob.glob(os.path.join(basedir, '**', fileglob),recursive=True) # otherwise, use os.walk and glob else: # use os.walk to go through the directories walker = os.walk(basedir) matching = [] for root, dirs, files in walker: for sdir in dirs: searchpath = os.path.join(root, sdir, fileglob) foundfiles = glob.glob(searchpath) if foundfiles: matching.extend(foundfiles) # now that we have all the files, process them if matching and len(matching) > 0: LOGINFO('found %s light curves' % len(matching)) # cut down matching to maxlcs if maxlcs: matching = matching[:maxlcs] # prepare the output dict lclistdict = { 'basedir':basedir, 'lcformat':lcformat, 'fileglob':fileglob, 'recursive':recursive, 'columns':columns, 'makecoordindex':makecoordindex, 'nfiles':len(matching), 'objects': { } } # columns that will always be present in the output lclistdict derefcols = ['lcfname'] derefcols.extend(['ndet_%s' % x.split('.')[-1] for x in lcndetkey]) for dc in derefcols: lclistdict['objects'][dc] = [] # fill in the rest of the lclist columns from the columns kwarg for col in columns: # dereference the column thiscol = col.split('.') thiscol = thiscol[-1] lclistdict['objects'][thiscol] = [] derefcols.append(thiscol) # start collecting info LOGINFO('collecting light curve info...') tasks = [(x, columns, readerfunc, lcndetkey) for x in matching] with ProcessPoolExecutor(max_workers=nworkers) as executor: results = executor.map(lclist_parallel_worker, tasks) results = [x for x in results] # update the columns in the overall dict from the results of the # parallel map for result in results: for xcol in derefcols: lclistdict['objects'][xcol].append(result[xcol]) executor.shutdown() # done with collecting info # turn all of the lists in the lclistdict into arrays for col in lclistdict['objects']: lclistdict['objects'][col] = np.array(lclistdict['objects'][col]) # if we're supposed to make a spatial index, do so if (makecoordindex and isinstance(makecoordindex, list) and len(makecoordindex) == 2): try: # deref the column names racol, declcol = makecoordindex racol = racol.split('.')[-1] declcol = declcol.split('.')[-1] # get the ras and decls objra, objdecl = (lclistdict['objects'][racol], lclistdict['objects'][declcol]) # get the xyz unit vectors from ra,decl # since i had to remind myself: # https://en.wikipedia.org/wiki/Equatorial_coordinate_system cosdecl = np.cos(np.radians(objdecl)) sindecl = np.sin(np.radians(objdecl)) cosra = np.cos(np.radians(objra)) sinra = np.sin(np.radians(objra)) xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl)) # generate the kdtree kdt = sps.cKDTree(xyz,copy_data=True) # put the tree into the dict lclistdict['kdtree'] = kdt LOGINFO('kdtree generated for (ra, decl): %s' % makecoordindex) except Exception as e: LOGEXCEPTION('could not make kdtree for (ra, decl): %s' % makecoordindex) raise # write the pickle with open(outfile,'wb') as outfd: pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL) LOGINFO('done. 
LC info -> %s' % outfile) return outfile else: LOGERROR('no files found in %s matching %s' % (basedir, fileglob)) return None def filter_lclist(listpickle, objectidcol='objectid', xmatchexternal=None, xmatchdistarcsec=3.0, externalcolnums=(0,1,2), externalcolnames=['objectid','ra','decl'], externalcoldtypes='U20,f8,f8', externalcolsep=None, externalcommentchar='#', conesearch=None, columnfilters=None, conesearchworkers=1, copylcsto=None): '''This is used to collect light curves based on selection criteria. Uses the output of make_lclist above. This function returns a list of light curves matching various criteria specified by the xmatchexternal, conesearch, and columnfilters kwargs. Use this function to generate input lists for the parallel_varfeatures, parallel_pf, and parallel_timebin functions below. The filter operations are applied in this order if more than one is specified: xmatchexternal -> conesearch -> columnfilters. All results from these filter operations are joined using a logical AND operation. Returns a two-element tuple: (matching_object_lcfiles, matching_objectids) if conesearch and/or column filters are used. If xmatchexternal is also used, a three-element tuple is returned: (matching_object_lcfiles, matching_objectids, extcat_matched_objectids). Args ---- objectidcol is the name of the object ID column in the listpickle file. If not None, xmatchexternal is a filename containing objectids, ras and decs to match the objects in this listpickle to by their positions. Use the other external* kwargs to provide the remaining info required: xmatchdistarcsec is the distance to use when matching in arcseconds. externalcolnums are the zero-indexed column numbers in the file containing objectid, ra, dec values. externalcolnames are the names of the columns to pull out from the external catalog file. externalcoldtypes are numpy dtype specifications for the objectid, ra, decl columns in the external catalog file. externalcolsep is the separator character to use to slice the external catalog file into columns. If None, will use blank space (space/tab) as the separator. conesearch is a three-element list: [center_ra_deg, center_decl_deg, search_radius_deg] This is used with the kdtree in the lclist pickle to only return objects that are in the specified region. conesearchworkers specifies the number of parallel workers that can be launched by scipy to search for objects in the kdtree. columnfilters is a list of strings indicating how to filter on columns in the lclist pickle. All column filters are applied in the specified sequence and are combined with a logical AND operator. The format of each filter string should be: '<lclist column>|<operator>|<operand>' where: <lclist column> is a column in the lclist dict <operator> is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which correspond to the usual operators: <, >, <=, >=, ==, != respectively. <operand> is a float, int, or string. If copylcsto is not None, it is interpreted as a directory target to copy all the light curves that match the specified conditions. ''' with open(listpickle,'rb') as infd: lclist = pickle.load(infd) # generate numpy arrays of the matching object indexes. we do it this way so # we can AND everything at the end, instead of having to look up the objects # at these indices and running the columnfilter on them xmatch_matching_index = np.full_like(lclist['objects'][objectidcol], False, dtype=np.bool) conesearch_matching_index = np.full_like(lclist['objects'][objectidcol], False, dtype=np.bool) # do the xmatch first ext_matches = [] ext_matching_objects = [] if (xmatchexternal and isinstance(xmatchexternal, str) and os.path.exists(xmatchexternal)): try: # read in the external file extcat = np.genfromtxt(xmatchexternal, usecols=externalcolnums, delimiter=externalcolsep, names=externalcolnames, dtype=externalcoldtypes, comments=externalcommentchar) ext_cosdecl = np.cos(np.radians(extcat['decl'])) ext_sindecl = np.sin(np.radians(extcat['decl'])) ext_cosra = np.cos(np.radians(extcat['ra'])) ext_sinra = np.sin(np.radians(extcat['ra'])) ext_xyz = np.column_stack((ext_cosra*ext_cosdecl, ext_sinra*ext_cosdecl, ext_sindecl)) ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0) # get our kdtree our_kdt = lclist['kdtree'] # get the external kdtree ext_kdt = sps.cKDTree(ext_xyz) # do a query_ball_tree extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist) for extind, mind in enumerate(extkd_matchinds): if len(mind) > 0: ext_matches.append(mind[0]) # get the whole matching row for the ext objects recarray ext_matching_objects.append(extcat[extind]) ext_matches = np.array(ext_matches) if ext_matches.size > 0: # update the xmatch_matching_index xmatch_matching_index[ext_matches] = True LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' % (xmatchexternal, xmatchdistarcsec, ext_matches.size)) else: LOGERROR("xmatch: no objects were cross-matched to external " "catalog spec: %s, can't continue" % xmatchexternal) return None, None, None except Exception as e: LOGEXCEPTION('could not match to external catalog spec: %s' % repr(xmatchexternal)) raise # do the cone search next if (conesearch and isinstance(conesearch, list) and len(conesearch) == 3): try: racenter, declcenter, searchradius = conesearch cosdecl = np.cos(np.radians(declcenter)) sindecl = np.sin(np.radians(declcenter)) cosra = np.cos(np.radians(racenter)) sinra = np.sin(np.radians(racenter)) # this is the search distance in xyz unit vectors xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0) # get the kdtree our_kdt = lclist['kdtree'] # look up the coordinates kdtindices = our_kdt.query_ball_point([cosra*cosdecl, sinra*cosdecl, sindecl], xyzdist, n_jobs=conesearchworkers) if kdtindices and len(kdtindices) > 0: LOGINFO('cone search: objects within %.4f deg ' 'of (%.3f, %.3f): %s' % (searchradius, racenter, declcenter, len(kdtindices))) # update the conesearch_matching_index matchingind = kdtindices conesearch_matching_index[np.array(matchingind)] = True # we fail immediately if we found nothing. this assumes the user # cares more about the cone-search than the regular column filters else: LOGERROR("cone-search: no objects were found within " "%.4f deg of (%.3f, %.3f): %s, can't continue" % (searchradius, racenter, declcenter, len(kdtindices))) return None, None except Exception as e: LOGEXCEPTION('cone-search: could not run a cone-search, ' 'is there a kdtree present in %s?'
% listpickle) raise # now that we're done with cone-search, do the column filtering allfilterinds = [] if columnfilters and isinstance(columnfilters, list): # go through each filter for cfilt in columnfilters: try: fcol, foperator, foperand = cfilt.split('|') foperator = FILTEROPS[foperator] # generate the evalstring filterstr = ( "np.isfinite(lclist['objects']['%s']) & " "(lclist['objects']['%s'] %s %s)" ) % (fcol, fcol, foperator, foperand) filterind = eval(filterstr) ngood = lclist['objects'][objectidcol][filterind].size LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood)) allfilterinds.append(filterind) except Exception as e: LOGEXCEPTION('filter: could not understand filter spec: %s' % cfilt) LOGWARNING('filter: not applying this broken filter') # now that we have all the filter indices good to go # logical-AND all the things # make sure we only do filtering if we were told to do so if (xmatchexternal or conesearch or columnfilters): filterstack = [] if xmatchexternal: filterstack.append(xmatch_matching_index) if conesearch: filterstack.append(conesearch_matching_index) if columnfilters: filterstack.extend(allfilterinds) finalfilterind = np.column_stack(filterstack) finalfilterind = np.all(finalfilterind, axis=1) # get the filtered object light curves and object names filteredobjectids = lclist['objects'][objectidcol][finalfilterind] filteredlcfnames = lclist['objects']['lcfname'][finalfilterind] else: filteredobjectids = lclist['objects'][objectidcol] filteredlcfnames = lclist['objects']['lcfname'] # if copylcsto is not None, copy LCs over to it if copylcsto is not None: if not os.path.exists(copylcsto): os.mkdir(copylcsto) if TQDM: lciter = tqdm(filteredlcfnames) else: lciter = filteredlcfnames LOGINFO('copying matching light curves to %s' % copylcsto) for lc in lciter: shutil.copy(lc, copylcsto) LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size) if xmatchexternal and len(ext_matching_objects) > 0: return filteredlcfnames, filteredobjectids, ext_matching_objects else: return filteredlcfnames, filteredobjectids ########################## ## BINNING LIGHT CURVES ## ########################## def timebinlc(lcfile, binsizesec, outdir=None, lcformat='hat-sql', timecols=None, magcols=None, errcols=None, minbinelems=7): ''' This bins the given light curve file in time using binsizesec. ''' if lcformat not in LCFORM or lcformat is None: LOGERROR('unknown light curve format specified: %s' % lcformat) return None (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = LCFORM[lcformat] # override the default timecols, magcols, and errcols # using the ones provided to the function if timecols is None: timecols = dtimecols if magcols is None: magcols = dmagcols if errcols is None: errcols = derrcols # get the LC into a dict lcdict = readerfunc(lcfile) if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict): lcdict = lcdict[0] # skip already binned light curves if 'binned' in lcdict: LOGERROR('this light curve appears to be binned already, skipping...') return None for tcol, mcol, ecol in zip(timecols, magcols, errcols): # dereference the columns and get them from the lcdict if '.' in tcol: tcolget = tcol.split('.') else: tcolget = [tcol] times = dict_get(lcdict, tcolget) if '.' in mcol: mcolget = mcol.split('.') else: mcolget = [mcol] mags = dict_get(lcdict, mcolget) if '.' 
in ecol: ecolget = ecol.split('.') else: ecolget = [ecol] errs = dict_get(lcdict, ecolget) # normalize here if not using special normalization if normfunc is None: ntimes, nmags = normalize_magseries( times, mags, magsarefluxes=magsarefluxes ) times, mags, errs = ntimes, nmags, errs # now bin the mag series as requested binned = time_bin_magseries_with_errs(times, mags, errs, binsize=binsizesec, minbinelems=minbinelems) # put this into the special binned key of the lcdict # we use mcolget[-1] here so we can deal with dereferenced magcols like # sap.sap_flux or pdc.pdc_sapflux if 'binned' not in lcdict: lcdict['binned'] = {mcolget[-1]: {'times':binned['binnedtimes'], 'mags':binned['binnedmags'], 'errs':binned['binnederrs'], 'nbins':binned['nbins'], 'timebins':binned['jdbins'], 'binsizesec':binsizesec}} else: lcdict['binned'][mcolget[-1]] = {'times':binned['binnedtimes'], 'mags':binned['binnedmags'], 'errs':binned['binnederrs'], 'nbins':binned['nbins'], 'timebins':binned['jdbins'], 'binsizesec':binsizesec} # done with binning for all magcols, now generate the output file # this will always be a pickle if outdir is None: outdir = os.path.dirname(lcfile) outfile = os.path.join(outdir, '%s-binned%.1fsec-%s.pkl' % (lcdict['objectid'], binsizesec, lcformat)) with open(outfile, 'wb') as outfd: pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL) return outfile def timebinlc_worker(task): ''' This is a parallel worker for the function below. task[0] = lcfile task[1] = binsizesec task[3] = {'outdir','lcformat','timecols','magcols','errcols','minbinelems'} ''' lcfile, binsizesec, kwargs = task try: binnedlc = timebinlc(lcfile, binsizesec, **kwargs) LOGINFO('%s binned using %s sec -> %s OK' % (lcfile, binsizesec, binnedlc)) except Exception as e: LOGEXCEPTION('failed to bin %s using binsizesec = %s' % (lcfile, binsizesec)) return None def parallel_timebin(lclist, binsizesec, maxobjects=None, outdir=None, lcformat='hat-sql', timecols=None, magcols=None, errcols=None, minbinelems=7, nworkers=32, maxworkertasks=1000): ''' This bins all the light curves in lclist using binsizesec. ''' if outdir and not os.path.exists(outdir): os.mkdir(outdir) if maxobjects is not None: lclist = lclist[:maxobjects] tasks = [(x, binsizesec, {'outdir':outdir, 'lcformat':lcformat, 'timecols':timecols, 'magcols':magcols, 'errcols':errcols, 'minbinelems':minbinelems}) for x in lclist] pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks) results = pool.map(timebinlc_worker, tasks) pool.close() pool.join() resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)} return resdict def parallel_timebin_lcdir(lcdir, binsizesec, maxobjects=None, outdir=None, lcformat='hat-sql', timecols=None, magcols=None, errcols=None, minbinelems=7, nworkers=32, maxworkertasks=1000): ''' This bins all the light curves in lcdir using binsizesec. 
''' # get the light curve glob associated with specified lcformat if lcformat not in LCFORM or lcformat is None: LOGERROR('unknown light curve format specified: %s' % lcformat) return None (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = LCFORM[lcformat] lclist = sorted(glob.glob(os.path.join(lcdir, fileglob))) return parallel_timebin(lclist, binsizesec, maxobjects=maxobjects, outdir=outdir, lcformat=lcformat, timecols=timecols, magcols=magcols, errcols=errcols, minbinelems=minbinelems, nworkers=nworkers, maxworkertasks=maxworkertasks) ########################## ## VARIABILITY FEATURES ## ########################## def get_varfeatures(lcfile, outdir, timecols=None, magcols=None, errcols=None, mindet=1000, lcformat='hat-sql'): ''' This runs varfeatures on a single LC file. ''' if lcformat not in LCFORM or lcformat is None: LOGERROR('unknown light curve format specified: %s' % lcformat) return None (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = LCFORM[lcformat] # override the default timecols, magcols, and errcols # using the ones provided to the function if timecols is None: timecols = dtimecols if magcols is None: magcols = dmagcols if errcols is None: errcols = derrcols try: # get the LC into a dict lcdict = readerfunc(lcfile) if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict): lcdict = lcdict[0] resultdict = {'objectid':lcdict['objectid'], 'info':lcdict['objectinfo'], 'lcfbasename':os.path.basename(lcfile)} # normalize using the special function if specified if normfunc is not None: lcdict = normfunc(lcdict) for tcol, mcol, ecol in zip(timecols, magcols, errcols): # dereference the columns and get them from the lcdict if '.' in tcol: tcolget = tcol.split('.') else: tcolget = [tcol] times = dict_get(lcdict, tcolget) if '.' in mcol: mcolget = mcol.split('.') else: mcolget = [mcol] mags = dict_get(lcdict, mcolget) if '.' in ecol: ecolget = ecol.split('.') else: ecolget = [ecol] errs = dict_get(lcdict, ecolget) # normalize here if not using special normalization if normfunc is None: ntimes, nmags = normalize_magseries( times, mags, magsarefluxes=magsarefluxes ) times, mags, errs = ntimes, nmags, errs # make sure we have finite values finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs) # make sure we have enough finite values if mags[finind].size < mindet: LOGINFO('not enough LC points: %s in normalized %s LC: %s' % (mags[finind].size, mcol, os.path.basename(lcfile))) resultdict[mcolget[-1]] = None else: # get the features for this magcol lcfeatures = varfeatures.all_nonperiodic_features( times, mags, errs ) resultdict[mcolget[-1]] = lcfeatures # now that we've collected all the magcols, we can choose which is the # "best" magcol. this is defined as the magcol that gives us the # smallest LC MAD. try: magmads = np.zeros(len(magcols)) for mind, mcol in enumerate(magcols): if '.' in mcol: mcolget = mcol.split('.') else: mcolget = [mcol] magmads[mind] = resultdict[mcolget[-1]]['mad'] # smallest MAD index bestmagcolind = np.where(magmads == np.min(magmads))[0][0] resultdict['bestmagcol'] = magcols[bestmagcolind] except: resultdict['bestmagcol'] = None outfile = os.path.join(outdir, 'varfeatures-%s.pkl' % resultdict['objectid']) with open(outfile, 'wb') as outfd: pickle.dump(resultdict, outfd, protocol=4) return outfile except Exception as e: LOGEXCEPTION('failed to get LC features for %s because: %s' % (os.path.basename(lcfile), e)) return None def varfeatures_worker(task): ''' This wraps varfeatures.
''' try: lcfile, outdir, timecols, magcols, errcols, mindet, lcformat = task return get_varfeatures(lcfile, outdir, timecols=timecols, magcols=magcols, errcols=errcols, mindet=mindet, lcformat=lcformat) except: return None def serial_varfeatures(lclist, outdir, maxobjects=None, timecols=None, magcols=None, errcols=None, mindet=1000, lcformat='hat-sql', nworkers=None): if maxobjects: lclist = lclist[:maxobjects] tasks = [(x, outdir, timecols, magcols, errcols, mindet, lcformat) for x in lclist] for task in tqdm(tasks): result = varfeatures_worker(task) def parallel_varfeatures(lclist, outdir, maxobjects=None, timecols=None, magcols=None, errcols=None, mindet=1000, lcformat='hat-sql', nworkers=None): ''' This runs varfeatures in parallel for all light curves in lclist. ''' # make sure to make the output directory if it doesn't exist if not os.path.exists(outdir): os.makedirs(outdir) if maxobjects: lclist = lclist[:maxobjects] tasks = [(x, outdir, timecols, magcols, errcols, mindet, lcformat) for x in lclist] with ProcessPoolExecutor(max_workers=nworkers) as executor: resultfutures = executor.map(varfeatures_worker, tasks) results = [x for x in resultfutures] resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)} return resdict def parallel_varfeatures_lcdir(lcdir, outdir, maxobjects=None, timecols=None, magcols=None, errcols=None, recursive=True, mindet=1000, lcformat='hat-sql', nworkers=None): ''' This runs parallel variable feature extraction for a directory of LCs. ''' if lcformat not in LCFORM or lcformat is None: LOGERROR('unknown light curve format specified: %s' % lcformat) return None fileglob = LCFORM[lcformat][0] # now find the files LOGINFO('searching for %s light curves in %s ...' % (lcformat, lcdir)) if recursive == False: matching = glob.glob(os.path.join(lcdir, fileglob)) else: # use recursive glob for Python 3.5+ if sys.version_info[:2] > (3,4): matching = glob.glob(os.path.join(lcdir, '**', fileglob),recursive=True) # otherwise, use os.walk and glob else: # use os.walk to go through the directories walker = os.walk(lcdir) matching = [] for root, dirs, files in walker: for sdir in dirs: searchpath = os.path.join(root, sdir, fileglob) foundfiles = glob.glob(searchpath) if foundfiles: matching.extend(foundfiles) # now that we have all the files, process them if matching and len(matching) > 0: LOGINFO('found %s light curves, getting varfeatures...' % len(matching)) return parallel_varfeatures(matching, outdir, maxobjects=maxobjects, timecols=timecols, magcols=magcols, errcols=errcols, mindet=mindet, lcformat=lcformat, nworkers=nworkers) else: LOGERROR('no light curve files in %s format found in %s' % (lcformat, lcdir)) return None ####################### ## PERIODIC FEATURES ## ####################### def get_periodicfeatures(pfpickle, lcbasedir, outdir, fourierorder=5, # these are depth, duration, ingress duration transitparams=[-0.01,0.1,0.1], # these are depth, duration, depth ratio, secphase ebparams=[-0.2,0.3,0.7,0.5], pdiff_threshold=1.0e-4, sidereal_threshold=1.0e-4, sampling_peak_multiplier=5.0, sampling_startp=None, sampling_endp=None, starfeatures=None, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', sigclip=10.0, magsarefluxes=False, verbose=True, raiseonfail=False): '''This gets all periodic features for the object. If starfeatures is not None, it should be the filename of the starfeatures-<objectid>.pkl created by get_starfeatures for this object. 
This is used to get the neighbor's light curve and phase it with this object's period to see if this object is blended. ''' if lcformat not in LCFORM or lcformat is None: LOGERROR('unknown light curve format specified: %s' % lcformat) return None (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = LCFORM[lcformat] # open the pfpickle if pfpickle.endswith('.gz'): infd = gzip.open(pfpickle) else: infd = open(pfpickle, 'rb') pf = pickle.load(infd) infd.close() lcfile = os.path.join(lcbasedir, pf['lcfbasename']) objectid = pf['objectid'] if 'kwargs' in pf: kwargs = pf['kwargs'] else: kwargs = None # override the default timecols, magcols, and errcols # using the ones provided to the periodfinder # if those don't exist, use the defaults from the lcformat def if kwargs and 'timecols' in kwargs and timecols is None: timecols = kwargs['timecols'] elif not kwargs and not timecols: timecols = dtimecols if kwargs and 'magcols' in kwargs and magcols is None: magcols = kwargs['magcols'] elif not kwargs and not magcols: magcols = dmagcols if kwargs and 'errcols' in kwargs and errcols is None: errcols = kwargs['errcols'] elif not kwargs and not errcols: errcols = derrcols # check if the light curve file exists if not os.path.exists(lcfile): LOGERROR("can't find LC %s for object %s" % (lcfile, objectid)) return None # check if we have neighbors we can get the LCs for if starfeatures is not None and os.path.exists(starfeatures): with open(starfeatures,'rb') as infd: starfeat = pickle.load(infd) if starfeat['closestnbrlcfname'].size > 0: nbr_full_lcf = starfeat['closestnbrlcfname'][0] # check for this LC in the lcbasedir if os.path.exists(os.path.join(lcbasedir, os.path.basename(nbr_full_lcf))): nbrlcf = os.path.join(lcbasedir, os.path.basename(nbr_full_lcf)) # if it's not there, check for this file at the full LC location elif os.path.exists(nbr_full_lcf): nbrlcf = nbr_full_lcf # otherwise, we can't find it, so complain else: LOGWARNING("can't find neighbor light curve file: %s in " "its original directory: %s, or in this object's " "lcbasedir: %s, skipping neighbor processing..." % (os.path.basename(nbr_full_lcf), os.path.dirname(nbr_full_lcf), lcbasedir)) nbrlcf = None else: nbrlcf = None else: nbrlcf = None # now, start processing for periodic feature extraction try: # get the object LC into a dict lcdict = readerfunc(lcfile) if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict): lcdict = lcdict[0] # get the nbr object LC into a dict if there is one if nbrlcf is not None: nbrlcdict = readerfunc(nbrlcf) if isinstance(nbrlcdict, tuple) and isinstance(nbrlcdict[0],dict): nbrlcdict = nbrlcdict[0] # this will be the output file outfile = os.path.join(outdir, 'periodicfeatures-%s.pkl' % objectid) # normalize using the special function if specified if normfunc is not None: lcdict = normfunc(lcdict) if nbrlcf: nbrlcdict = normfunc(nbrlcdict) resultdict = {} for tcol, mcol, ecol in zip(timecols, magcols, errcols): # dereference the columns and get them from the lcdict if '.' in tcol: tcolget = tcol.split('.') else: tcolget = [tcol] times = dict_get(lcdict, tcolget) if nbrlcf: nbrtimes = dict_get(nbrlcdict, tcolget) else: nbrtimes = None if '.' in mcol: mcolget = mcol.split('.') else: mcolget = [mcol] mags = dict_get(lcdict, mcolget) if nbrlcf: nbrmags = dict_get(nbrlcdict, mcolget) else: nbrmags = None if '.'
in ecol: ecolget = ecol.split('.') else: ecolget = [ecol] errs = dict_get(lcdict, ecolget) if nbrlcf: nbrerrs = dict_get(nbrlcdict, ecolget) else: nbrerrs = None # # filter out nans, etc. from the object and any neighbor LC # # get the finite values finind =
np.isfinite(times)
numpy.isfinite
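# --- hedged sketch (toy arrays, not from the original source): the finite-mask
# pattern used above keeps only epochs where time, mag, and err are all finite
# before any feature extraction is attempted.
import numpy as np

times = np.array([1.0, 2.0, np.nan, 4.0])
mags = np.array([10.1, np.inf, 10.3, 10.2])
errs = np.array([0.01, 0.01, 0.02, np.nan])

finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
times, mags, errs = times[finind], mags[finind], errs[finind]
assert times.size == 1  # only the first epoch survives this toy example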
import numpy as np import pandas as pd from scipy.stats import beta import deTiN.deTiN_utilities as du np.seterr(all='ignore') class model: """Model of tumor in normal (TiN) based on only candidate SSNVs. This estimate is most reliable when there are greater then 6 mutations and TiN is less then ~30%. Previously this estimate has been used on its own to assess TiN in targeted panel data where copy number data is usually limited but SSNVs are well measured. TiN estimate : model.TiN Somatic classification of SSNVs : model.E_z (E_z > 0.5 -> somatic)""" def __init__(self, candidate_sites, p_somatic, resolution=101, f_thresh=0.15, depth=15, hot_spots_file='NA', skew=0.5): # variables follow notation: # ac = allele count n = normal t = tumor # Variables for SSNV fit self.TiN_range = np.linspace(0, 1, num=resolution) self.af = np.linspace(0.005, 1, num=200) # observed data self.contig = candidate_sites['contig'] self.position = candidate_sites['position'] self.genomic_coord_x = candidate_sites['genomic_coord_x'] self.n_alt_count = np.array(candidate_sites['n_alt_count']) self.n_ref_count = np.array(candidate_sites['n_ref_count']) self.n_depth = self.n_alt_count + self.n_ref_count self.normal_f = np.nan_to_num(np.true_divide(self.n_alt_count, self.n_depth)) self.t_alt_count = np.array(candidate_sites['t_alt_count']) self.t_ref_count = np.array(candidate_sites['t_ref_count']) self.t_depth = self.t_alt_count + self.t_ref_count self.tumor_f = np.true_divide(self.t_alt_count, self.t_depth) self.number_of_sites = len(self.n_alt_count) self.candidate_sites = np.logical_and(np.logical_and(self.tumor_f > f_thresh, self.t_depth > depth), self.n_depth > depth) # hyperparameter self.p_somatic = np.zeros([self.number_of_sites, 1]) + p_somatic if hot_spots_file != 'NA': hot_spots = pd.read_csv(hot_spots_file, sep='\t', low_memory=False, index_col=False) if type(hot_spots['Chromosome'][0]) == str: hot_spots['contig'] = du.chr2num(np.array(hot_spots['Chromosome'])) else: hot_spots['contig'] = np.array(hot_spots['Chromosome']) - 1 hot_spots = hot_spots[np.isfinite(hot_spots['contig'])] hot_spots['genomic_coord_x'] = du.hg19_to_linear_positions( np.array(hot_spots['contig']), np.array(hot_spots['Position'])) for index, hot_spot in hot_spots.iterrows(): if np.size(np.where(self.genomic_coord_x == hot_spot['genomic_coord_x'])) > 0: print('Using user provided probabilities for cancer hot spots:') print(hot_spot['Chromosome'] + ' ' + hot_spot['Position']) self.p_somatic[np.where(self.genomic_coord_x == hot_spot['genomic_coord_x'])] = hot_spot[ 'Probability'] # parameter self.TiN = 0 self.CI_tin_high = [] self.CI_tin_low = [] self.E_z = np.zeros([self.number_of_sites, 1]) self.skew = skew # expected allele fraction of minor allele given allelic copy data self.psi = .5 - np.array(candidate_sites['f_acs']) self.t_het_direction = self.tumor_f < self.skew self.t_het_direction = self.t_het_direction * -1 self.t_het_direction[self.t_het_direction == 0] = 1 # determine ratio of tumor to normal copies given tau and TiN at each locus self.tau = candidate_sites['tau'] self.tin_correct_tau = np.multiply(self.TiN_range, candidate_sites['tau'][:, np.newaxis]) self.tin_correct_normal_tau = np.multiply((1 - self.TiN_range), 2) self.CN_ratio = np.divide(self.tin_correct_tau, np.array(self.tin_correct_tau + self.tin_correct_normal_tau)) # random variables self.rv_normal_af = beta(self.n_alt_count + 1, self.n_ref_count + 1) self.rv_tumor_af = beta(self.t_alt_count + 1, self.t_ref_count + 1) # conditionals self.p_TiN_given_S = 
np.zeros([self.number_of_sites, resolution]) self.p_TiN_given_G = np.zeros([self.number_of_sites, resolution]) self.p_TiN_given_het = np.zeros([self.number_of_sites, resolution]) self.p_artifact = np.zeros([self.number_of_sites, 1]) # likelihood self.TiN_likelihood = np.zeros([resolution, 1]) def generate_conditional_ps(self): # p(TiN|Somatic) and p(TiN|Germline) t_het_direction = np.ones([self.number_of_sites, len(self.af)]) t_het_direction[:, 0:int(np.round(np.true_divide(len(self.af), 2)))] = -1 self.afexp = np.repeat(np.expand_dims(self.af, 1), self.number_of_sites, axis=1).T t_af_w = beta._cdf(self.afexp, np.expand_dims(self.t_alt_count + 1, 1), np.expand_dims(self.t_ref_count + 1, 1)) - beta._cdf(self.afexp - 0.005, np.expand_dims(self.t_alt_count + 1, 1), np.expand_dims(self.t_ref_count + 1, 1)) f_t_af = self.skew - np.abs(self.skew - self.afexp) t_af = np.multiply(self.afexp, np.expand_dims(self.n_depth, 1)) psi_t_af = self.skew - f_t_af psi_t_af = np.multiply(psi_t_af, t_het_direction) for TiN_idx, TiN in enumerate(self.TiN_range): n_ac_given_tin = np.multiply(t_af, np.expand_dims(self.CN_ratio[:, TiN_idx], 1)) exp_f = self.skew + np.multiply(psi_t_af, np.expand_dims(self.CN_ratio[:, TiN_idx], 1)) n_het_ac_given_tin = np.multiply(exp_f, self.n_depth[:, np.newaxis]) self.p_TiN_given_S[:, TiN_idx] += np.sum( np.multiply(beta._cdf(np.expand_dims(self.normal_f[:] + .01, 1), n_ac_given_tin + 1, self.n_depth[:, np.newaxis] - n_ac_given_tin + 1) - beta._cdf(np.expand_dims(self.normal_f[:], 1), n_ac_given_tin + 1, self.n_depth[:, np.newaxis] - n_ac_given_tin + 1), t_af_w), axis=1) self.p_TiN_given_het[:, TiN_idx] += np.sum( np.multiply(beta._cdf(np.expand_dims(self.normal_f[:] + .01, 1), n_het_ac_given_tin + 1, self.n_depth[:, np.newaxis] - n_het_ac_given_tin + 1) - beta._cdf(np.expand_dims(self.normal_f[:], 1), n_het_ac_given_tin + 1, self.n_depth[:, np.newaxis] - n_het_ac_given_tin + 1), t_af_w), axis=1) self.p_artifact = beta._cdf(self.normal_f + .01, self.t_alt_count + 1, self.t_ref_count + 1) - beta._cdf( self.normal_f, self.t_alt_count + 1, self.t_ref_count + 1) self.p_TiN_given_G = np.multiply(1 - self.p_artifact[:, np.newaxis], self.p_TiN_given_het) + np.multiply( self.p_artifact[:, np.newaxis], 1 - self.p_TiN_given_het) def expectation_of_z_given_TiN(self): # E step numerator = self.p_somatic * np.expand_dims(self.p_TiN_given_S[:, self.TiN], 1) denominator = numerator + (1 - self.p_somatic) * np.expand_dims(np.nan_to_num(self.p_TiN_given_G[:, self.TiN]), 1) self.E_z = np.nan_to_num(np.true_divide(numerator, denominator)) def maximize_TiN_likelihood(self): # M step self.TiN_likelihood = np.nansum(np.multiply(self.E_z[self.candidate_sites], np.ma.log(self.p_TiN_given_S[self.candidate_sites, :])), axis=0) + \ np.nansum(np.multiply(1 - self.E_z[self.candidate_sites], np.ma.log(self.p_TiN_given_G[self.candidate_sites, :])), axis=0) self.TiN =
np.argmax(self.TiN_likelihood)
numpy.argmax
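# --- hedged sketch (toy numbers): the M-step above is a grid search, so the
# TiN point estimate is just the argmax of the summed log-likelihood over the
# candidate TiN grid.
import numpy as np

TiN_range = np.linspace(0, 1, num=101)
loglik = -(TiN_range - 0.12) ** 2  # toy concave log-likelihood curve
TiN = np.argmax(loglik)            # grid index of the maximum-likelihood TiN
assert TiN == 12                   # TiN_range[12] is closest to 0.12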
#! /usr/bin/env python import tensorflow as tf import numpy as np import os import data_helpers from tensorflow.contrib import learn import csv from sklearn import metrics import yaml import itertools preps = ['at', 'on', 'in', 'by', 'for', 'against', 'to', 'from', 'between', 'during', 'with', 'about', 'of'] def softmax(x): """Compute softmax values for each sets of scores in x.""" if x.ndim == 1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) with open("config.yml", 'r') as ymlfile: cfg = yaml.load(ymlfile) # Parameters # ================================================== # Data Parameters # Eval Parameters tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") tf.flags.DEFINE_string("checkpoint_dir", "/u/a/n/anant/539_project/runs/2017-12-10 17:11:50.923482,glove,baseline,fc-3-layer,quadruple-hidden-neurons/best_checkpoints", "Checkpoint directory from training run") tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data") # Misc Parameters tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") FLAGS = tf.flags.FLAGS FLAGS._parse_flags() print("\nParameters:") for attr, value in sorted(FLAGS.__flags.items()): print("{}={}".format(attr.upper(), value)) print("") datasets = None # CHANGE THIS: Load data. Load your own data here dataset_name = cfg["datasets"]["default"] if FLAGS.eval_train: if dataset_name == "mrpolarity": datasets = data_helpers.get_datasets_mrpolarity(cfg["datasets"][dataset_name]["positive_data_file"]["path"], cfg["datasets"][dataset_name]["negative_data_file"]["path"]) elif dataset_name == "20newsgroup": datasets = data_helpers.get_datasets_20newsgroup(subset="test", categories=cfg["datasets"][dataset_name]["categories"], shuffle=cfg["datasets"][dataset_name]["shuffle"], random_state=cfg["datasets"][dataset_name]["random_state"]) x_raw, y_test = data_helpers.load_data_labels(datasets) y_test = np.argmax(y_test, axis=1) print("Total number of test examples: {}".format(len(y_test))) else: if dataset_name == "mrpolarity": datasets = {"target_names": ['positive_examples', 'negative_examples']} x_raw = ["a masterpiece four years in the making", "everything is off."] y_test = [1, 0] else: datasets = {"target_names": ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']} x_raw = ["The number of reported cases of gonorrhea in Colorado increased", "I am in the market for a 24-bit graphics card for a PC"] y_test = [2, 1] x_words_raw, x_tags, x_labels, x_trees, x_indices, y, y_labels = data_helpers.load_data_labels('/u/a/n/anant/Dropbox/539_project/generated_test_data/') x_words = x_words_raw # x_words = x_words[1:1000] # x_tags = x_tags[1:1000] # x_labels = x_labels[1:1000] # x_trees = x_trees[1:1000] # x_indices = x_indices[1:1000] # y_labels = y_labels[1:1000] max_document_length = 50 valid_indices = [] for i in range(len(x_words)): if len(x_words[i].split(" ")) <= max_document_length: valid_indices.append(i) # Map data into vocabulary vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab") vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path) x_words = np.array(list(vocab_processor.transform(x_words))) vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "tags_vocab") tags_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path) x_tags = 
np.array(list(tags_vocab_processor.transform(x_tags))) vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "labels_vocab") labels_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path) x_labels = np.array(list(labels_vocab_processor.transform(x_labels))) for i in range(len(x_words)): if x_indices[i] < max_document_length: x_words[i][int(x_indices[i])] = 0 x_indices = np.array(x_indices) x_trees =
np.array(x_trees)
numpy.array
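# --- hedged sketch (hypothetical token ids): VocabularyProcessor.transform
# yields one fixed-length id sequence per document, so np.array(list(...))
# produces a (num_docs, max_document_length) integer matrix. The equivalent
# padding done by hand:
import numpy as np

max_document_length = 5
docs = [[3, 7, 2], [4, 4, 9, 1]]  # toy id sequences of uneven length
x = np.array([d + [0] * (max_document_length - len(d)) for d in docs])
assert x.shape == (2, 5)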
import matplotlib matplotlib.use("Agg") from imageio import imread import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import numpy as np import scipy.signal as sg import scipy as sp def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1): # from cs231n assignments # First figure out what the size of the output should be N, C, H, W = x_shape assert (H + 2 * padding - field_height) % stride == 0 assert (W + 2 * padding - field_width) % stride == 0 out_height = (H + 2 * padding - field_height) // stride + 1 out_width = (W + 2 * padding - field_width) // stride + 1 i0 = np.repeat(np.arange(field_height), field_width) i0 = np.tile(i0, C) i1 = stride * np.repeat(np.arange(out_height), out_width) j0 = np.tile(np.arange(field_width), field_height * C) j1 = stride * np.tile(np.arange(out_width), out_height) i = i0.reshape(-1, 1) + i1.reshape(1, -1) j = j0.reshape(-1, 1) + j1.reshape(1, -1) k = np.repeat(
np.arange(C)
numpy.arange
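# --- hedged sketch (tiny worked example): the np.arange/np.repeat/np.tile
# recipe above builds row/column index grids so that fancy indexing gathers
# every patch in one shot. For 2x2 patches of a 4x4 single-channel image
# (stride 1, no padding) there are 3*3 = 9 patches:
import numpy as np

x = np.arange(16, dtype=float).reshape(4, 4)
i0 = np.repeat(np.arange(2), 2)            # row offset within a patch
i1 = np.repeat(np.arange(3), 3)            # row offset of each patch origin
j0 = np.tile(np.arange(2), 2)              # col offset within a patch
j1 = np.tile(np.arange(3), 3)              # col offset of each patch origin
i = i0.reshape(-1, 1) + i1.reshape(1, -1)  # (4, 9) row indices
j = j0.reshape(-1, 1) + j1.reshape(1, -1)  # (4, 9) col indices
cols = x[i, j]                             # each column is one flattened patch
assert cols.shape == (4, 9)
assert np.array_equal(cols[:, 0], np.array([0.0, 1.0, 4.0, 5.0]))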
import numpy as np from itertools import product from itertools import permutations import matplotlib.pyplot as plt import pickle import os import stimulus import parameters import analysis class Motifs: def __init__(self, data_dir, file_prefix, N = None): self.motifs = {} self.motif_sizes = [2,3,4] data_files = os.listdir(data_dir) for f in data_files: if f.startswith(file_prefix): print('Processing ', f) self.current_filename = f W, v = self.make_matrix(data_dir + f, 'elim_lesion', N) print(type(W)) if type(W) is list: for i,w1 in enumerate(W): self.find_motifs(w1, v) else: self.find_motifs(W, v) self.print_motif_list() def make_matrix(self, filename, method, N): x = pickle.load(open(filename, 'rb')) beh_threshold = 0.1 val_th = 0.1 ind_accurate = np.where(np.array(x['accuracy_hist']) > 0.98)[0] #N = np.argmax(ind_accurate) #N = 6 print('N = ', N) if method == 'elim_lesion' or method == 'elim': parameters.update_parameters(x['par']) s = stimulus.Stimulus() trial_info = s.generate_trial() if method == 'lesion': significant_weights_rnn = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_rnn'][0,:,:] > beh_threshold significant_weights_out = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_out'][0,:,:] > beh_threshold v = np.array([0]*x['parameters']['num_exc_units'] + [1]*x['parameters']['num_inh_units'] \ + [2]*x['parameters']['n_output']) W = np.vstack((significant_weights_rnn, significant_weights_out)) d = W.shape[0] - W.shape[1] W = np.hstack((W, np.zeros((W.shape[0], d)))) elif method == 'elim': num_units = 50 - N w1 = np.zeros((num_units, num_units)) w2 = np.zeros((3, num_units)) ind = np.where(x['gate_hist'][N]>0)[0] for i in range(num_units): for j in range(num_units): w1[i,j] = x['weights_hist'][N]['w_rnn'][ind[i], ind[j]] > val_th for j in range(3): w2[j,i] = x['weights_hist'][N]['w_out'][j, ind[i]] > val_th n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']])) n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:])) v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output']) W = np.vstack((w1, w2)) d = W.shape[0] - W.shape[1] W = np.hstack((W, np.zeros((W.shape[0], d)))) elif method == 'elim_lesion': num_units = 50 - N r = analysis.lesion_weights(trial_info, x['par']['h_init'], x['par']['syn_x_init'], x['par']['syn_u_init'], \ x['weights_hist'][N], x['gate_hist'][N]) #plt.imshow(np.squeeze(r['lesion_accuracy_rnn']), aspect='auto', interpolation = 'none') #plt.colorbar() #plt.show() w1_full = np.tile(x['accuracy_hist'][N],(x['par']['n_hidden'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_rnn']) > beh_threshold w2_full = np.tile(x['accuracy_hist'][N],(x['par']['n_output'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_out']) > beh_threshold w1 = np.zeros((num_units, num_units)) w2 = np.zeros((3, num_units)) ind = np.where(x['gate_hist'][N]>0)[0] for i in range(num_units): for j in range(num_units): w1[i,j] = w1_full[ind[i], ind[j]] for j in range(3): w2[j,i] = w2_full[j, ind[i]] #plt.imshow(w1, aspect='auto', interpolation = 'none') #plt.colorbar() #plt.show() print('accuracy ', x['accuracy_hist'][N]) n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']])) n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:])) v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output']) W = np.vstack((w1, w2)) d = W.shape[0] - W.shape[1] W = np.hstack((W, np.zeros((W.shape[0], d)))) plt.imshow(W, aspect='auto', interpolation = 'none') plt.colorbar() plt.show() print(v) elif method == 'stacked': W = [] for i in 
range(x['W_rnn'].shape[0]): w1 = np.reshape(x['W_rnn'][i,:], (50,50))>0.2 w2 = np.reshape(x['W_out'][i,:], (3,50))>0.2 v = np.array([0]*40 + [1]*10 + [2]*3) W1 = np.vstack((w1, w2)) d = W1.shape[0] - W1.shape[1] W1 = np.hstack((W1, np.zeros((W1.shape[0], d)))) W.append(W1) return W, v def connection_probs(self): unique_labels =
np.unique(self.v)
numpy.unique
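# --- hedged sketch (toy adjacency, not the real data): np.unique over the
# unit-type labels gives the distinct types, and a connection probability can
# then be computed for every ordered (type a -> type b) pair.
import numpy as np

W = np.array([[0, 1, 1],
              [1, 0, 0],
              [0, 0, 1]], dtype=bool)  # toy significant-connection matrix
v = np.array([0, 0, 1])                # toy labels: 0 = exc, 1 = inh
for a in np.unique(v):
    for b in np.unique(v):
        block = W[np.ix_(v == a, v == b)]
        print(a, b, block.mean())      # fraction of realized connections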
""" Random Variables. This module implements random variables. Random variables are the main in- and outputs of probabilistic numerical methods. """ from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union import numpy as np from probnum import utils as _utils from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType, FloatArgType, RandomStateArgType, RandomStateType, ShapeArgType, ShapeType, ) try: # functools.cached_property is only available in Python >=3.8 from functools import cached_property except ImportError: from cached_property import cached_property _ValueType = TypeVar("ValueType") class RandomVariable(Generic[_ValueType]): """ Random variables are the main objects used by probabilistic numerical methods. Every probabilistic numerical method takes a random variable encoding the prior distribution as input and outputs a random variable whose distribution encodes the uncertainty arising from finite computation. The generic signature of a probabilistic numerical method is: ``output_rv = probnum_method(input_rv, method_params)`` In practice, most random variables used by methods in ProbNum have Dirac or Gaussian measure. Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and linear operators. This may change their ``distribution`` and not necessarily all previously available methods are retained. The internals of :class:`RandomVariable` objects are assumed to be constant over their whole lifecycle. This is due to the caches used to make certain computations more efficient. As a consequence, altering the internal state of a :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result in undefined behavior. In particular, this should be kept in mind when subclassing :class:`RandomVariable` or any of its descendants. Parameters ---------- shape : Shape of realizations of this random variable. dtype : Data type of realizations of this random variable. If ``object`` will be converted to ``numpy.dtype``. as_value_type : Function which can be used to transform user-supplied arguments, interpreted as realizations of this random variable, to an easy-to-process, normalized format. Will be called internally to transform the argument of functions like ``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in :class:`ContinuousRandomVariable`), and potentially by similar functions in subclasses. For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf`` both only work on :class:`np.float_` arguments, but we still want the user to be able to pass Python :class:`float`. Then ``as_value_type`` should be set to something like ``lambda x: np.float64(x)``. See Also -------- asrandvar : Transform into a :class:`RandomVariable`. 
Examples -------- """ # pylint: disable=too-many-instance-attributes,too-many-public-methods def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, as_value_type: Optional[Callable[[Any], _ValueType]] = None, ): # pylint: disable=too-many-arguments,too-many-locals """Create a new random variable.""" self.__shape = _utils.as_shape(shape) # Data Types self.__dtype = np.dtype(dtype) self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype) self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype) self._random_state = _utils.as_random_state(random_state) # Probability distribution of the random variable self.__parameters = parameters.copy() if parameters is not None else {} self.__sample = sample self.__in_support = in_support self.__cdf = cdf self.__logcdf = logcdf self.__quantile = quantile # Properties of the random variable self.__mode = mode self.__median = median self.__mean = mean self.__cov = cov self.__var = var self.__std = std self.__entropy = entropy # Utilities self.__as_value_type = as_value_type def __repr__(self) -> str: return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>" @property def shape(self) -> ShapeType: """Shape of realizations of the random variable.""" return self.__shape @cached_property def ndim(self) -> int: return len(self.__shape) @cached_property def size(self) -> int: return int(np.prod(self.__shape)) @property def dtype(self) -> np.dtype: """Data type of (elements of) a realization of this random variable.""" return self.__dtype @property def median_dtype(self) -> np.dtype: """The dtype of the :attr:`median`. It will be set to the dtype arising from the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the fact that, even for discrete random variables, e.g. integer-valued random variables, the :attr:`median` might lie in between two values in which case these values are averaged. For example, a uniform random variable on :math:`\\{ 1, 2, 3, 4 \\}` will have a median of :math:`2.5`. """ return self.__median_dtype @property def moment_dtype(self) -> np.dtype: """The dtype of any (function of a) moment of the random variable, e.g. its :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to the dtype arising from the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical definition of a moment as a sum or an integral over products of probabilities and values of the random variable, which are represented as using the dtypes :class:`np.float_` and :attr:`dtype`, respectively. """ return self.__moment_dtype @property def random_state(self) -> RandomStateType: """Random state of the random variable. 
This attribute defines the RandomState object to use for drawing realizations from this random variable. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local :class:`~numpy.random.RandomState` instance. """ return self._random_state @random_state.setter def random_state(self, seed: RandomStateArgType): """Get or set the RandomState object of the underlying distribution. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ self._random_state = _utils.as_random_state(seed) @property def parameters(self) -> Dict[str, Any]: """ Parameters of the probability distribution. The parameters of the distribution such as mean, variance, et cetera stored in a ``dict``. """ return self.__parameters.copy() @cached_property def mode(self) -> _ValueType: """ Mode of the random variable. Returns ------- mode : float The mode of the random variable. """ if self.__mode is None: raise NotImplementedError mode = self.__mode() RandomVariable._check_property_value( "mode", mode, shape=self.__shape, dtype=self.__dtype, ) # Make immutable if isinstance(mode, np.ndarray): mode.setflags(write=False) return mode @cached_property def median(self) -> _ValueType: """ Median of the random variable. To learn about the dtype of the median, see :attr:`median_dtype`. Returns ------- median : float The median of the distribution. """ if self.__shape != (): raise NotImplementedError( "The median is only defined for scalar random variables." ) median = self.__median() RandomVariable._check_property_value( "median", median, shape=self.__shape, dtype=self.__median_dtype, ) # Make immutable if isinstance(median, np.ndarray): median.setflags(write=False) return median @cached_property def mean(self) -> _ValueType: """ Mean :math:`\\mathbb{E}(X)` of the distribution. To learn about the dtype of the mean, see :attr:`moment_dtype`. Returns ------- mean : array-like The mean of the distribution. """ if self.__mean is None: raise NotImplementedError mean = self.__mean() RandomVariable._check_property_value( "mean", mean, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(mean, np.ndarray): mean.setflags(write=False) return mean @cached_property def cov(self) -> _ValueType: """ Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` of the random variable. To learn about the dtype of the covariance, see :attr:`moment_dtype`. Returns ------- cov : array-like The kernels of the random variable. """ # pylint: disable=line-too-long if self.__cov is None: raise NotImplementedError cov = self.__cov() RandomVariable._check_property_value( "covariance", cov, shape=(self.size, self.size) if self.ndim > 0 else (), dtype=self.__moment_dtype, ) # Make immutable if isinstance(cov, np.ndarray): cov.setflags(write=False) return cov @cached_property def var(self) -> _ValueType: """ Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of the distribution. To learn about the dtype of the variance, see :attr:`moment_dtype`. Returns ------- var : array-like The variance of the distribution. 
""" if self.__var is None: try: var = np.diag(self.cov).reshape(self.__shape).copy() except NotImplementedError as exc: raise NotImplementedError from exc else: var = self.__var() RandomVariable._check_property_value( "variance", var, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(var, np.ndarray): var.setflags(write=False) return var @cached_property def std(self) -> _ValueType: """ Standard deviation of the distribution. To learn about the dtype of the standard deviation, see :attr:`moment_dtype`. Returns ------- std : array-like The standard deviation of the distribution. """ if self.__std is None: try: std = np.sqrt(self.var) except NotImplementedError as exc: raise NotImplementedError from exc else: std = self.__std() RandomVariable._check_property_value( "standard deviation", std, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(std, np.ndarray): std.setflags(write=False) return std @cached_property def entropy(self) -> np.float_: if self.__entropy is None: raise NotImplementedError entropy = self.__entropy() entropy = RandomVariable._ensure_numpy_float( "entropy", entropy, force_scalar=True ) return entropy def in_support(self, x: _ValueType) -> bool: if self.__in_support is None: raise NotImplementedError in_support = self.__in_support(self._as_value_type(x)) if not isinstance(in_support, bool): raise ValueError( f"The function `in_support` must return a `bool`, but its return value " f"is of type `{type(x)}`." ) return in_support def sample(self, size: ShapeArgType = ()) -> _ValueType: """ Draw realizations from a random variable. Parameters ---------- size : tuple Size of the drawn sample of realizations. Returns ------- sample : array-like Sample of realizations with the given ``size`` and the inherent ``shape``. """ if self.__sample is None: raise NotImplementedError("No sampling method provided.") return self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType) -> np.float_: """ Cumulative distribution function. Parameters ---------- x : array-like Evaluation points of the cumulative distribution function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The cdf evaluation will be broadcast over all additional dimensions. Returns ------- q : array-like Value of the cumulative density function at the given points. """ if self.__cdf is not None: return RandomVariable._ensure_numpy_float( "cdf", self.__cdf(self._as_value_type(x)) ) elif self.__logcdf is not None: cdf = np.exp(self.logcdf(self._as_value_type(x))) assert isinstance(cdf, np.float_) return cdf else: raise NotImplementedError( f"Neither the `cdf` nor the `logcdf` of the random variable object " f"with type `{type(self).__name__}` is implemented." ) def logcdf(self, x: _ValueType) -> np.float_: """ Log-cumulative distribution function. Parameters ---------- x : array-like Evaluation points of the cumulative distribution function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The logcdf evaluation will be broadcast over all additional dimensions. Returns ------- q : array-like Value of the log-cumulative density function at the given points. 
""" if self.__logcdf is not None: return RandomVariable._ensure_numpy_float( "logcdf", self.__logcdf(self._as_value_type(x)) ) elif self.__cdf is not None: logcdf = np.log(self.__cdf(x)) assert isinstance(logcdf, np.float_) return logcdf else: raise NotImplementedError( f"Neither the `logcdf` nor the `cdf` of the random variable object " f"with type `{type(self).__name__}` is implemented." ) def quantile(self, p: FloatArgType) -> _ValueType: """Quantile function. The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random variable :math:`X` is defined as :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random variable. From the definition it follows that the quantile function always returns values of the same dtype as the random variable. For instance, for a discrete distribution over the integers, the returned quantiles will also be integers. This means that, in general, :math:`Q(0.5)` is not equal to the :attr:`median` as it is defined in this class. See https://en.wikipedia.org/wiki/Quantile_function for more details and examples. """ if self.__shape != (): raise NotImplementedError( "The quantile function is only defined for scalar random variables." ) if self.__quantile is None: raise NotImplementedError try: p = _utils.as_numpy_scalar(p, dtype=np.floating) except TypeError as exc: raise TypeError( "The given argument `p` can not be cast to a `np.floating` object." ) from exc quantile = self.__quantile(p) if quantile.shape != self.__shape: raise ValueError( f"The quantile function should return values of the same shape as the " f"random variable, i.e. {self.__shape}, but it returned a value with " f"{quantile.shape}." ) if quantile.dtype != self.__dtype: raise ValueError( f"The quantile function should return values of the same dtype as the " f"random variable, i.e. `{self.__dtype.name}`, but it returned a value " f"with dtype `{quantile.dtype.name}`." ) return quantile def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable": return RandomVariable( shape=np.empty(shape=self.shape)[key].shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size)[key], mode=lambda: self.mode[key], mean=lambda: self.mean[key], var=lambda: self.var[key], std=lambda: self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def reshape(self, newshape: ShapeArgType) -> "RandomVariable": """ Give a new shape to a random variable. Parameters ---------- newshape : int or tuple of ints New shape for the random variable. It must be compatible with the original shape. Returns ------- reshaped_rv : ``self`` with the new dimensions of ``shape``. """ newshape = _utils.as_shape(newshape) return RandomVariable( shape=newshape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).reshape(size + newshape), mode=lambda: self.mode.reshape(newshape), median=lambda: self.median.reshape(newshape), mean=lambda: self.mean.reshape(newshape), cov=lambda: self.cov, var=lambda: self.var.reshape(newshape), std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def transpose(self, *axes: int) -> "RandomVariable": """ Transpose the random variable. Parameters ---------- axes : None, tuple of ints, or n ints See documentation of numpy.ndarray.transpose. 
Returns ------- transposed_rv : The transposed random variable. """ return RandomVariable( shape=
np.empty(shape=self.shape)
numpy.empty
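# --- hedged sketch: the np.empty(shape=...)[key].shape idiom used in
# __getitem__ above infers the shape of an indexed random variable without
# evaluating any distribution -- numpy does the indexing bookkeeping on an
# uninitialized dummy array.
import numpy as np

shape = (3, 4, 5)
key = (slice(None), 1)                        # equivalent of rv[:, 1]
new_shape = np.empty(shape=shape)[key].shape
assert new_shape == (3, 5)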
import pytest import sys, os import xarray as xr import numpy as np sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import process from process._common import ProcessArgumentRequired, ProcessArgumentInvalid FIXTURES_FOLDER = os.path.join(os.path.dirname(__file__), 'fixtures') ################################### # fixtures: ################################### @pytest.fixture def construct_data(): def _construct(data, bands, dims=('y', 'x', 'band'), band_aliases={"nir": "B08","red": "B04"}): xrdata = xr.DataArray(
np.array(data, dtype=float)
numpy.array
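# --- hedged sketch (hypothetical values): the fixture above wraps a plain
# nested list in an xarray.DataArray with (y, x, band) dims; for instance:
import numpy as np
import xarray as xr

data = [[[0.1, 0.2], [0.3, 0.4]]]  # shape (1, 2, 2) -> (y, x, band)
xrdata = xr.DataArray(np.array(data, dtype=float),
                      dims=('y', 'x', 'band'),
                      coords={'band': ['B04', 'B08']})
assert xrdata.sizes['band'] == 2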
from typing import Tuple, Dict, Any, Union, Callable import numpy as np import scipy.ndimage as ndi from common.exceptionmanager import catch_error_exception from common.functionutil import ImagesUtil from preprocessing.imagegenerator import ImageGenerator _epsilon = 1e-6 class TransformRigidImages(ImageGenerator): def __init__(self, size_image: Union[Tuple[int, int, int], Tuple[int, int]], is_normalize_data: bool = False, type_normalize_data: str = 'samplewise', is_zca_whitening: bool = False, is_inverse_transform: bool = False, rescale_factor: float = None, preprocessing_function: Callable[[np.ndarray], np.ndarray] = None ) -> None: super(TransformRigidImages, self).__init__(size_image, num_images=1) if is_normalize_data: if type_normalize_data == 'featurewise': self._featurewise_center = True self._featurewise_std_normalization = True self._samplewise_center = False self._samplewise_std_normalization = False else: # type_normalize_data == 'samplewise' self._featurewise_center = False self._featurewise_std_normalization = False self._samplewise_center = True self._samplewise_std_normalization = True else: self._featurewise_center = False self._featurewise_std_normalization = False self._samplewise_center = False self._samplewise_std_normalization = False self._is_zca_whitening = is_zca_whitening self._zca_epsilon = 1e-6 self._rescale_factor = rescale_factor self._preprocessing_function = preprocessing_function self._mean = None self._std = None self._principal_components = None self._is_inverse_transform = is_inverse_transform self._initialize_gendata() def update_image_data(self, in_shape_image: Tuple[int, ...]) -> None: # self._num_images = in_shape_image[0] pass def _initialize_gendata(self) -> None: self._transform_matrix = None self._transform_params = None self._count_trans_in_images = 0 def _update_gendata(self, **kwargs) -> None: seed = kwargs['seed'] (self._transform_matrix, self._transform_params) = self._calc_gendata_random_transform(seed) self._count_trans_in_images = 0 def _get_image(self, in_image: np.ndarray) -> np.ndarray: is_type_input_image = (self._count_trans_in_images == 0) self._count_trans_in_images += 1 return self._get_transformed_image(in_image, is_type_input_image=is_type_input_image) def _get_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray: if ImagesUtil.is_without_channels(self._size_image, in_image.shape): in_image = np.expand_dims(in_image, axis=-1) is_reshape_input_image = True else: is_reshape_input_image = False in_image = self._calc_transformed_image(in_image, is_type_input_image=is_type_input_image) if is_type_input_image: in_image = self._standardize(in_image) if is_reshape_input_image: in_image = np.squeeze(in_image, axis=-1) return in_image def _get_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray: if ImagesUtil.is_without_channels(self._size_image, in_image.shape): in_image = np.expand_dims(in_image, axis=-1) is_reshape_input_image = True else: is_reshape_input_image = False if is_type_input_image: in_image = self._standardize_inverse(in_image) in_image = self._calc_inverse_transformed_image(in_image, is_type_input_image=is_type_input_image) if is_reshape_input_image: in_image = np.squeeze(in_image, axis=-1) return in_image def _calc_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = False) -> np.ndarray: raise NotImplementedError def _calc_inverse_transformed_image(self, in_array: np.ndarray, is_type_input_image: bool = 
False) -> np.ndarray: raise NotImplementedError def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]: raise NotImplementedError def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]: raise NotImplementedError def _standardize(self, in_image: np.ndarray) -> np.ndarray: if self._preprocessing_function: in_image = self._preprocessing_function(in_image) if self._rescale_factor: in_image *= self._rescale_factor if self._samplewise_center: in_image -= np.mean(in_image, keepdims=True) if self._samplewise_std_normalization: in_image /= (np.std(in_image, keepdims=True) + _epsilon) template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \ 'training data. Fit it first by calling \'fit(numpy_data)\'.' if self._featurewise_center: if self._mean is not None: in_image -= self._mean else: message = template_message_error % ('featurewise_center') catch_error_exception(message) if self._featurewise_std_normalization: if self._std is not None: in_image /= (self._std + _epsilon) else: message = template_message_error % ('featurewise_std_normalization') catch_error_exception(template_message_error % (message)) if self._is_zca_whitening: if self._principal_components is not None: flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:]))) whitex = np.dot(flatx, self._principal_components) in_image = np.reshape(whitex, in_image.shape) else: message = template_message_error % ('zca_whitening') catch_error_exception(message) return in_image def _standardize_inverse(self, in_image: np.ndarray) -> np.ndarray: template_message_error = 'This ImageDataGenerator specifies \'%s\', but it hasn\'t been fit on any ' \ 'training data. Fit it first by calling \'fit(numpy_data)\'.' if self._is_zca_whitening: if self._principal_components is not None: flatx = np.reshape(in_image, (-1, np.prod(in_image.shape[-3:]))) inverse_principal_componens = np.divide(1.0, self._principal_components) whitex = np.dot(flatx, inverse_principal_componens) in_image = np.reshape(whitex, in_image.shape) else: message = template_message_error % ('zca_whitening') catch_error_exception(message) if self._featurewise_std_normalization: if self._std is not None: in_image *= self._std else: message = template_message_error % ('featurewise_std_normalization') catch_error_exception(message) if self._featurewise_center: if self._mean is not None: in_image += self._mean else: message = template_message_error % ('featurewise_center') catch_error_exception(message) if self._samplewise_std_normalization: in_image *= np.std(in_image, keepdims=True) if self._samplewise_center: in_image += np.mean(in_image, keepdims=True) if self._rescale_factor: in_image /= self._rescale_factor if self._preprocessing_function: catch_error_exception('Not implemented inverse preprocessing function') return in_image @staticmethod def _flip_axis(in_image: np.ndarray, axis: int) -> np.ndarray: in_image = np.asarray(in_image).swapaxes(axis, 0) in_image = in_image[::-1, ...] 
in_image = in_image.swapaxes(0, axis) return in_image @staticmethod def _apply_channel_shift(in_image: np.ndarray, intensity: int, channel_axis: int = 0) -> np.ndarray: in_image = np.rollaxis(in_image, channel_axis, 0) min_x, max_x = np.min(in_image), np.max(in_image) channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in in_image] in_image = np.stack(channel_images, axis=0) in_image = np.rollaxis(in_image, 0, channel_axis + 1) return in_image def _apply_brightness_shift(self, in_image: np.ndarray, brightness: int) -> np.ndarray: catch_error_exception('Not implemented brightness shifting option...') # in_image = array_to_img(in_image) # in_image = imgenhancer_Brightness = ImageEnhance.Brightness(in_image) # in_image = imgenhancer_Brightness.enhance(brightness) # in_image = img_to_array(in_image) def get_text_description(self) -> str: raise NotImplementedError class TransformRigidImages2D(TransformRigidImages): _img_row_axis = 0 _img_col_axis = 1 _img_channel_axis = 2 def __init__(self, size_image: Tuple[int, int], is_normalize_data: bool = False, type_normalize_data: str = 'samplewise', is_zca_whitening: bool = False, rotation_range: float = 0.0, width_shift_range: float = 0.0, height_shift_range: float = 0.0, brightness_range: Tuple[float, float] = None, shear_range: float = 0.0, zoom_range: Union[float, Tuple[float, float]] = 0.0, channel_shift_range: float = 0.0, fill_mode: str = 'nearest', cval: float = 0.0, horizontal_flip: bool = False, vertical_flip: bool = False, rescale_factor: float = None, preprocessing_function: Callable[[np.ndarray], np.ndarray] = None ) -> None: self._rotation_range = rotation_range self._width_shift_range = width_shift_range self._height_shift_range = height_shift_range self._brightness_range = brightness_range self._shear_range = shear_range self._channel_shift_range = channel_shift_range self._fill_mode = fill_mode self._cval = cval self._horizontal_flip = horizontal_flip self._vertical_flip = vertical_flip if np.isscalar(zoom_range): self._zoom_range = (1 - zoom_range, 1 + zoom_range) elif len(zoom_range) == 2: self._zoom_range = (zoom_range[0], zoom_range[1]) else: message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range)) catch_error_exception(message) if self._brightness_range is not None: if len(self._brightness_range) != 2: message = '\'brightness_range\' should be a tuple of two floats. 
                          Received %s' % (str(brightness_range))
                catch_error_exception(message)

        super(TransformRigidImages2D, self).__init__(size_image,
                                                     is_normalize_data=is_normalize_data,
                                                     type_normalize_data=type_normalize_data,
                                                     is_zca_whitening=is_zca_whitening,
                                                     rescale_factor=rescale_factor,
                                                     preprocessing_function=preprocessing_function)

    def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
        # Apply: 1st: rigid transformations
        #        2nd: channel shift intensity / flipping
        if self._transform_matrix is not None:
            in_image = self._apply_transform(in_image, self._transform_matrix,
                                             channel_axis=self._img_channel_axis,
                                             fill_mode=self._fill_mode, cval=self._cval)

        if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
            in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
                                                 channel_axis=self._img_channel_axis)

        if self._transform_params.get('flip_horizontal', False):
            in_image = self._flip_axis(in_image, axis=self._img_col_axis)

        if self._transform_params.get('flip_vertical', False):
            in_image = self._flip_axis(in_image, axis=self._img_row_axis)

        if is_type_input_image and (self._transform_params.get('brightness') is not None):
            in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])

        return in_image

    def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
        # Apply: 1st: channel shift intensity / flipping
        #        2nd: rigid transformations
        if is_type_input_image and (self._transform_params.get('brightness') is not None):
            in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])

        if self._transform_params.get('flip_vertical', False):
            in_image = self._flip_axis(in_image, axis=self._img_row_axis)

        if self._transform_params.get('flip_horizontal', False):
            in_image = self._flip_axis(in_image, axis=self._img_col_axis)

        if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
            in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
                                                 channel_axis=self._img_channel_axis)

        if self._transform_matrix is not None:
            in_image = self._apply_transform(in_image, self._transform_matrix,
                                             channel_axis=self._img_channel_axis,
                                             fill_mode=self._fill_mode, cval=self._cval)

        return in_image

    def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
        # compute composition of homographies
        if seed is not None:
            np.random.seed(seed)

        # ****************************************************
        if self._rotation_range:
            theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
        else:
            theta = 0

        if self._height_shift_range:
            tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
            if np.max(self._height_shift_range) < 1:
                tx *= self._size_image[self._img_row_axis]
        else:
            tx = 0

        if self._width_shift_range:
            ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
            if np.max(self._width_shift_range) < 1:
                ty *= self._size_image[self._img_col_axis]
        else:
            ty = 0

        if self._shear_range:
            shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
        else:
            shear = 0

        if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)

        flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self._vertical_flip

        channel_shift_intensity = None
        if self._channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)

        brightness = None
        if self._brightness_range is not None:
            brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])

        transform_parameters = {'flip_horizontal': flip_horizontal,
                                'flip_vertical': flip_vertical,
                                'channel_shift_intensity': channel_shift_intensity,
                                'brightness': brightness}
        # ****************************************************

        # ****************************************************
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix

        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)

        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)

        if transform_matrix is not None:
            h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
            transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
        # ****************************************************

        return (transform_matrix, transform_parameters)

    def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
        # compute composition of inverse homographies
        if seed is not None:
            np.random.seed(seed)

        # ****************************************************
        if self._rotation_range:
            theta = np.deg2rad(np.random.uniform(-self._rotation_range, self._rotation_range))
        else:
            theta = 0

        if self._height_shift_range:
            tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
            if self._height_shift_range < 1:
                tx *= self._size_image[self._img_row_axis]
        else:
            tx = 0

        if self._width_shift_range:
            ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
            if self._width_shift_range < 1:
                ty *= self._size_image[self._img_col_axis]
        else:
            ty = 0

        if self._shear_range:
            shear = np.deg2rad(np.random.uniform(-self._shear_range, self._shear_range))
        else:
            shear = 0

        if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 2)

        flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self._vertical_flip

        channel_shift_intensity = None
        if self._channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)

        brightness = None
        if self._brightness_range is not None:
            brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])

        transform_parameters = {'flip_horizontal': flip_horizontal,
                                'flip_vertical': flip_vertical,
                                'channel_shift_intensity': channel_shift_intensity,
                                'brightness': brightness}
        # ****************************************************

        # ****************************************************
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0],
                                        [-np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix

        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, -tx],
                                     [0, 1, -ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)

        if shear != 0:
            shear_matrix = np.array([[1, np.tan(shear), 0],
                                     [0, 1.0 / np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[1.0 / zx, 0, 0],
                                    [0, 1.0 / zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)

        if transform_matrix is not None:
            h, w = self._size_image[self._img_row_axis], self._size_image[self._img_col_axis]
            transform_matrix = self._transform_matrix_offset_center(transform_matrix, h, w)
        # ****************************************************

        return (transform_matrix, transform_parameters)

    @staticmethod
    def _transform_matrix_offset_center(matrix: np.ndarray, x: int, y: int) -> np.ndarray:
        o_x = float(x) / 2 + 0.5
        o_y = float(y) / 2 + 0.5
        offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
        reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
        transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
        return transform_matrix

    @staticmethod
    def _apply_transform(in_image: np.ndarray, transform_matrix: np.ndarray, channel_axis: int = 0,
                         fill_mode: str = 'nearest', cval: float = 0.0) -> np.ndarray:
        in_image = np.rollaxis(in_image, channel_axis, 0)
        final_affine_matrix = transform_matrix[:2, :2]
        final_offset = transform_matrix[:2, 2]
        channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset,
                                                             order=1, mode=fill_mode, cval=cval)
                          for x_channel in in_image]
        in_image = np.stack(channel_images, axis=0)
        in_image = np.rollaxis(in_image, 0, channel_axis + 1)
        return in_image

    def get_text_description(self) -> str:
        message = 'Rigid 2D transformations of images, with parameters...\n'
        message += 'rotation (plane_XY) range: \'%s\'...\n' % (self._rotation_range)
        message += 'shift (width, height) range: \'(%s, %s)\'...\n' \
                   % (self._width_shift_range, self._height_shift_range)
        message += 'flip (horizontal, vertical): \'(%s, %s)\'...\n' \
                   % (self._horizontal_flip, self._vertical_flip)
        message += 'zoom (min, max) range: \'(%s, %s)\'...\n' % (self._zoom_range[0], self._zoom_range[1])
        message += 'shear (plane_XY) range: \'%s\'...\n' % (self._shear_range)
        message += 'fill mode, when applied transformation: \'%s\'...\n' % (self._fill_mode)
        return message


class TransformRigidImages3D(TransformRigidImages):
    _img_dep_axis = 0
    _img_row_axis = 1
    _img_col_axis = 2
    _img_channel_axis = 3

    def __init__(self,
                 size_image: Tuple[int, int, int],
                 is_normalize_data: bool = False,
                 type_normalize_data: str = 'samplewise',
                 is_zca_whitening: bool = False,
                 rotation_xy_range: float = 0.0,
                 rotation_xz_range: float = 0.0,
                 rotation_yz_range: float = 0.0,
                 width_shift_range: float = 0.0,
                 height_shift_range: float = 0.0,
                 depth_shift_range: float = 0.0,
                 brightness_range: Tuple[float, float] = None,
                 shear_xy_range: float = 0.0,
                 shear_xz_range: float = 0.0,
                 shear_yz_range: float = 0.0,
                 zoom_range: Union[float, Tuple[float, float]] = 0.0,
                 channel_shift_range: float = 0.0,
                 fill_mode: str = 'nearest',
                 cval: float = 0.0,
                 horizontal_flip: bool = False,
                 vertical_flip: bool = False,
                 axialdir_flip: bool = False,
                 rescale_factor: float = None,
                 preprocessing_function: Callable[[np.ndarray], np.ndarray] = None
                 ) -> None:
        self._rotation_xy_range = rotation_xy_range
        self._rotation_xz_range = rotation_xz_range
        self._rotation_yz_range = rotation_yz_range
        self._width_shift_range = width_shift_range
        self._height_shift_range = height_shift_range
        self._depth_shift_range = depth_shift_range
        self._brightness_range = brightness_range
        self._shear_xy_range = shear_xy_range
        self._shear_xz_range = shear_xz_range
        self._shear_yz_range = shear_yz_range
        self._channel_shift_range = channel_shift_range
        self._fill_mode = fill_mode
        self._cval = cval
        self._horizontal_flip = horizontal_flip
        self._vertical_flip = vertical_flip
        self._axialdir_flip = axialdir_flip

        if np.isscalar(zoom_range):
            self._zoom_range = (1 - zoom_range, 1 + zoom_range)
        elif len(zoom_range) == 2:
            self._zoom_range = (zoom_range[0], zoom_range[1])
        else:
            message = '\'zoom_range\' should be a float or a tuple of two floats. Received %s' % (str(zoom_range))
            catch_error_exception(message)

        if self._brightness_range is not None:
            if len(self._brightness_range) != 2:
                message = '\'brightness_range\' should be a tuple of two floats. Received %s' % (str(brightness_range))
                catch_error_exception(message)

        super(TransformRigidImages3D, self).__init__(size_image,
                                                     is_normalize_data=is_normalize_data,
                                                     type_normalize_data=type_normalize_data,
                                                     is_zca_whitening=is_zca_whitening,
                                                     rescale_factor=rescale_factor,
                                                     preprocessing_function=preprocessing_function)

    def _calc_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
        # Apply: 1st: rigid transformations
        #        2nd: channel shift intensity / flipping
        if self._transform_matrix is not None:
            in_image = self._apply_transform(in_image, self._transform_matrix,
                                             channel_axis=self._img_channel_axis,
                                             fill_mode=self._fill_mode, cval=self._cval)

        if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
            in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
                                                 channel_axis=self._img_channel_axis)

        if self._transform_params.get('flip_horizontal', False):
            in_image = self._flip_axis(in_image, axis=self._img_col_axis)

        if self._transform_params.get('flip_vertical', False):
            in_image = self._flip_axis(in_image, axis=self._img_row_axis)

        if self._transform_params.get('flip_axialdir', False):
            in_image = self._flip_axis(in_image, axis=self._img_dep_axis)

        if is_type_input_image and (self._transform_params.get('brightness') is not None):
            in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])

        return in_image

    def _calc_inverse_transformed_image(self, in_image: np.ndarray, is_type_input_image: bool = False) -> np.ndarray:
        # Apply: 1st: channel shift intensity / flipping
        #        2nd: rigid transformations
        if is_type_input_image and (self._transform_params.get('brightness') is not None):
            in_image = self._apply_brightness_shift(in_image, self._transform_params['brightness'])

        if self._transform_params.get('flip_axialdir', False):
            in_image = self._flip_axis(in_image, axis=self._img_dep_axis)

        if self._transform_params.get('flip_vertical', False):
            in_image = self._flip_axis(in_image, axis=self._img_row_axis)

        if self._transform_params.get('flip_horizontal', False):
            in_image = self._flip_axis(in_image, axis=self._img_col_axis)

        if is_type_input_image and (self._transform_params.get('channel_shift_intensity') is not None):
            in_image = self._apply_channel_shift(in_image, self._transform_params['channel_shift_intensity'],
                                                 channel_axis=self._img_channel_axis)

        if self._transform_matrix is not None:
            in_image = self._apply_transform(in_image, self._transform_matrix,
                                             channel_axis=self._img_channel_axis,
                                             fill_mode=self._fill_mode, cval=self._cval)

        return in_image

    def _calc_gendata_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
        # compute composition of homographies
        if seed is not None:
            np.random.seed(seed)

        # ****************************************************
        if self._rotation_xy_range:
            angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
        else:
            angle_xy = 0

        if self._rotation_xz_range:
            angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
        else:
            angle_xz = 0

        if self._rotation_yz_range:
            angle_yz = np.deg2rad(np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range))
        else:
            angle_yz = 0

        if self._height_shift_range:
            tx = np.random.uniform(-self._height_shift_range, self._height_shift_range)
            if self._height_shift_range < 1:
                tx *= self._size_image[self._img_row_axis]
        else:
            tx = 0

        if self._width_shift_range:
            ty = np.random.uniform(-self._width_shift_range, self._width_shift_range)
            if self._width_shift_range < 1:
                ty *= self._size_image[self._img_col_axis]
        else:
            ty = 0

        if self._depth_shift_range:
            tz = np.random.uniform(-self._depth_shift_range, self._depth_shift_range)
            if self._depth_shift_range < 1:
                tz *= self._size_image[self._img_dep_axis]
        else:
            tz = 0

        if self._shear_xy_range:
            shear_xy = np.deg2rad(np.random.uniform(-self._shear_xy_range, self._shear_xy_range))
        else:
            shear_xy = 0

        if self._shear_xz_range:
            shear_xz = np.deg2rad(np.random.uniform(-self._shear_xz_range, self._shear_xz_range))
        else:
            shear_xz = 0

        if self._shear_yz_range:
            shear_yz = np.deg2rad(np.random.uniform(-self._shear_yz_range, self._shear_yz_range))
        else:
            shear_yz = 0

        if self._zoom_range[0] == 1 and self._zoom_range[1] == 1:
            (zx, zy, zz) = (1, 1, 1)
        else:
            (zx, zy, zz) = np.random.uniform(self._zoom_range[0], self._zoom_range[1], 3)

        flip_horizontal = (np.random.random() < 0.5) * self._horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self._vertical_flip
        flip_axialdir = (np.random.random() < 0.5) * self._axialdir_flip

        channel_shift_intensity = None
        if self._channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(-self._channel_shift_range, self._channel_shift_range)

        brightness = None
        if self._brightness_range is not None:
            brightness = np.random.uniform(self._brightness_range[0], self._brightness_range[1])

        transform_parameters = {'flip_horizontal': flip_horizontal,
                                'flip_vertical': flip_vertical,
                                'flip_axialdir': flip_axialdir,
                                'channel_shift_intensity': channel_shift_intensity,
                                'brightness': brightness}
        # ****************************************************

        # ****************************************************
        transform_matrix = None
        if angle_xy != 0:
            rotation_matrix = np.array([[1, 0, 0, 0],
                                        [0, np.cos(angle_xy), -np.sin(angle_xy), 0],
                                        [0, np.sin(angle_xy), np.cos(angle_xy), 0],
                                        [0, 0, 0, 1]])
            transform_matrix = rotation_matrix

        if angle_xz != 0:
            rotation_matrix = np.array([[np.cos(angle_xz), np.sin(angle_xz), 0, 0],
                                        [-np.sin(angle_xz), np.cos(angle_xz), 0, 0],
                                        [0, 0, 1, 0],
                                        [0, 0, 0, 1]])
            transform_matrix = \
                rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)

        if angle_yz != 0:
            rotation_matrix = np.array([[np.cos(angle_yz), 0, np.sin(angle_yz), 0],
                                        [0, 1, 0, 0],
                                        [-np.sin(angle_yz), 0, np.cos(angle_yz), 0],
                                        [0, 0, 0, 1]])
            transform_matrix = \
                rotation_matrix if transform_matrix is None else np.dot(transform_matrix, rotation_matrix)

        if tx != 0 or ty != 0 or tz != 0:
            shift_matrix = np.array([[1, 0, 0, tz],
                                     [0, 1, 0, tx],
                                     [0, 0, 1, ty],
                                     [0, 0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)

        if shear_xy != 0:
            shear_matrix = np.array([[1, 0, 0, 0],
                                     [0, 1, -np.sin(shear_xy), 0],
                                     [0, 0, np.cos(shear_xy), 0],
                                     [0, 0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if shear_xz != 0:
            shear_matrix = np.array([[np.cos(shear_xz), 0, 0, 0],
                                     [-np.sin(shear_xz), 1, 0, 0],
                                     [0, 0, 1, 0],
                                     [0, 0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if shear_yz != 0:
            shear_matrix = np.array([[np.cos(shear_yz), 0, 0, 0],
                                     [0, 1, 0, 0],
                                     [-np.sin(shear_yz), 0, 1, 0],
                                     [0, 0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if zx != 1 or zy != 1 or zz != 1:
            zoom_matrix = np.array([[zz, 0, 0, 0],
                                    [0, zx, 0, 0],
                                    [0, 0, zy, 0],
                                    [0, 0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)

        if transform_matrix is not None:
            (d, h, w) = (self._size_image[self._img_dep_axis],
                         self._size_image[self._img_row_axis],
                         self._size_image[self._img_col_axis])
            transform_matrix = self._transform_matrix_offset_center(transform_matrix, d, h, w)
        # ****************************************************

        return (transform_matrix, transform_parameters)

    def _calc_gendata_inverse_random_transform(self, seed: int = None) -> Tuple[np.ndarray, Dict[str, Any]]:
        # compute composition of inverse homographies
        if seed is not None:
            np.random.seed(seed)

        # ****************************************************
        if self._rotation_xy_range:
            angle_xy = np.deg2rad(np.random.uniform(-self._rotation_xy_range, self._rotation_xy_range))
        else:
            angle_xy = 0

        if self._rotation_xz_range:
            angle_xz = np.deg2rad(np.random.uniform(-self._rotation_xz_range, self._rotation_xz_range))
        else:
            angle_xz = 0

        if self._rotation_yz_range:
            angle_yz = np.deg2rad(
np.random.uniform(-self._rotation_yz_range, self._rotation_yz_range)
numpy.random.uniform
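A minimal standalone sketch of the pattern completed above, with a hypothetical rotation range (values not from the prompt's repository): np.random.uniform draws a symmetric random angle that is then folded into a homogeneous transform matrix.

import numpy as np

rotation_range = 10.0  # hypothetical range, in degrees
angle = np.deg2rad(np.random.uniform(-rotation_range, rotation_range))
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle), 0],
                            [np.sin(angle),  np.cos(angle), 0],
                            [0, 0, 1]])  # 2D homogeneous rotation, as in the prompt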
import os

#__MAYAVI__ = False
#try:
#    os.environ["QT_API"] = "pyqt"
#    from mayavi import mlab
#    __MAYAVI__ = True
#except:
#    try:
#        os.environ["QT_API"] = "pyside"
#        from mayavi import mlab
#        __MAYAVI__ = True
#    except:
#        print("Unable to import mayavi")

from ionotomo.geometry.tri_cubic import TriCubic
from ionotomo.astro.frames.uvw_frame import UVW
import numpy as np
import pylab as plt
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au

## utility functions

try:
    import cmocean
    phase_cmap = cmocean.cm.phase
except:
    phase_cmap = plt.cm.hsv

def interp_nearest(x, y, z, x_, y_):
    dx = np.subtract.outer(x_, x)
    dy = np.subtract.outer(y_, y)
    r = dx**2
    dy *= dy
    r += dy
    np.sqrt(r, out=r)
    arg = np.argmin(r, axis=1)
    z_ = z[arg]
    return z_

def plot_tci(tci, rays=None, filename=None, show=False):
    '''Plot the given tci using mayavi if possible.
    tci : TriCubic object to plot
    rays : array of shape (num_antennas, num_times, num_dirs, 4, num_steps)
    filename : name of figure file to save to without extension e.g. "figure1"
    show : boolean, whether to show the resulting figure.'''
    xmin = tci.xvec[0]
    xmax = tci.xvec[-1]
    ymin = tci.yvec[0]
    ymax = tci.yvec[-1]
    zmin = tci.zvec[0]
    zmax = tci.zvec[-1]
    X, Y, Z = np.mgrid[xmin:xmax:len(tci.xvec)*1j,
                       ymin:ymax:len(tci.yvec)*1j,
                       zmin:zmax:len(tci.zvec)*1j]
    #reshape array
    data = tci.get_shaped_array()
    xy = np.mean(data, axis=2)
    yz = np.mean(data, axis=0)
    zx = np.mean(data, axis=1)
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
    ax1.imshow(xy, origin='lower', aspect='auto')
    ax1.set_title("X-Y projection")
    ax2.imshow(yz, origin='lower', aspect='auto')
    ax2.set_title("Y-Z projection")
    ax3.imshow(zx, origin='lower', aspect='auto')
    ax3.set_title("Z-X projection")
    if filename is not None:
        plt.savefig("{}.png".format(filename), format='png')
    if show:
        plt.show()
    else:
        plt.close()

def make_animation(datafolder, prefix='fig', fps=3):
    '''Given a datafolder with figures of format `prefix`-%04d.png create a video
    at framerate `fps`. Output is datafolder/animation.mp4'''
    if os.system('ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(fps, datafolder, prefix, datafolder)):
        print("{}/animation.mp4 exists already".format(datafolder))

def animate_tci_slices(TCI, output_folder, num_seconds=10.):
    '''Animate the slicing of a tci by showing the xz, yz, zy planes as they
    sweep across the volume (possibly depreciated)'''
    try:
        os.makedirs(output_folder)
    except:
        pass
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure(figsize=(12, 12))
    ax1 = fig.add_subplot(221, projection='3d')
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223)
    ax4 = fig.add_subplot(224)
    M = TCI.get_shaped_array()
    if np.sum(M < 0) > 0:
        print("Using linear scaling")
        log_spacing = False
    else:
        print("Using log scaling")
        log_spacing = True
        M[M == 0] =
np.min(M[M>0])
numpy.min
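The completed line guards the later logarithmic scaling: zeros are replaced by the smallest positive entry so log(M) stays finite. A minimal sketch with made-up values:

import numpy as np

M = np.array([0.0, 0.5, 2.0, 0.0])
M[M == 0] = np.min(M[M > 0])  # zeros become 0.5; np.log(M) is now well defined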
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 <NAME>
# Licensed under the 2-clause BSD license.

from argparse import Namespace
import numpy as np
from . import utils


def set_b(b, block=[1, 1], **kwargs):
    """
    Process b request.

    Sets data_type ('image', 'spectrum', 'profile'), and imSize

    Parameters
    ----------
    b:  list of pairs -> returns that list
        one pair -> returns that as a list
        float -> returns a full image grid
        'disc' (or 'disk') -> returns [[0.0, 0.0]]
        'stamp:bres:xmin,xmax,ymin,ymax' -> returns grid of postage stamp
        'start:stop:step[<angle]' -> string defining line
        'n1,n2,n3[<angle]' -> csv list of b magnitudes
    block:  image block as pair, e.g. [4, 10] is "block 4 of 10"
    kwargs : other args as necessary

    Returns
    -------
    Namespace
        contains b, block, data_type, imSize
    """
    # Deal with strings
    return_value = Namespace(b=None, data_type=None, block=block, imSize=None)
    if isinstance(b, str):
        b = b.lower()
        if b.startswith('dis'):
            return_value.b = [b]
            return_value.data_type = 'spectrum'
        elif b.startswith('stamp'):
            bres = float(b.split(':')[1])
            bext = [float(x) for x in b.split(':')[2].split(',')]
            return_value.b = []
            for x in np.arange(bext[0], bext[1] + bres / 2.0, bres):
                for y in np.arange(bext[2], bext[3] + bres / 2.0, bres):
                    return_value.b.append([y, x])
            xbr = len(
np.arange(bext[2], bext[3] + bres / 2.0, bres)
numpy.arange
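The stamp branch builds an endpoint-inclusive grid with np.arange by padding the stop value with half a step; a sketch with hypothetical resolution and extent values:

import numpy as np

bres, bext = 0.5, [-1.0, 1.0, -1.0, 1.0]  # hypothetical stamp resolution and extents
ys = np.arange(bext[2], bext[3] + bres / 2.0, bres)  # half-step pad keeps the endpoint
b = [[y, x] for x in np.arange(bext[0], bext[1] + bres / 2.0, bres) for y in ys]
xbr = len(ys)  # number of samples along one side of the postage stamp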
import argparse
import datetime
import typing
import pandas as pd
import numpy as np
import h5py
import utils
import os
import tqdm
import json
import multiprocessing


def get_stations_coordinates(stations) -> typing.Dict[str, typing.Tuple]:
    """
    :return: dictionary of str -> (coord_x, coord_y) mapping station coordinates to pixel
    """
    # takes one hdf5 path
    hdf5_path = "/project/cq-training-1/project1/data/hdf5v7_8bit/2015.01.01.0800.h5"
    with h5py.File(hdf5_path, 'r') as h5_data:
        lats, lons = utils.fetch_hdf5_sample("lat", h5_data, 0), utils.fetch_hdf5_sample("lon", h5_data, 0)
    stations_coords = {}
    for region, lats_lons in stations.items():
        coords = (np.argmin(
np.abs(lats - lats_lons[0])
numpy.abs
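The completion implements a nearest-neighbour lookup: np.argmin(np.abs(grid - value)) returns the index of the closest grid point. A sketch with a made-up latitude grid:

import numpy as np

lats = np.linspace(-90.0, 90.0, 181)  # hypothetical 1D latitude grid
station_lat = 46.8
idx = np.argmin(np.abs(lats - station_lat))  # index of the nearest grid latitude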
'''
code in python3

Needs to be in the same directory as SONG (/.../song/)

The python part of song, called songy, is written in python2. Make the following change:
In /song/python/songy.py line 170:
    range(0,N*(N+1)/2) --> list(range(0,int(N*(N+1)/2)))
'''
####################################################################################### import
import numpy as np
import os
import sys
from numba import numba, jit, prange, config
import h5py
import subprocess
#import matplotlib.pyplot as plt
import time
import params_relic as params
import importlib
importlib.reload(params)
sys.path.insert(0, params.song_path+'python')  # path to python module of song
import songy as s
from classy import Class
import warnings
warnings.filterwarnings("ignore")

####################################################################################### SONG wrapper
def run_song(hkmax, hkmin):
    ''' Call this function in the song repository:
    It will create the ini and pre files from the global parameters and run song. '''
    ini_file = r"""output = delta_cdm_bk
T_cmb = 2.7255
N_eff = 3.046
reio_parametrization = reio_none
tau_reio = 0.0952
k_pivot = 0.05
A_s = {}
n_s = {}
YHe = 0.2477055
gauge = newtonian
output_single_precision = yes
output_class_perturbations = yes
background_verbose = 1
thermodynamics_verbose = 1
primordial_verbose = 1
spectra_verbose = 1
nonlinear_verbose = 1
lensing_verbose = 1
output_verbose = 1
perturbations_verbose = 1
perturbations2_verbose = 2
transfer_verbose = 1
transfer2_verbose = 1
bessels_verbose = 1
bessels2_verbose = 1
bispectra_verbose = 1
fisher_verbose = 1
format = camb
write parameters = yes
h={}
omega_b={}
omega_cdm={}
Omega_k={}
primordial_local_fnl_phi={}
z_out={}"""

    pre_file = r"""sources2_k3_sampling = {}
k3_size = {}
k_min_tau0 = 0.05
k_max_tau0_over_l_max = 2.8
k_step_sub =0.1
k_logstep_super = 1.2
k_step_super = 0.025
k_step_transition = 0.2
quadsources_time_interpolation = cubic
sources_time_interpolation = linear
sources_k3_interpolation = cubic #linear
tau_start_evolution_song = 0
start_small_k_at_tau_c_over_tau_h_song = 0.001
start_large_k_at_tau_h_over_tau_k_song = 0.04
sources2_k_sampling = {}
k_min_custom_song = {}
k_max_custom_song = {}
k_size_custom_song = {}"""

    ini = "./matter_{}.ini".format(params.key_song)
    pre = "./matter_{}.pre".format(params.key_song)

    file = open(ini, "w")
    file.write(ini_file.format(params.A_s, params.n_s, params.h, params.omega_b*params.h**2,
                               params.omega_cdm*params.h**2, params.omega_k, params.fnl, params.z))
    file.close()

    file = open(pre, "w")
    if params.interp in ['nearest', 'lin']:
        file.write(pre_file.format('lin', int(params.N_song_k3), 'lin', hkmin, hkmax, int(params.N_song_k12)))
    else:
        file.write(pre_file.format('smart', int(params.N_song_k3), 'lin', hkmin, hkmax, int(params.N_song_k12)))
    file.close()

    os.system("./song "+ini+' '+pre)
    os.system("mv "+params.song_path+'output/sources_song_z000.dat '
              + params.song_path+"output/sources_song_z000_{}.dat".format(params.key_song))

def song_output(hkmax, hkmin, force):
    '''
    Once song has run, this function loads the output by using the songy routine
    FixedTauFile (see song/python/songy.py). It returns the needed output:
        -song.get_source(b'delta_cdm') = song: second-order kernel multiplied by two
         transfer functions, i.e. K(k1,k2,k3)*T_delta(k1)*T_delta(k2) in the expression
         int_k1_k2 (K(k1,k2,k3) T_delta(k1) T_delta(k2) zeta(k1) zeta(k2))
        -song.tau: conformal time corresponding to the redshift.
         It is needed to get the velocity potential (dK/dtau)
        -song.k1, song.k2, song.k3: grid of modes
        -song.flatidx: Song output shape is weird! see song/python/songy.py
        -dk12, dk3: steps of the grid
    '''
    filename = 'sources_song_z000_{}.dat'.format(params.key_song)
    if not os.path.isfile(params.song_path+'output/'+filename):
        print(params.song_path+'output/{} not found'.format(filename))
        print('===========================================================================================')
        run_song(hkmax, hkmin)
    elif force:
        print('force running SONG')
        #os.system("rm "+params.song_path+'output/{} not found'.format(filename))
        print('===========================================================================================')
        run_song(hkmax, hkmin)

    print('===========================================================================================')
    print('loading '+params.song_path+'output/{}'.format(filename))
    song = s.FixedTauFile(params.song_path+'output/'+filename)

    if len(params.source) == 0 and (len(song.k1) != params.N_song_k12
                                    or np.min(song.k1) != hkmin or np.max(song.k1) != hkmax):
        print('The output '+params.song_path+'output/ found does not have the right shape or hkmax/hkmin')
        print('SONG N_song_k1={}, you ask {}'.format(len(song.k1), params.N_song_k12))
        print('SONG N_song_k3={}, you ask {}'.format(len(song.k3[0]), params.N_song_k3))
        print('SONG hkmin={}, you ask {}'.format(np.min(song.k1), hkmin))
        print('SONG hkmax={}, you ask {}'.format(np.max(song.k1), hkmax))
        print('===========================================================================================')

    dk12 = song.k1[1]-song.k1[0]
    k3 = np.concatenate(song.k3)
    if params.interp in ['nearest', 'lin']:
        dk3 = np.diff(song.k3)[:, 0]
    else:
        dk3 = np.array([], dtype=np.float32)
        for k1_ind, k1 in enumerate(song.k1):
            for k2_ind, k2 in enumerate(song.k2[k1_ind]):
                k3_ind = song.flatidx[k1_ind, k2_ind]
                dk3 = np.append(dk3, song.k3[k3_ind][2]-song.k3[k3_ind][1])

    k3sizes_cumsum = np.zeros(len(song.k3sizes_cumsum)+2, dtype=int)
    k3sizes_cumsum[1:-1] = song.k3sizes_cumsum
    k3sizes_cumsum[-1] = len(k3)

    return np.concatenate(song.get_source(b'delta_cdm')), song.tau, song.k1, \
        np.concatenate(song.k2), k3, song.flatidx, dk12, dk3, k3sizes_cumsum

def song_main(hkmax, hkmin, force=False):
    '''Main function for SONG '''
    source, tau, k1, k2, k3, flatidx, dk12, dk3, k3sizes_cumsum = song_output(hkmax, hkmin, force)
    return source, k1/params.h, k2/params.h, k3/params.h, flatidx, dk12/params.h, dk3/params.h, k3sizes_cumsum

####################################################################################### first order transfert fct
def trans(clss=True):
    '''
    This function returns the Primordial power spectrum, the transfer functions of
    delta_cdm and phi, and the derivative of the last transfer function.
        -Primordial power spectrum: Primordial = A_s(k/k_0)**(ns-1) / (k**3/(2*np.pi**2)).
        -delta_cdm transfer function: tr_delta_cdm(k,z)*zeta(k)=delta_cdm(k,z)
        -potential transfer function: tr_phi(z,k)*zeta(k)=phi(z,k)
    '''
    if not clss:
        song = s.FixedTauFile(params.song_path+"output/sources_song_z000_{}.dat".format(params.key_song))
        song.first_order_sources['k'] /= params.h
        tr_delta_cdm = song.first_order_sources[b'delta_cdm']
        tr_delta_b = song.first_order_sources[b'delta_b']
        song.first_order_sources[b'delta_m'] = \
            (params.omega_b/(params.omega_b+params.omega_cdm)*tr_delta_b
             + params.omega_cdm/(params.omega_b+params.omega_cdm)*tr_delta_cdm)
        song.first_order_sources[b'phi'] = \
            song.first_order_sources[b'delta_m']*(-3*params.H**2/2) \
            / (song.first_order_sources['k']**2+3*params.H**2)
        dk = np.diff(np.append(song.first_order_sources['k'],
                               song.first_order_sources['k'][-1]*2-song.first_order_sources['k'][-2]))
        dT = np.diff(np.append(song.first_order_sources[b'phi'],
                               song.first_order_sources[b'phi'][-1]*2-song.first_order_sources[b'phi'][-2]))
        song.first_order_sources[b'dTdk'] = dT/dk
        return song.first_order_sources
    else:
        clss = Class()
        clss.set({'gauge': 'newtonian', 'h': params.h, 'omega_b': params.omega_b*params.h**2,
                  'omega_cdm': params.omega_cdm*params.h**2, 'output': 'dTk,vTk', 'z_pk': 1000})
        clss.compute()
        #clss=np.loadtxt('class_tk.dat')
        tr = clss.get_transfer(z=params.z)
        tr['k'] = tr.pop('k (h/Mpc)')
        dk = np.diff(np.append(tr['k'], tr['k'][-1]*2-tr['k'][-2]))
        dT = np.diff(np.append(tr['phi'], tr['phi'][-1]*2-tr['phi'][-2]))
        tr['dTdk'] = dT/dk
        tr['d_m'] = (params.omega_cdm*tr['d_cdm'] + params.omega_b*tr['d_b'])/(params.omega_b+params.omega_cdm)
        tr['t_m'] = (params.omega_cdm*tr['t_cdm'] + params.omega_b*tr['t_b'])/(params.omega_b+params.omega_cdm)
        tr['v_m'] = -tr['t_m']/tr['k']**2/params.h
        return tr
    #clas = np.loadtxt('gevolution-1.2/class_tk.dat')
    #k=clas[:,0]
    #dk=np.diff(np.append(k,k[-1]*2-k[-2]))
    #dT=np.diff(np.append(clas[:,6],clas[:,6][-1]*2-clas[:,6][-2]))
    #xi = (clas[:,3] - 3*clas[:,6])/k**2
    #first_order_sources={'k':clas[:,0],b'delta_cdm':clas[:,3],b'phi':clas[:,6],b'dTdk':dT/dk,b'xi':xi,b'v':-2*clas[:,6]/3/params.H}
    #return first_order_sources

def primordial(k):
    return params.A_s*(k/(params.k_pivot/params.h))**(params.n_s-1)/k**3*2*np.pi**2

def powerspectrum(k, delta_cdm):
    prim = primordial(k)
    T = np.interp(k, delta_cdm[0], delta_cdm[1])
    return prim*T**2

####################################################################################### mode grid
def k_distrib(k_min, N, klbd, absolute=True):
    '''
    Inputs:
        -k_min: Minimum mode to be considered. Setting k_min automatically sets the
         step dk=k_min because, in order for k-k1 to always be on the grid k1, we
         need to include 0 and to have a constant step dk.
        -N: size of the grid. In order to include 0, if it is not odd, we set N+=1
         (the final ifft returns the right even N grid)
        -klbd: k_lambda:
            if absolute==True: the function will return the closest value in the grid
            else: klbd is considered as being a ratio; return kL=k[N//2:][int(klbd*N//2)]
    output: klin_concat, kmax, N, dk, klambda
        -k: list of k coordinates
        -kmax: largest mode to be considered
        -N like input
        -k_min in float32
        -kL: actual k_lambda
    '''
    if N % 2 == 0:
        print('N has to be odd to include 0: N+=1')
        N += 1
    params.N = N

    k = np.linspace(-(N//2)*k_min, N//2*k_min, N, dtype=np.float32)

    if absolute:
        idxL = np.where(np.abs(klbd-k[N//2:]) == np.min(np.abs(klbd-k[N//2:])))[0]
        kL = k[N//2:][idxL][0]
    else:
        kL = k[N//2:][int(klbd*N//2)]
    return k, np.float32(N//2*k_min), N, np.float32(k_min), kL

def W(grid, field):
    if params.coarse_graine:
        l = 2*np.pi/params.kmin/(N-1)
        k1, k2, k3 = grid[0][N//2:]*l/2/np.pi, grid[1]*l/2/np.pi, grid[2]*l/2/np.pi
        W = (np.sinc(k1)*np.sinc(k2)*np.sinc(k3))
        return W*field
    else:
        return field

def ifft(field):
    '''This function performs the inverse Fourier transform. It uses the numpy
    function irfftn. The input array first has to be re-organized. In this code,
    the array field is organized this way:
        field=(z=0:Nyquist, y=-Nyquist:0:Nyquist, x=-Nyquist:0:Nyquist)
    which means shape(field)=(N//2+1,N,N) (Reminder: in the code, N is always odd
    while N_input is even, N=N_input+1). The python module takes as an input an
    array organized as follows:
        field=(x=0:Nyquist-1:-1:-Nyquist, y=0:Nyquist-1:-1:-Nyquist, z=0:Nyquist)
    which means shape(field)=(N//2+1,N-1,N-1).
    Note that -Nyquist=+Nyquist since N_input is even.
    '''
    field[0, params.N//2, :params.N//2] = np.conjugate(field[0, params.N//2, params.N//2+1:][::-1])
    field[0, :params.N//2, :] = np.conjugate(field[0, params.N//2+1:, :][::-1, ::-1])
    return np.fft.irfftn(np.fft.ifftshift(field.transpose()[:-1, :-1], axes=(0, 1)),
                         (params.N-1, params.N-1, params.N-1))
    # Equivalent to :
    #new_field=np.zeros((N//2+1,N-1,N-1),dtype=np.complex64)
    #new_field[:,N//2+1:,N//2+1:]=field[:,1:N//2,1:N//2]
    #new_field[:,:N//2+1,:N//2+1]=field[:,N//2:,N//2:]
    #new_field[:,:N//2+1,N//2+1:]=field[:,N//2:,1:N//2]
    #new_field[:,N//2+1:,:N//2+1]=field[:,1:N//2,N//2:]
    #return np.fft.irfftn(new_field.transpose(),(N-1,N-1,N-1))

def fft(f_field):
    field = np.zeros((params.N//2+1, params.N, params.N), dtype=np.complex)
    field[:, :-1, :-1] = np.fft.fftshift(np.fft.rfftn(f_field), axes=(0, 1)).transpose()
    field[:, -1], field[:, :, -1] = field[:, 0], field[:, :, 0]
    return field

def read_h5(filename, dtype=np.float32):
    if len(filename) == 0:
        f1 = h5py.File(params.output_path+params.key+'_{}{}_{}.h5'.format(field, order, real), 'r')
        dat1 = np.array(f1['data'], dtype=dtype)
        return dat1
    else:
        f1 = h5py.File(filename, 'r')
        dat1 = np.array(f1['data'], dtype=dtype)
        return dat1

def save_h5(filename, f):
    hf = h5py.File(filename, 'w')      # Save in h5 format
    hf.create_dataset('data', data=f)  #
    hf.close()

####################################################################################### First order stochastic potential
def zeta_realisation(k_grid):
    '''
    Generate the linear curvature perturbation field (N//2+1,N,N) at redshift z
    in half of Fourier space. The reality condition ensures the other half.
    The computation is in 3 steps:
        -compute the modulus of k in the grid (k)
        -interpolate transfer function and primordial power spectrum tr=T(k) and P=P(k)
        -randomly draw the real/imaginary part of the primordial curvature zeta
         following a Gaussian PDF with std=sqrt(P(k)/2)
    '''
    def random(k):
        with np.errstate(divide='ignore'):
            P = primordial(k)
            zeta_ini_Re = np.random.normal(0, (params.N-1)**3*np.sqrt(P/2*params.kmin**3/(2*np.pi)**3), k.shape)
            # https://nms.kcl.ac.uk/eugene.lim/AdvCos/lecture2.pdf
            zeta_ini_Im = np.random.normal(0, (params.N-1)**3*np.sqrt(P/2*params.kmin**3/(2*np.pi)**3), k.shape)
            # equivalent :
            #rho = np.random.normal(0,(N-1)**3*np.sqrt(P*params.kmin**3/(2*np.pi)**3),k.shape)
            #phase = np.random.uniform(0,2*np.pi,k.shape)
            #zeta_ini_Re=rho*np.cos(phase)
            #zeta_ini_Im=rho*np.sin(phase)
        return np.complex64(zeta_ini_Re+zeta_ini_Im*1j)

    k = np.sqrt(k_grid[0][params.N//2:]**2+k_grid[1]**2+k_grid[2]**2)
    zeta = random(k)
    zeta[np.isnan(zeta)] = 0

    # Even N in real space gives a N+1 FFT grid with symmetries !
    zeta[1:-1, -1, 1:-1] = zeta[1:-1, 0, 1:-1]   # z&x Plan
    zeta[1:-1, 1:-1, -1] = zeta[1:-1, 1:-1, 0]   # z&y Plan
    # Zmax plan Surfaces
    zeta[-1, 1:params.N//2, 1:params.N//2] = np.conjugate(zeta[-1, params.N//2+1:-1, params.N//2+1:-1][::-1, ::-1])
    zeta[-1, params.N//2+1:-1, 1:params.N//2] = np.conjugate(zeta[-1, 1:params.N//2, params.N//2+1:-1][::-1, ::-1])
    # Zmax plan lines X constant and Y constant
    zeta[-1, params.N//2, 1:params.N//2] = np.conjugate(zeta[-1, params.N//2, params.N//2+1:-1][::-1])
    zeta[-1, 1:params.N//2, params.N//2] =
np.conjugate(zeta[-1,params.N//2+1:-1,params.N//2][::-1])
numpy.conjugate
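The conjugate assignments enforce the Hermitian (reality) condition f(-k) = conj(f(k)) on the half-grid, which is what makes the inverse FFT real-valued. A minimal sketch of the same symmetry on a small zero-centred grid:

import numpy as np

N = 9  # odd grid centred on k = 0, as in the prompt
f = np.random.normal(size=(N, N)) + 1j * np.random.normal(size=(N, N))
f_sym = 0.5 * (f + np.conjugate(f[::-1, ::-1]))  # now f_sym(-k) == conj(f_sym(k))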
import copy
import cv2
# import torch
from mindspore import Tensor
import numpy as np
from PIL import Image
from util.config import config as cfg
from util.misc import find_bottom, find_long_edges, split_edge_seqence, \
    norm2, vector_sin, split_edge_seqence_by_step, sample, fourier_transform, \
    clockwise, find_start_point


def pil_load_img(path):
    image = Image.open(path)
    image = np.array(image)
    return image


class TextInstance(object):
    def __init__(self, points, orient, text):
        self.orient = orient
        self.text = text
        self.bottoms = None
        self.e1 = None
        self.e2 = None
        if self.text != "#":
            self.label = 1
        else:
            self.label = -1

        remove_points = []
        self.points = np.array(points)
        if len(points) > 4:
            # remove point if area is almost unchanged after removing it
            ori_area = cv2.contourArea(points)
            for p in range(len(points)):
                # attempt to remove p
                index = list(range(len(points)))
                index.remove(p)
                area = cv2.contourArea(points[index])
                if np.abs(ori_area - area) / ori_area < 0.0017 and len(points) - len(remove_points) > 4:
                    remove_points.append(p)
            self.points = np.array([point for i, point in enumerate(points) if i not in remove_points])
        else:
            self.points = np.array(points)

    def find_bottom_and_sideline(self):
        self.bottoms = find_bottom(self.points)  # find two bottoms of this Text
        self.e1, self.e2 = find_long_edges(self.points, self.bottoms)  # find two long edge sequences

    def disk_cover(self, n_disk=15):
        """
        cover text region with several disks
        :param n_disk: number of disks
        :return:
        """
        inner_points1 = split_edge_seqence(self.points, self.e1, n_disk)
        inner_points2 = split_edge_seqence(self.points, self.e2, n_disk)
        inner_points2 = inner_points2[::-1]  # inverse one of the long edges

        center_points = (inner_points1 + inner_points2) / 2  # disk center
        radii = norm2(inner_points1 - center_points, axis=1)  # disk radius

        return inner_points1, inner_points2, center_points, radii

    def equal_width_bbox_cover(self, step=16.0):
        inner_points1, inner_points2 = split_edge_seqence_by_step(self.points, self.e1, self.e2, step=step)
        inner_points2 = inner_points2[::-1]  # inverse one of the long edges

        center_points = (inner_points1 + inner_points2) / 2  # disk center

        return inner_points1, inner_points2, center_points

    def __repr__(self):
        return str(self.__dict__)

    def __getitem__(self, item):
        return getattr(self, item)


class TextDataset(object):
    def __init__(self, transform, is_training=False):
        super().__init__()
        self.transform = transform
        self.is_training = is_training

    @staticmethod
    def fill_polygon(mask, pts, value):
        cv2.fillPoly(mask, [pts.astype(np.int32)], color=(value,))

    def make_text_region(self, img, polygon, tr_mask, train_mask, x_map, y_map, k, scale=1/2):
        [h, w] = img.shape[:2]
        h = int(h * scale)
        w = int(w * scale)
        deal_mask = np.zeros((h, w), np.uint8)
        points = (polygon.points * scale).astype(np.int32)
        cv2.fillPoly(tr_mask, [points], color=(1,))
        cv2.fillPoly(deal_mask, [points], color=(1,))
        if polygon.text == '#':
            cv2.fillPoly(train_mask, [points], color=(0,))

        pts = sample(polygon.points * scale)
        pts = find_start_point(pts)
        c = fourier_transform(pts, k)
        c = clockwise(c, k)

        vector_x = np.real(c)
        vector_y = np.imag(c)

        for i in range(-k, k + 1):
            if i != 0:
                x_map[:, :, i + k] = deal_mask * vector_x[i + k] + (1 - deal_mask) * x_map[:, :, i + k]
                y_map[:, :, i + k] = deal_mask * vector_y[i + k] + (1 - deal_mask) * y_map[:, :, i + k]
            else:
                for y, x in np.argwhere(deal_mask > 0.5):
                    x_map[y, x, k] = vector_x[k] - x
                    y_map[y, x, k] = vector_y[k] - y

    def make_text_center_line(self, sideline1, sideline2, center_line, tcl_msk1, expand=0.3, shrink=1):
        p1 = np.mean(sideline1, axis=0)
        p2 = np.mean(sideline2, axis=0)
        vpp = vector_sin(p1 - p2)
        if vpp >= 0:
            top_line = sideline2
            bot_line = sideline1
        else:
            top_line = sideline1
            bot_line = sideline2

        if len(center_line) < 5:
            shrink = 0

        for i in range(shrink, len(center_line) - 1 - shrink):
            c1 = center_line[i]
            c2 = center_line[i + 1]
            top1 = top_line[i]
            top2 = top_line[i + 1]
            bottom1 = bot_line[i]
            bottom2 = bot_line[i + 1]
            p1 = c1 + (top1 - c1) * expand
            p2 = c1 + (bottom1 - c1) * expand
            p3 = c2 + (bottom2 - c2) * expand
            p4 = c2 + (top2 - c2) * expand
            ploy1 = np.stack([p1, p2, p3, p4])
            self.fill_polygon(tcl_msk1, ploy1, value=1)

    def get_training_data(self, image, polygons, k, image_id, image_path):
        H, W, _ = image.shape
        if self.transform:
            image, polygons = self.transform(image, copy.copy(polygons))

        h, w, _ = image.shape
        tr_mask_3 = np.zeros((int(h / 8), int(w / 8), 1), np.uint8)
        train_mask_3 = np.ones((int(h / 8), int(w / 8), 1), np.uint8)
        tcl_mask_3 = np.zeros((int(h / 8), int(w / 8), 1), np.uint8)
        x_map_3 = np.zeros((int(h / 8), int(w / 8), 2 * k + 1), np.float32)
        y_map_3 = np.zeros((int(h / 8), int(w / 8), 2 * k + 1), np.float32)

        tr_mask_4 = np.zeros((int(h / 16), int(w / 16), 1), np.uint8)
        train_mask_4 = np.ones((int(h / 16), int(w / 16), 1), np.uint8)
        tcl_mask_4 = np.zeros((int(h / 16), int(w / 16), 1), np.uint8)
        x_map_4 = np.zeros((int(h / 16), int(w / 16), 2 * k + 1), np.float32)
        y_map_4 = np.zeros((int(h / 16), int(w / 16), 2 * k + 1), np.float32)

        tr_mask_5 = np.zeros((int(h / 32), int(w / 32), 1), np.uint8)
        train_mask_5 = np.ones((int(h / 32), int(w / 32), 1), np.uint8)
        tcl_mask_5 = np.zeros((int(h / 32), int(w / 32), 1), np.uint8)
        x_map_5 = np.zeros((int(h / 32), int(w / 32), 2 * k + 1), np.float32)
        y_map_5 = np.zeros((int(h / 32), int(w / 32), 2 * k + 1), np.float32)

        if polygons is not None:
            for polygon in polygons:
                x_max = polygon.points[:, 0].max()
                x_min = polygon.points[:, 0].min()
                y_max = polygon.points[:, 1].max()
                y_min = polygon.points[:, 1].min()
                dx = x_max - x_min
                dy = y_max - y_min
                criterion = max(dx, dy) / (h + 1e-5)

                polygon.find_bottom_and_sideline()
                sideline1, sideline2, center_points = polygon.equal_width_bbox_cover(step=4.0)
                if criterion < 0.4:
                    self.make_text_region(image, polygon, tr_mask_3, train_mask_3, x_map_3, y_map_3, k, scale=1 / 8)
                    self.make_text_center_line(sideline1 / 8, sideline2 / 8, center_points / 8, tcl_mask_3)
                if criterion > 0.3 and criterion < 0.7:
                    self.make_text_region(image, polygon, tr_mask_4, train_mask_4, x_map_4, y_map_4, k, scale=1 / 16)
                    self.make_text_center_line(sideline1 / 16, sideline2 / 16, center_points / 16, tcl_mask_4)
                if criterion > 0.6:
                    self.make_text_region(image, polygon, tr_mask_5, train_mask_5, x_map_5, y_map_5, k, scale=1 / 32)
                    self.make_text_center_line(sideline1 / 32, sideline2 / 32, center_points / 32, tcl_mask_5)

        # clip value (0, 1)
        tr_mask_3 = np.clip(tr_mask_3, 0, 1)
        train_mask_3 = np.clip(train_mask_3, 0, 1)
        tcl_mask_3 = np.clip(tcl_mask_3, 0, 1)
        tr_mask_4 = np.clip(tr_mask_4, 0, 1)
        train_mask_4 = np.clip(train_mask_4, 0, 1)
        tcl_mask_4 = np.clip(tcl_mask_4, 0, 1)
        tr_mask_5 = np.clip(tr_mask_5, 0, 1)
        train_mask_5 = np.clip(train_mask_5, 0, 1)
        tcl_mask_5 = np.clip(tcl_mask_5, 0, 1)

        label_3 = np.concatenate([tr_mask_3, train_mask_3, x_map_3, y_map_3, tcl_mask_3], axis=2)
        label_4 =
np.concatenate([tr_mask_4, train_mask_4, x_map_4, y_map_4, tcl_mask_4], axis=2)
numpy.concatenate
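The completed call stacks the per-pixel training targets along the channel axis; a sketch with hypothetical sizes:

import numpy as np

h, w, k = 64, 64, 5
tr_mask = np.zeros((h, w, 1), np.float32)
x_map = np.zeros((h, w, 2 * k + 1), np.float32)
label = np.concatenate([tr_mask, x_map], axis=2)  # shape (64, 64, 12)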
from unittest import TestCase

import os.path as osp

import numpy as np

from datumaro.components.annotation import AnnotationType, Bbox
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.util.test_utils import TestDir, compare_datasets
from datumaro.util.test_utils import run_datum as run
import datumaro.plugins.voc_format.format as VOC

from ..requirements import Requirements, mark_requirement


class YoloIntegrationScenarios(TestCase):
    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_save_and_load_yolo_dataset(self):
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='1', subset='train',
                image=np.ones((10, 15, 3)),
                annotations=[
                    Bbox(3.0, 3.0, 2.0, 3.0, label=4),
                    Bbox(0.0, 2.0, 4.0, 2.0, label=2)
                ]
            )
        ], categories=['label_' + str(i) for i in range(10)])

        with TestDir() as test_dir:
            yolo_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
                'tests', 'assets', 'yolo_dataset')

            run(self, 'create', '-o', test_dir)
            run(self, 'import', '-p', test_dir, '-f', 'yolo', yolo_dir)

            export_dir = osp.join(test_dir, 'export_dir')
            run(self, 'export', '-p', test_dir, '-o', export_dir,
                '-f', 'yolo', '--', '--save-images')

            parsed_dataset = Dataset.import_from(export_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_export_mot_as_yolo(self):
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='1', subset='train',
                annotations=[
                    Bbox(0.0, 4.0, 4.0, 8.0, label=2)
                ]
            )
        ], categories=['label_' + str(i) for i in range(10)])

        with TestDir() as test_dir:
            mot_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
                'tests', 'assets', 'mot_dataset')

            run(self, 'create', '-o', test_dir)
            run(self, 'import', '-p', test_dir, '-f', 'mot_seq', mot_dir)

            yolo_dir = osp.join(test_dir, 'yolo_dir')
            run(self, 'export', '-p', test_dir, '-o', yolo_dir,
                '-f', 'yolo', '--', '--save-images')

            parsed_dataset = Dataset.import_from(yolo_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_convert_voc_to_yolo(self):
        target_dataset = Dataset.from_iterable([
            DatasetItem(id='2007_000001', subset='train',
                image=np.ones((10, 20, 3)),
                annotations=[
                    Bbox(1.0, 2.0, 2.0, 2.0, label=8),
                    Bbox(4.0, 5.0, 2.0, 2.0, label=15),
                    Bbox(5.5, 6, 2, 2, label=22),
                ]
            )
        ], categories=[label.name for label in
            VOC.make_voc_categories()[AnnotationType.label]])

        with TestDir() as test_dir:
            voc_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],
                'tests', 'assets', 'voc_dataset', 'voc_dataset1')

            yolo_dir = osp.join(test_dir, 'yolo_dir')
            run(self, 'convert', '-if', 'voc', '-i', voc_dir,
                '-f', 'yolo', '-o', yolo_dir, '--', '--save-images')

            parsed_dataset = Dataset.import_from(yolo_dir, format='yolo')
            compare_datasets(self, target_dataset, parsed_dataset, require_images=True)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_ignore_non_supported_subsets(self):
        source_dataset = Dataset.from_iterable([
            DatasetItem(id='img1', subset='test',
                image=
np.ones((10, 20, 3))
numpy.ones
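np.ones((H, W, C)) is how these tests fabricate a constant dummy image; a one-line sketch:

import numpy as np

image = np.ones((10, 20, 3))  # 10x20 RGB test image, every pixel equal to 1.0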
import numpy as np
from scipy.integrate import odeint
import scipy.integrate as integrator
import matplotlib.pyplot as plt
import time
import sys
from cliente import Cliente  # OPC UA client
import random
import threading


class QuadrupleTank():
    def __init__(self, x0, Hmax, voltmax):
        self.x0 = x0
        self.t = 0

        # Parameters
        self.A = [28, 32, 28, 32]              # cm^2
        self.a = [0.071, 0.057, 0.071, 0.057]  # cm^2
        self.g = 981                           # cm/s^2
        self.rho = 1                           # g/cm^3
        self.kout = 0.5
        self.kin = 3.33
        self.time_scaling = 1
        # self.gamma = [0.7, 0.6]  # %
        self.gamma = [0.35, 0.35]  # %
        self.volt = [0., 0.]       # %
        self.voltmax = voltmax
        self.x = self.x0
        self.ti = 0
        self.Ts = 0
        self.Hmax = Hmax
        self.Hmin = 0.0

    # Physical constraints of the tanks
    def Limites(self):
        for i in range(len(self.x)):
            if self.x[i] > self.Hmax:
                self.x[i] = self.Hmax
            elif self.x[i] < 1e-2:
                self.x[i] = 1e-2
        for i in range(2):
            if self.volt[i] > 1:
                self.volt[i] = 1
            elif self.volt[i] < -1:
                self.volt[i] = -1

    # Differential equations of the tanks
    def xd_func(self, x, t):
        xd0 = -self.a[0] / self.A[0] *
np.sqrt(2 * self.g * x[0])
numpy.sqrt
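The completed term is Torricelli's law: the outflow speed of a tank scales as sqrt(2*g*h), so the level rate gets a -a/A * sqrt(2*g*h) contribution. A sketch using the cgs constants from the prompt and a made-up level:

import numpy as np

g, a, A = 981.0, 0.071, 28.0  # cm/s^2 and cm^2, from the prompt
h = 12.0                      # hypothetical tank level, cm
xd0_term = -a / A * np.sqrt(2 * g * h)  # outflow contribution to dh/dt, cm/s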
from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, \
    BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE

# Deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

import copy
import os
import shutil

import numpy as np
import tensorflow as tf


class Vals:
    nv = 3
    nh = 2
    batch_size = 2

    i_death = 0
    i_birth = 0
    i_predator = 1
    i_prey = 0

    _mu = np.array([19., 45., 62., 4., 8.])

    _cov = np.array([
        [30., 67., 107., 10., 9.],
        [67., 162., 241., 20., 27.],
        [107., 241., 402., 40., 27.],
        [10., 20., 40., 5., 0.],
        [9., 27., 27., 0., 9.]
    ])

    _ncov = np.array([
        [391., 922., 1285., 86., 161.],
        [922., 2187., 3031., 200., 387.],
        [1285., 3031., 4246., 288., 523.],
        [86., 200., 288., 21., 32.],
        [161., 387., 523., 32., 73.]
    ])

    _mu_TE = np.array([3.0, 5.0, 2.0, 1.0, 0.8])

    _ncov_TE = np.array([
        [12.0, 6.0, 3.0, 2.0, 1.0],
        [6.0, 18.0, 4.0, 3.0, 1.0],
        [3.0, 4.0, 16.0, 2.0, 1.0],
        [2.0, 3.0, 2.0, 8.0, 0.5],
        [1.0, 1.0, 1.0, 0.5, 6.0]
    ])

    @classmethod
    def mu_TE(cls):
        return np.tile(cls._mu_TE, (cls.batch_size, 1))

    @classmethod
    def ncov_TE(cls):
        return np.tile(cls._ncov_TE, (cls.batch_size, 1, 1))

    @classmethod
    def mu(cls):
        return np.tile(cls._mu, (cls.batch_size, 1))

    @classmethod
    def cov(cls):
        return
np.tile(cls._cov, (cls.batch_size,1,1))
numpy.tile
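np.tile with reps (batch_size, 1, 1) copies a matrix along a new leading batch axis; a sketch:

import numpy as np

cov = np.eye(5)
batched = np.tile(cov, (2, 1, 1))  # shape (2, 5, 5): one copy per batch element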
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
NAME
    Global field generator for remapping intercomparison
PURPOSE
    Reads 2 mesh data files (Exodus or SCRIP) and evaluates any one of, or
    combination of 3 fields (TPW, Cloud Fraction, Terrain) derived from
    Spherical Harmonic expansions of satellite global composite data.
PROGRAMMER(S)
    <NAME>, <NAME>, <NAME>
REVISION HISTORY

REFERENCES
'''
# %%
import shutil
import time
import sys
import getopt
import pyshtools
import math as mt
import numpy as np
from numpy import matlib
import plotly as py
import plotly.figure_factory as FF
from scipy.spatial import Delaunay
from netCDF4 import Dataset  # http://code.google.com/p/netcdf4-python/
from computeAreaIntegral import computeAreaIntegral, computeAreaIntegralWithGQ, getGaussNodesWeights
import computeSphericalCartesianTransforms as sphcrt

import multiprocessing
from multiprocessing import Process
from itertools import repeat

# %% Utility functions

def computeSpectrum(ND, lfPower, hfPower, degIntersect):
    psd = np.zeros(ND)
    # Compute power spectrum array from coefficients (Power Law assumed)
    degs = np.arange(ND, dtype=float)
    # degs[0] = np.inf
    degs[0] = 1.0E-8

    # Check that we aren't fitting a constant function (Terrain)
    for ii in range(ND):
        if degs[ii] < degIntersect:
            if lfPower[1] > -5.0:
                psd[ii] = lfPower[0] * np.power(degs[ii], lfPower[1]) + lfPower[2]
            else:
                psd[ii] = lfPower[2]
        elif degs[ii] >= degIntersect:
            if hfPower[1] > -5.0:
                psd[ii] = hfPower[0] * np.power(degs[ii], hfPower[1]) + hfPower[2]
            else:
                psd[ii] = hfPower[2]

    return degs, psd

def evaluate_field_a2(lon, lat):
    # thisVar = (2.0 + np.cos(dFLonLat[1]) * np.cos(dFLonLat[1]) * np.cos(2.0 * dFLonLat[0]))  # test == 1
    # thisVar = (2.0 + (np.sin(2.0 * dFLonLat[1]))**16.0 * np.cos(16.0 * dFLonLat[0]))  # test == 2
    # print(lon, lat, (2.0 + np.cos(lat) * np.cos(lat) * np.cos(2.0 * lon)))
    return (2.0 + np.cos(lat) * np.cos(lat) * np.cos(2.0 * lon))

def computeCellAverageSerial(clm, varCon, varCoord, order, avg):
    # Compute the number of cells and initialize
    NEL = np.size(varCon, 0)
    varSample = np.zeros(NEL)

    # Loop over each cell and get cell average
    for ii in range(NEL):
        # NP.UNIQUE SORTS AND DESTROYS CONNECTIVITY CELL NORMALS!!!
        cdex = varCon[ii, :] - 1
        thisCell = varCoord[:, cdex]
        varSample[ii] = computeAreaIntegral(clm, thisCell, order, avg, False)

    return varSample

def computeCellAverage(clm, varCon, varCoord, order, avg, nprocs):
    # return computeCellAverageSerial(clm, varCon, varCoord, order, avg)
    # Compute the number of cells and initialize
    NEL = np.size(varCon, 0)
    varSample = np.zeros(NEL,)

    GN, GW = getGaussNodesWeights(order)

    # Loop over each cell and get cell average
    pool = multiprocessing.Pool(processes=nprocs)
    results = pool.starmap(computeAreaIntegralWithGQ, zip(
        repeat(clm), [varCoord[:, varCon[ii, :] - 1] for ii in range(NEL)],
        repeat(GN), repeat(GW), repeat(avg), repeat(False)))
    pool.close()
    pool.join()
    varSample = np.array(results, dtype='f8')[:, 0]
    varAreas = np.array(results, dtype='f8')[:, 1]

    return varSample

def computeRandomizedCoefficients(ND):
    # Initialize the coefficients array
    coeffs = np.zeros((2, ND, ND))

    # Set the random integer seed
    seed = 384

    # Loop over ND (number of degrees)
    for kk in range(ND):
        nrand = np.ones((2, kk + 1))
        # Initialize random numbers with number of coefficients at this degree
        if kk == 0:
            rand = (1103515245 * seed + 25214903917 + 12345) % 2147483647

        # Loop over the coefficients at this degree
        for ll in range(0, kk + 1):
            nrand[0, ll] = rand
            rand = (1103515245 * rand + 25214903917 + 12345) % 2147483647
            nrand[1, ll] = rand
            rand = (1103515245 * rand + 25214903917 + 12345) % 2147483647

        # Turn the random set into double
        nrand = np.multiply(nrand, 1.0 / 2147483647.0)

        # Set the coefficients at degree kk+1
        coeffs[:2, kk, :kk + 1] = 2.0 * np.add(2.0 * nrand[:2, :], -1.0)

    return coeffs

def computeNormalizedCoefficients(N, psd, coeffsLD):
    # Initialize SHCoeffs with a randomized realization of coefficients
    clm = pyshtools.SHCoeffs.from_random(psd, seed=384)

    # Compute the randomized coefficients and update instance of SHCoeffs
    clm.coeffs = computeRandomizedCoefficients(ND)

    # Force the coefficients to have the same power as the given spectrum
    power_per_l = pyshtools.spectralanalysis.spectrum(clm.coeffs, normalization='4pi', unit='per_l')
    clm.coeffs *= np.sqrt(psd[0:ND] * np.reciprocal(power_per_l))[np.newaxis, :, np.newaxis]

    # Combine the coefficients, low degree from data and high degree randomized
    clm.coeffs[0, 0:4, 0:4] = coeffsLD

    # Returns the SH coefficients object
    return clm

# Parse the command line
def parseCommandLine(argv):
    # Mesh information files
    sampleMesh = ''
    ExodusSingleConn = False
    ExodusMultiConn = False
    SCRIPwithoutConn = False
    SCRIPwithConn = False
    SpectralElement = False

    # Sampling order
    sampleCentroid = False
    sampleOrder = 4

    # SET WHICH FIELDS TO EVALUATE
    EvaluateAll = False
    EvaluateTPW = False  # Total Precipitable Water
    EvaluateCFR = False  # Global Cloud Fraction
    EvaluateTPO = False  # Global topography
    EvaluateA1 = False   # Analytical function 1
    EvaluateA2 = False   # Analytical function 2

    ShowPlots = False  # Whether we want to show the profile plots for variables

    # Number of modes used up to 512
    numModes = 32

    # Pseudo-random number generator seed
    seed = 384

    # Number of processes to use for sampling
    nprocs = 1

    def usage():
        print('Driver Usage:\n',
              'CANGAFieldGenDriver.py',
              '--pm <sampleMeshFile>',
              '--so <sampleOrderInteger>',
              '--nm <numberSHModesMax768>',
              '--rseed <randnumSeed>',
              '--evaluateAllFields',
              '--evaluateTotalPrecipWater',
              '--evaluateCloudFraction',
              '--evaluateGlobalTerrain',
              '--evaluateA1',
              '--evaluateA2',
              '--showPlots',
              '--meshConfiguration',
              '--SpectralElementMesh',
              '--processes <nprocs>')

    try:
        opts, args = getopt.getopt(argv, 'hv:', ['pm=', 'so=',
                                                 'nm=', 'rseed=',
                                                 'evaluateAllFields',
                                                 'evaluateTotalPrecipWater',
                                                 'evaluateCloudFraction',
                                                 'evaluateGlobalTerrain',
                                                 'evaluateA1', 'evaluateA2',
                                                 'showPlots',
                                                 'ExodusSingleConn', 'ExodusMultiConn',
                                                 'SCRIPwithoutConn', 'SCRIPwithConn',
                                                 'SpectralElementMesh',
                                                 'processes='])
    except getopt.GetoptError:
        print('Command line arguments were not properly set or error in parsing.\n')
        usage()
        sys.exit(2)

    for opt, arg in opts:
        # Request for usage help
        if opt == '-h':
            usage()
            sys.exit()
        elif opt == '--pm':
            sampleMesh = arg
        elif opt == '--so':
            if int(arg) == 1:
                sampleOrder = int(arg)
                sampleCentroid = True
            else:
                if int(arg) % 2 == 0 and int(arg) < 200:
                    sampleOrder = int(arg)
                else:
                    sys.exit("[FATAL] Error in option passed for --so. Sample order must be \\in (0, 200)")
        elif opt == '--nm':
            numModes = int(arg)
        elif opt == '--rseed':
            seed = int(arg)
        elif opt == '--evaluateAllFields':
            EvaluateAll = True
        elif opt == '--evaluateTotalPrecipWater':
            EvaluateTPW = True
        elif opt == '--evaluateCloudFraction':
            EvaluateCFR = True
        elif opt == '--evaluateGlobalTerrain':
            EvaluateTPO = True
        elif opt == '--evaluateA1':
            EvaluateA1 = True
        elif opt == '--evaluateA2':
            EvaluateA2 = True
        elif opt == '--ExodusSingleConn':
            ExodusSingleConn = True
        elif opt == '--ExodusMultiConn':
            ExodusMultiConn = True
        elif opt == '--SCRIPwithoutConn':
            SCRIPwithoutConn = True
        elif opt == '--SCRIPwithConn':
            SCRIPwithConn = True
        elif opt == '--SpectralElementMesh':
            SpectralElement = True
        elif opt == '--showPlots':
            ShowPlots = True
        elif opt == '--processes':
            nprocs = int(arg)

    # Check that the number of modes requested doesn't exceed 512
    if numModes > 512:
        print('Setting maximum number of expansion modes: 512.')
        numModes = 512

    # Check that only one configuration is chosen
    configs = [ExodusSingleConn, ExodusMultiConn, SCRIPwithoutConn, SCRIPwithConn]
    numConfigs = sum(bool(x) for x in configs)
    if numConfigs > 1:
        print('ONE mesh configuration option must be set!')
        print('None of the options are set.')
        sys.exit(2)

    if EvaluateAll:
        EvaluateTPW = EvaluateCFR = EvaluateTPO = EvaluateA1 = EvaluateA2 = True

    if 2 * sampleOrder - 1 < numModes:
        print("WARNING: The quadrature sampling order of %d is insufficient to exactly integrate SPH expansions of order %d!" % (sampleOrder, numModes))

    return sampleMesh, numModes, seed, \
        sampleCentroid, sampleOrder, \
        EvaluateTPW, EvaluateCFR, EvaluateTPO, \
        EvaluateA1, EvaluateA2, ShowPlots, \
        ExodusSingleConn, ExodusMultiConn, SCRIPwithoutConn, \
        SCRIPwithConn, SpectralElement, nprocs

if __name__ == '__main__':
    print('Welcome to CANGA remapping intercomparison field generator!')
    print('Authors: <NAME>, <NAME>, <NAME>, 2019')

    # Parse the commandline! COMMENT OUT TO RUN IN IDE
    mesh_file, ND, seed, sampleCentroid, sampleOrder, \
        EvaluateTPW, EvaluateCFR, EvaluateTPO, \
        EvaluateA1, EvaluateA2, ShowPlots, \
        ExodusSingleConn, ExodusMultiConn, SCRIPwithoutConn, \
        SCRIPwithConn, SpectralElement, nprocs \
        = parseCommandLine(sys.argv[1:])

    # Set the name for the new data file
    stripDir = mesh_file.split('/')
    onlyFilename = stripDir[len(stripDir) - 1]
    data_file = 'sample_NM' + str(ND) + '_O' + str(sampleOrder) + '_' + (onlyFilename.split('.'))[0]

    # Let us decipher what our final output file name should be with
    # appropriate suffixes
    outFileName = data_file

    if SpectralElement:
        outFileName += '_GLL'

    if EvaluateTPW:
        outFileName += '_TPW'
    if EvaluateCFR:
        outFileName += '_CFR'
    if EvaluateTPO:
        outFileName += '_TPO'
    if EvaluateA1:
        outFileName += '_A1'
    if EvaluateA2:
        outFileName += '_A2'

    outFileName += '.nc'

    print('File name for sampled mesh data: ', outFileName)
    print('Number of SH degrees for sampling set to: ', ND)
    print('Maximum Gaussian quadrature order to be used: ', 2 * sampleOrder - 1)

    if ExodusSingleConn or ExodusMultiConn:
        if SpectralElement:
            connCell = 'element_gll_conn'
            coordCell = 'grid_gll_cart'
        else:
            if ExodusSingleConn:
                connCell = 'connect1'
            elif ExodusMultiConn:
                connCell = 'connect0'
            coordCell = 'coord'

        # Open the .g mesh files for reading
        m_fid = Dataset(mesh_file)

        # Get connectivity and coordinate arrays (check for multiple connectivity)
        varCon = m_fid.variables[connCell][:]
        varCoord = m_fid.variables[coordCell][:]

        # Get the rectilinear attribute if available
        try:
            print('Rectilinear mesh detected; field variable written as 2D')
            rectilinear = m_fid.rectilinear
            # Get the 2D size of the field array from mesh file
            NLON = m_fid.rectilinear_dim1_size
            NLAT = m_fid.rectilinear_dim0_size
        except BaseException:
            print('NOT a rectilinear mesh.')
            rectilinear = False

    elif ExodusMultiConn:
        numElTypes = 'num_el_blk'
        numDims = 'cart_dims'
        connCell = 'element_corners_id'
        coordCell = 'grid_corners_cart'
        numVerts = 'grid_corners_size'

        # Open the .g mesh files for reading
        m_fid = Dataset(mesh_file)

        start = time.time()
        # Get connectivity and coordinate arrays
        varConnList = []
        numVertList = []
        numConnBlocks = len(m_fid.dimensions[numElTypes])
        for cc in range(numConnBlocks):
            # Get this connectivity array (El X corners)
            connName = 'connect' + str(cc + 1)
            thisConn = m_fid.variables[connName][:]

            # Get the number of corners for this connectivity block
            # Column dimension of connectivity
            numVertList.append(thisConn.shape[1])

            # Append to the list of connectivity blocks
            varConnList.append(m_fid.variables[connName][:])

        # Get the maximum number of vertices
        maxVerts = np.amax(np.array(numVertList))

        # Loop over the blocks again and pad columns up to the max vertices
        for cc in range(numConnBlocks):
            numVert2Pad = maxVerts - numVertList[cc]

            if numVert2Pad == 0:
                continue

            # Pad with redundant last coord ID up to the max vertices
            lastCol = np.expand_dims(varConnList[cc][:, -1], axis=1)
            thisPadding = np.matlib.repmat(lastCol, 1, numVert2Pad)
            varConnList[cc] = np.hstack((varConnList[cc], thisPadding))

        # Vertical stack of the connectivity lists
        varCon = np.vstack(tuple(varConnList))
        varCoord = m_fid.variables['coord'][:]

        try:
            print('Storing connectivity and coordinate arrays from Exodus mesh files.')
            numEdges = 'num_nod_per_el'
            numCells = 'num_el_in_blk'
            meshFileOut = m_fid.createDimension(numEdges, maxVerts)
            meshFileOut = m_fid.createDimension(numCells, varCon.shape[0])
            meshFileOut = m_fid.createDimension(numVerts, np.size(varCoord, 1))
            meshFileOut = m_fid.createDimension(numDims, 3)

            meshFileOut = m_fid.createVariable(connCell, 'i4', (numCells, numEdges))
            meshFileOut[:] = varCon
            meshFileOut = m_fid.createVariable(coordCell, 'f8', (numDims, numVerts))
            meshFileOut[:] = varCoord

        except RuntimeError:
            print('Cell connectivity and grid vertices exist in mesh data file.')

        endt = time.time()
        print('Time to precompute EXODUS multi-connectivity mesh info (sec): ', endt - start)

    elif SCRIPwithoutConn:
        numEdges = 'grid_corners'
        numCells = 'grid_size'
        numDims = 'cart_dims'
        numVerts = 'grid_corners_size'

        if SpectralElement:
            connCell = 'element_gll_conn'
            coordCell = 'grid_gll_cart'
        else:
            connCell = 'element_corners_id'
            coordCell = 'grid_corners_cart'

        # Open the .nc SCRIP files for reading
        m_fid = Dataset(mesh_file)

        start = time.time()
        try:
            print('Reading connectivity and coordinate arrays from raw SCRIP')
            varCon = m_fid.variables[connCell][:]
            varCoord = m_fid.variables[coordCell][:]
        except BaseException:
            print('PRE-PROCESSING NOT DONE ON THIS MESH FILE!')

        endt = time.time()
        print('Time to read SCRIP mesh info (sec): ', endt - start)

    elif SCRIPwithConn:
        numEdges = 'ncorners'
        numCells = 'ncells'
        numDims = 'cart_dims'

        if SpectralElement:
            connCell = 'element_gll_conn'
            coordCell = 'grid_gll_cart'
        else:
            connCell = 'element_corners'
            coordCell = 'grid_corners_cart'

        # Open the .nc SCRIP files for reading
        m_fid = Dataset(mesh_file)

        # Get the list of available variables
        varList = m_fid.variables.keys()

        # Get RAW (no ID) connectivity and coordinate arrays
        varCon = m_fid.variables[connCell][:]
        varCon = varCon.T

        start = time.time()
        try:
            print('Reading coordinate arrays from raw SCRIP')
            varCoord = m_fid.variables[coordCell][:]
        except BaseException:
            print('PRE-PROCESSING NOT DONE ON THIS MESH FILE!')

        endt = time.time()
        print('Time to read SCRIP mesh info (sec): ', endt - start)

    if SpectralElement:
        # Compute Lon/Lat coordinates from GLL nodes
        varLonLat = sphcrt.computeCart2LL(varCoord.T)
    else:
        # Compute Lon/Lat coordinates from centroids
        varCent = sphcrt.computeCentroids(varCon, varCoord)
        varLonLat = sphcrt.computeCart2LL(varCent)

    # Convert to degrees from radians
    varLonLat_deg = 180.0 / mt.pi * varLonLat

    m_fid.close()

    # Define our global variables for fields
    TPWvar = np.zeros(3)
    CFRvar = np.zeros(3)
    TPOvar = np.zeros(3)

    # %% Begin the SH reconstructions
    def Evaluate_TPW_Field():
        start = time.time()
        print('Computing Total Precipitable Water on sampling mesh...')
        # Set the power spectrum coefficients
        lfPower = [5.84729561e+04, -2.91678103e-04, -5.83966265e+04]
        hfPower = [2.17936330e+02, -1.99788552e+00, -7.94469251e-04]
        degIntersect = 1.8161917668847762
        # Compute the parent power spectrum for TPW
        degsTPW, psdTPW = computeSpectrum(ND, lfPower, hfPower, degIntersect)

        # Set the low degree coefficients (large scale structures)
        coeffsLD_TPW = np.array([[2.45709150e+01, 0.0, 0.0, 0.0],
                                 [4.00222122e+00, 2.39412571e+00, 0.0, 0.0],
                                 [-1.36433589e+01, 3.90520866e-03, 4.70350344e-01, 0.0],
                                 [-3.54931720e+00, -1.23629157e+00, 4.01454924e-01, 1.76782768e+00]])

        # Make the SH coefficients object for this field
        clmTPW = computeNormalizedCoefficients(ND, psdTPW, coeffsLD_TPW)

        # Evaluate actual spherical harmonic modes as solution;
        # change ls, ms below
        # lmax = 100
        # clmTPW = pyshtools.SHCoeffs.from_zeros(lmax)
        # clmTPW.set_coeffs(values=[1], ls=[2], ms=[2])

        # THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
        # Expand the coefficients and check the field
        if sampleCentroid or SpectralElement:
            TPWvar = clmTPW.expand(lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
        else:
            TPWvar = computeCellAverage(clmTPW, varCon, varCoord, sampleOrder, True, nprocs)
            print('Total Precipitable Water Global integral: ', np.sum(TPWvar))

        # Compute rescaled data from 0.0 to max
        minTPW = np.amin(TPWvar)
        maxTPW = np.amax(TPWvar)
        deltaTPW = abs(maxTPW - minTPW)
        deltaTPW = deltaTPW if deltaTPW > 1e-10 else 1.0
        TPWvar = np.add(TPWvar, -minTPW)
        TPWvar *= maxTPW / deltaTPW
        endt = time.time()
        print('Time to compute TPW (mm): ', endt - start)

        return_dict['TPWvar'] = TPWvar

    # %%
    def Evaluate_CFR_Field():
        start = time.time()
        print('Computing Cloud Fraction on sampling mesh...')
        # Set the power spectrum coefficients
        lfPower = [8.38954430e+00, -1.85962382e-04, -8.38439294e+00]
        hfPower = [1.25594628e-01, -1.99203168e+00, 1.91763519e-06]
        degIntersect = 8.322269484619733
        # Compute the parent power spectrum for CFR
        degsCFR, psdCFR = computeSpectrum(ND, lfPower, hfPower, degIntersect)

        # Set the low degree coefficients (large scale structures)
        coeffsLD_CFR = np.array([[6.65795054e-01, 0.0, 0.0, 0.0],
                                 [-2.45480409e-02, 2.24697424e-02, 0.0, 0.0],
                                 [5.72322008e-02, 3.41184683e-02, -7.71082815e-03, 0.0],
                                 [1.86562455e-02, 4.34697733e-04, 8.91735978e-03, -5.53756958e-03]])

        # Make the SH coefficients object for this field
        clmCFR = computeNormalizedCoefficients(ND, psdCFR, coeffsLD_CFR)

        # THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
        # Expand the coefficients and check the field
        if sampleCentroid or SpectralElement:
            CFRvar = clmCFR.expand(lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
        else:
            CFRvar = computeCellAverage(clmCFR, varCon, varCoord, sampleOrder, True, nprocs)
            print('Cloud Fraction Global integral: ', np.sum(CFRvar))

        # Compute rescaled data from 0.0 to max
        minCFR = np.amin(CFRvar)
        maxCFR = np.amax(CFRvar)
        deltaCFR = abs(maxCFR - minCFR)
        deltaCFR = deltaCFR if deltaCFR > 1e-10 else 1.0
        CFRvar = np.add(CFRvar, -minCFR)
        CFRvar *= maxCFR / deltaCFR
        # Set all values greater than 1.0 to 1.0 (creates discontinuities)
        CFRvar[CFRvar >= 1.0] = 1.0

        endt = time.time()
        print('Time to compute CFR (0.0 to 1.0): ', endt - start)

        return_dict['CFRvar'] = CFRvar

    # %%
    def Evaluate_TPO_Field():
        start = time.time()
        print('Computing Global Terrain on sampling mesh...')
        # Set the power spectrum coefficients
        lfPower = [1.79242815e+05, -4.28193211e+01, 7.68040558e+05]
        hfPower = [9.56198160e+06, -1.85485966e+00, -2.63553217e+01]
        degIntersect = 3.8942282772035255
        # Compute the parent power spectrum for CFR
        degsTPO, psdTPO = computeSpectrum(ND, lfPower, hfPower, degIntersect)

        # Set the low degree coefficients (large scale structures)
        coeffsLD_TPO = np.array([[-2.38452711e+03, 0.0, 0.0, 0.0],
                                 [-6.47223253e+02, -6.06453097e+02, 0.0, 0.0],
                                 [5.67394318e+02, 3.32672611e+02, -4.17639577e+02, 0.0],
                                 [1.57403492e+02, 1.52896988e+02, 4.47106726e+02, -1.40553447e+02]])

        # Make the SH coefficients object for this field
        clmTPO = computeNormalizedCoefficients(ND, psdTPO, coeffsLD_TPO)

        # THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS
        # Expand the coefficients and check the field
        if sampleCentroid or SpectralElement:
            TPOvar = clmTPO.expand(lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1])
        else:
            TPOvar = computeCellAverage(clmTPO, varCon, varCoord, sampleOrder, True, nprocs)
            print('Global Terrain Global integral: ', np.sum(TPOvar))

        # Rescale to -1.0 to 1.0
        minTPO = np.amin(TPOvar)
        maxTPO = np.amax(TPOvar)
        deltaTPO = abs(maxTPO - minTPO)
        deltaTPO = deltaTPO if deltaTPO > 1e-10 else 1.0
        TPOvar = np.add(TPOvar, -0.5 * (maxTPO + minTPO))
        TPOvar *= 2.0 / deltaTPO

        # Rescale topography to real Earth max/min
        minTPO = -10994.0  # Depth at Challenger Deep
        maxTPO
= 8848.0 # Elevation of Mt. Everest ASL deltaTPO = abs(maxTPO - minTPO) TPOvar *= (0.5 * deltaTPO) TPOvar += 0.5 * (maxTPO + minTPO) endt = time.time() print('Time to compute TPO (m): ', endt - start) return_dict['TPOvar'] = TPOvar # %% def Evaluate_A1_Field(): start = time.time() print('Computing Analytical Field 1 sampling on mesh...') # Evaluate actual spherical harmonic modes as solution; # change ls, ms below lmax = 100 clmA1 = pyshtools.SHCoeffs.from_zeros(lmax) # This evaluates P_3^3 clmA1.set_coeffs(values=[1], ls=[3], ms=[2]) clmA1.set_coeffs(values=[1], ls=[3], ms=[3]) # THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS # Expand the coefficients and check the field if sampleCentroid or SpectralElement: A1var = clmA1.expand( lon=varLonLat_deg[:, 0], lat=varLonLat_deg[:, 1]) print( 'Analytical Solution 1 Global sum: ', np.sum(A1var) / A1var.shape[0]) else: A1var = computeCellAverage( clmA1, varCon, varCoord, sampleOrder, True, nprocs) print( 'Analytical Solution 1 Global integral: ', np.sum(A1var) / A1var.shape[0]) endt = time.time() print('Time to compute A1 Field: ', endt - start) return_dict['A1var'] = A1var # %% def Evaluate_A2_Field(): start = time.time() print('Computing Analytical Field 2 sampling on mesh...') # THIS NEEDS TO CHANGE TO SUPPORT FE GRIDS # Expand the coefficients and check the field # if sampleCentroid or SpectralElement: if sampleCentroid or SpectralElement: A2var = evaluate_field_a2(lon=varLonLat[:, 0], lat=varLonLat[:, 1]) print( 'Analytical Solution 2 Global sum: ', np.sum(A2var) / A2var.shape[0]) else: # A2var = computeCellAverageSerial(evaluate_field_a2, varCon, varCoord, sampleOrder, True) A2var = computeCellAverage( evaluate_field_a2, varCon, varCoord, sampleOrder, True, nprocs) print( 'Analytical Solution 2 Global integral: ', np.sum(A2var) / A2var.shape[0]) endt = time.time() print('Time to compute A2 Field: ', endt - start) return_dict['A2var'] = A2var # %% manager = multiprocessing.Manager() return_dict = manager.dict() # Let us aggregate all the jobs that need to be done and then # let the multiprocessing manager take care of it. 
jobs = [] evaluation_routines = [] if EvaluateTPW: evaluation_routines.append(Evaluate_TPW_Field) if EvaluateCFR: evaluation_routines.append(Evaluate_CFR_Field) if EvaluateTPO: evaluation_routines.append(Evaluate_TPO_Field) if EvaluateA1: evaluation_routines.append(Evaluate_A1_Field) if EvaluateA2: evaluation_routines.append(Evaluate_A2_Field) for fn in evaluation_routines: p = Process(target=fn) jobs.append(p) p.start() for p in jobs: p.join() if EvaluateTPW: TPWvar = return_dict['TPWvar'] if EvaluateCFR: CFRvar = return_dict['CFRvar'] if EvaluateTPO: TPOvar = return_dict['TPOvar'] if EvaluateA1: A1var = return_dict['A1var'] if EvaluateA2: A2var = return_dict['A2var'] # %% Copy grid files and store the new test data (source and target) shutil.copy(mesh_file, outFileName) # write lon, lat, and test data variables data_fid = Dataset(outFileName, 'a') # Set the dimension name depending on the mesh file format if ExodusSingleConn: numCells = 'num_el_in_blk1' elif ExodusMultiConn: numCells = 'num_el_in_blk0' elif SCRIPwithoutConn: numCells = 'grid_size' elif SCRIPwithConn: numCells = 'ncells' if SpectralElement: numCells = 'grid_gll_size' # Process the sampling file if SCRIPwithConn: lonNC = data_fid.createVariable('nlon', 'f8', (numCells,)) lonNC[:] = varLonLat_deg[:, 0] latNC = data_fid.createVariable('nlat', 'f8', (numCells,)) latNC[:] = varLonLat_deg[:, 1] else: lonNC = data_fid.createVariable( 'lon', 'f8', (numCells,)) if 'lon' not in data_fid.variables.keys() else data_fid.variables['lon'] lonNC[:] = varLonLat_deg[:, 0] latNC = data_fid.createVariable( 'lat', 'f8', (numCells,)) if 'lat' not in data_fid.variables.keys() else data_fid.variables['lat'] latNC[:] = varLonLat_deg[:, 1] if rectilinear: slon = 'lonDim' slat = 'latDim' data_fid.createDimension(slon, NLON) data_fid.createDimension(slat, NLAT) if EvaluateTPW: TPWNC = data_fid.createVariable('TotalPrecipWater', 'f8', (slat, slon)) if 'TotalPrecipWater' not in data_fid.variables.keys( ) else data_fid.variables['TotalPrecipWater'] field =
np.reshape(TPWvar, (NLAT, NLON))
numpy.reshape
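A minimal standalone sketch of the reshape step this prompt ends on, using hypothetical grid sizes in place of the NLAT/NLON values read from the mesh file:

import numpy as np

NLAT, NLON = 4, 8                             # hypothetical rectilinear dims
TPWvar = np.linspace(0.0, 55.0, NLAT * NLON)  # flat per-cell field, lat-major

# Reshape to the 2D layout expected by the (slat, slon) NetCDF variable.
field = np.reshape(TPWvar, (NLAT, NLON))
assert field.shape == (NLAT, NLON)
assert np.array_equal(field.ravel(), TPWvar)  # reshape preserves the flat order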
#! /usr/bin/env python3
import os
import sys
import numpy as np
from multiprocessing import Pool
from datetime import datetime

import arrow

data_dir = 'raw_data/'
out_dir = 'clean_data/'
out_dir = os.path.dirname(out_dir) + '/'
if out_dir:
    os.makedirs(out_dir, exist_ok=True)


def decode_to_bool(bytes_to_decode):
    return bytes_to_decode == b'True'


def process_motion(fname):
    print(fname)
    data = np.loadtxt(data_dir + fname, dtype='bool', delimiter=',',
                      usecols=1, converters={1: decode_to_bool})
    times = np.loadtxt(data_dir + fname, dtype='datetime64', delimiter=',',
                       usecols=0, converters={0: np.datetime64})
    # Add 30 s before truncating so timestamps round to the nearest minute.
    times = (times + np.timedelta64(30, 's')).astype('datetime64[m]')
    return clean_and_save(times, data, fname)


def process_light(fname):
    data = np.loadtxt(data_dir + fname, delimiter=',', usecols=1)
    times = np.loadtxt(data_dir + fname, dtype='datetime64', delimiter=',',
                       usecols=0, converters={0: np.datetime64})
    times = (times + np.timedelta64(30, 's')).astype('datetime64[m]')
    return clean_and_save(times, data, fname)


def clean_and_save(times, data, fname):
    # Split the data into multiple spans if it represents several periods
    # separated by at least a day of no data.
    ind = []
    previous_time = times[0]
    for i, time in enumerate(times):
        if time - previous_time >= np.timedelta64(1, 'D'):
            ind.append(i)
        previous_time = time
    time_spans = np.split(times, ind)
    data_spans = np.split(data, ind)

    for data_span, time_span in zip(data_spans, time_spans):
        # Fill in or average duplicates in the uncleaned data:
        # if multiple data points represent the same minute, average them;
        # if a minute's data point is missing, use the previous minute's value.
        # If we aren't looking at several days or more of data, skip the span.
        if time_span[-1] - time_span[0] < np.timedelta64(4, 'D'):
            continue
        minutes = np.arange(time_span[0], time_span[-1], dtype='datetime64[m]')
        clean_data = np.ndarray((minutes.shape[0], 2))
        for i, minute in enumerate(minutes):
            clean_data[i, 0] = arrow.get(minute.astype(datetime)).timestamp
            match = data_span[time_span == minute]
            if match.shape[0] > 1:
                if type(match[0]) is np.bool_:
                    clean_data[i, 1] =
np.mean(match)
numpy.mean
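A minimal sketch of the averaging step the prompt is truncated at, with hypothetical readings; np.mean over a boolean slice gives the fraction of True values, so conflicting motion samples within one minute average to 0.5:

import numpy as np

# Hypothetical sensor readings: three points, two sharing the same minute.
time_span = np.array(['2021-01-01T00:00', '2021-01-01T00:00',
                      '2021-01-01T00:01'], dtype='datetime64[m]')
data_span = np.array([True, False, True])

minute = np.datetime64('2021-01-01T00:00', 'm')
match = data_span[time_span == minute]
print(np.mean(match))  # 0.5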
"""Contains the audio featurizer class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from data_utils.utility import read_manifest from data_utils.audio import AudioSegment from python_speech_features import mfcc from python_speech_features import delta class AudioFeaturizer(object): """Audio featurizer, for extracting features from audio contents of AudioSegment or SpeechSegment. Currently, it supports feature types of linear spectrogram and mfcc. :param specgram_type: Specgram feature type. Options: 'linear'. :type specgram_type: str :param stride_ms: Striding size (in milliseconds) for generating frames. :type stride_ms: float :param window_ms: Window size (in milliseconds) for generating frames. :type window_ms: float :param max_freq: When specgram_type is 'linear', only FFT bins corresponding to frequencies between [0, max_freq] are returned; when specgram_type is 'mfcc', max_feq is the highest band edge of mel filters. :types max_freq: None|float :param target_sample_rate: Audio are resampled (if upsampling or downsampling is allowed) to this before extracting spectrogram features. :type target_sample_rate: float :param use_dB_normalization: Whether to normalize the audio to a certain decibels before extracting the features. :type use_dB_normalization: bool :param target_dB: Target audio decibels for normalization. :type target_dB: float """ def __init__(self, specgram_type='linear', stride_ms=10.0, window_ms=20.0, max_freq=None, target_sample_rate=16000, use_dB_normalization=True, target_dB=-20): self._specgram_type = specgram_type self._stride_ms = stride_ms self._window_ms = window_ms self._max_freq = max_freq self._target_sample_rate = target_sample_rate self._use_dB_normalization = use_dB_normalization self._target_dB = target_dB def featurize(self, audio_segment, allow_downsampling=True, allow_upsampling=True): """Extract audio features from AudioSegment or SpeechSegment. :param audio_segment: Audio/speech segment to extract features from. :type audio_segment: AudioSegment|SpeechSegment :param allow_downsampling: Whether to allow audio downsampling before featurizing. :type allow_downsampling: bool :param allow_upsampling: Whether to allow audio upsampling before featurizing. :type allow_upsampling: bool :return: Spectrogram audio feature in 2darray. :rtype: ndarray :raises ValueError: If audio sample rate is not supported. """ # upsampling or downsampling if ((audio_segment.sample_rate > self._target_sample_rate and allow_downsampling) or (audio_segment.sample_rate < self._target_sample_rate and allow_upsampling)): audio_segment.resample(self._target_sample_rate) if audio_segment.sample_rate != self._target_sample_rate: raise ValueError("Audio sample rate is not supported. " "Turn allow_downsampling or allow up_sampling on.") # decibel normalization if self._use_dB_normalization: audio_segment.normalize(target_db=self._target_dB) # extract spectrogram return self._compute_specgram(audio_segment.samples, audio_segment.sample_rate) def _compute_specgram(self, samples, sample_rate): """Extract various audio features.""" if self._specgram_type == 'linear': return self._compute_linear_specgram( samples, sample_rate, self._stride_ms, self._window_ms, self._max_freq) elif self._specgram_type == 'mfcc': return self._compute_mfcc(samples, sample_rate, self._stride_ms, self._window_ms, self._max_freq) else: raise ValueError("Unknown specgram_type %s. " "Supported values: linear." 
% self._specgram_type) def _compute_linear_specgram(self, samples, sample_rate, stride_ms=10.0, window_ms=20.0, max_freq=None, eps=1e-14): """Compute the linear spectrogram from FFT energy.""" if max_freq is None: max_freq = sample_rate / 2 if max_freq > sample_rate / 2: raise ValueError("max_freq must not be greater than half of " "sample rate.") if stride_ms > window_ms: raise ValueError("Stride size must not be greater than " "window size.") stride_size = int(0.001 * sample_rate * stride_ms) window_size = int(0.001 * sample_rate * window_ms) specgram, freqs = self._specgram_real( samples, window_size=window_size, stride_size=stride_size, sample_rate=sample_rate) ind = np.where(freqs <= max_freq)[0][-1] + 1 return np.log(specgram[:ind, :] + eps) def _specgram_real(self, samples, window_size, stride_size, sample_rate): """Compute the spectrogram for samples from a real signal.""" # extract strided windows truncate_size = (len(samples) - window_size) % stride_size samples = samples[:len(samples) - truncate_size] nshape = (window_size, (len(samples) - window_size) // stride_size + 1) nstrides = (samples.strides[0], samples.strides[0] * stride_size) windows = np.lib.stride_tricks.as_strided( samples, shape=nshape, strides=nstrides) assert np.all( windows[:, 1] == samples[stride_size:(stride_size + window_size)]) # window weighting, squared Fast Fourier Transform (fft), scaling weighting = np.hanning(window_size)[:, None] fft = np.fft.rfft(windows * weighting, axis=0) fft = np.absolute(fft) fft = fft**2 scale = np.sum(weighting**2) * sample_rate fft[1:-1, :] *= (2.0 / scale) fft[(0, -1), :] /= scale # prepare fft frequency list freqs = float(sample_rate) / window_size * np.arange(fft.shape[0]) return fft, freqs def _compute_mfcc(self, samples, sample_rate, stride_ms=10.0, window_ms=20.0, max_freq=None): """Compute mfcc from samples.""" if max_freq is None: max_freq = sample_rate / 2 if max_freq > sample_rate / 2: raise ValueError("max_freq must not be greater than half of " "sample rate.") if stride_ms > window_ms: raise ValueError("Stride size must not be greater than " "window size.") # compute the 13 cepstral coefficients, and the first one is replaced # by log(frame energy) mfcc_feat = mfcc( signal=samples, samplerate=sample_rate, winlen=0.001 * window_ms, winstep=0.001 * stride_ms, highfreq=max_freq) # Deltas d_mfcc_feat = delta(mfcc_feat, 2) # Deltas-Deltas dd_mfcc_feat = delta(d_mfcc_feat, 2) # transpose mfcc_feat = np.transpose(mfcc_feat) d_mfcc_feat = np.transpose(d_mfcc_feat) dd_mfcc_feat =
np.transpose(dd_mfcc_feat)
numpy.transpose
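A minimal sketch of the transpose step, with random arrays standing in for the (frames, coefficients) matrices produced by mfcc and delta; a plausible final step (not shown in the prompt) is concatenating the three transposed blocks into one (features, frames) array:

import numpy as np

frames, n_ceps = 5, 13  # hypothetical toy sizes
mfcc_feat = np.random.rand(frames, n_ceps)
d_mfcc_feat = np.random.rand(frames, n_ceps)
dd_mfcc_feat = np.random.rand(frames, n_ceps)

# Transpose each block to (coefficients, frames), then stack along the
# feature axis so every column holds all 39 features of one frame.
concat = np.concatenate([np.transpose(mfcc_feat),
                         np.transpose(d_mfcc_feat),
                         np.transpose(dd_mfcc_feat)])
assert concat.shape == (3 * n_ceps, frames)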
from odbAccess import *
from abaqusConstants import *
from textRepr import *
import timeit
import numpy as np
import os
import sys

start_time = timeit.default_timer()

# The job index is passed as the last command line argument.
index = int(sys.argv[-1])

odbFile = os.path.join(os.getcwd(),
                       "single_element_simulation_" + str(index) + ".odb")
odb = openOdb(path=odbFile)

step1 = odb.steps.values()[0]
his_key = 'Element PART-1-1.1 Int Point 1 Section Point 1'
region = step1.historyRegions[his_key]

# History outputs are lists of (time, value) tuples.
LE22 = region.historyOutputs['LE22'].data
S22 = region.historyOutputs['S22'].data

# t = np.array(LE22)[:, 0]
x =
np.array(LE22)
numpy.array
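A minimal sketch of the conversion, using a hypothetical list of (time, value) tuples in place of the Abaqus history output:

import numpy as np

# Hypothetical history output, mimicking region.historyOutputs['LE22'].data.
LE22 = [(0.0, 0.0), (0.5, 0.012), (1.0, 0.025)]

arr = np.array(LE22)  # shape (n_frames, 2)
t = arr[:, 0]         # time column
x = arr[:, 1]         # strain column
print(t, x)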
import dask.array as da
import inspect as insp
import numpy as np
from numba import njit, prange
from edt import edt
import scipy.ndimage as spim
import scipy.spatial as sptl
from skimage.segmentation import watershed
from skimage.morphology import ball, disk, square, cube
from porespy.tools import _check_for_singleton_axes
from porespy.tools import extend_slice
from porespy.tools import Results
from porespy.filters import chunked_func
from porespy import settings
from loguru import logger


def snow_partitioning(im, dt=None, r_max=4, sigma=0.4):
    r"""
    Partition the void space into pore regions using a marker-based
    watershed algorithm, with specially filtered peaks as markers.

    Parameters
    ----------
    im : array_like
        A boolean image of the domain, with ``True`` indicating the pore
        space and ``False`` elsewhere.
    dt : array_like, optional
        The distance transform of the pore space. This is computed
        automatically if not provided, but if the distance transform has
        already been computed then supplying it can save some time.
    r_max : int
        The radius of the spherical structuring element used in the
        maximum filter stage to find peaks. The default is 4.
    sigma : float
        The standard deviation of the Gaussian filter used in step 1. The
        default is 0.4. If 0 is given then the filter is not applied,
        which is useful if a distance transform that has already been
        processed is supplied as the ``im`` argument.

    Returns
    -------
    results : Results object
        A custom object with the following data as attributes:

        - 'im'
            The binary image of the void space
        - 'dt'
            The distance transform of the image
        - 'peaks'
            The peaks of the distance transform after applying the steps
            of the SNOW algorithm
        - 'regions'
            The void space partitioned into pores using a marker-based
            watershed with the peaks found by the SNOW algorithm

    Notes
    -----
    The SNOW network extraction algorithm (Sub-Network of an
    Over-segmented Watershed) was designed to handle the peculiarities of
    high-porosity materials, but it applies well to other materials as
    well.

    References
    ----------
    [1] <NAME>. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmentation". Physical Review E. (2017)

    """
    logger.trace("Beginning SNOW algorithm")
    im_shape = np.array(im.shape)
    if im.dtype != bool:
        logger.trace("Converting supplied image to boolean")
        im = im > 0
    if dt is None:
        logger.trace("Performing distance transform")
        if np.any(im_shape == 1):
            dt = edt(im.squeeze())
            dt = dt.reshape(im.shape)
        else:
            dt = edt(im)

    if sigma > 0:
        logger.trace(f"Applying Gaussian blur with sigma = {sigma}")
        dt = spim.gaussian_filter(input=dt, sigma=sigma)

    peaks = find_peaks(dt=dt, r_max=r_max, divs=1)
    logger.debug(f"Initial number of peaks: {spim.label(peaks)[1]}")
    peaks = trim_saddle_points(peaks=peaks, dt=dt)
    logger.debug(f"Peaks after trimming saddle points: {spim.label(peaks)[1]}")
    peaks = trim_nearby_peaks(peaks=peaks, dt=dt)
    peaks, N = spim.label(peaks)
    logger.debug(f"Peaks after trimming nearby peaks: {N}")
    # Note that the mask argument results in some void voxels left unlabeled
    regions = watershed(image=-dt, markers=peaks)
    regions = regions * (im > 0)
    tup = Results()
    tup.im = im
    tup.dt = dt
    tup.peaks = peaks
    tup.regions = regions
    return tup


def snow_partitioning_n(im, r_max=4, sigma=0.4):
    r"""
    Partition an image containing an arbitrary number of phases into
    regions using a marker-based watershed segmentation. This is an
    extension of the ``snow_partitioning`` function, with all phases
    partitioned together.

    Parameters
    ----------
    im : ndarray
        Image of porous material where each phase is represented by a
        unique integer starting from 1 (0's are ignored).
    r_max : scalar
        The radius of the spherical structuring element used in the
        maximum filter stage to find peaks. The default is 4.
    sigma : scalar
        The standard deviation of the Gaussian filter used. The default
        is 0.4. If 0 is given then the filter is not applied.

    Returns
    -------
    results : Results object
        A custom object with the following data as attributes:

        - 'im'
            The original image of the porous material
        - 'dt'
            The combined distance transform of all phases of the image
        - 'phase_max_label'
            The list of the maximum label of each phase, used to
            distinguish the phases from each other
        - 'regions'
            The partitioned regions of the n phases, obtained with a
            marker-based watershed using the peaks found by the SNOW
            algorithm

    References
    ----------
    [1] <NAME>. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmentation". Physical Review E. (2017)

    [2] <NAME> et al. "Dual network extraction algorithm to investigate
    multiple transport processes in porous materials: Image-based modeling
    of pore and grain-scale processes". Computers in Chemical
    Engineering. (2019)

    See Also
    --------
    snow_partitioning

    Notes
    -----
    In principle it is possible to perform a distance transform on each
    phase separately, merge these into a single image, then apply the
    watershed only once. This, however, has been found to create edge
    artifacts between regions, arising from the way watershed handles
    plateaus in the distance transform. To overcome this, this function
    applies the watershed to each of the distance transforms separately,
    then merges the segmented regions back into a single image.

    """
    # Perform snow on each phase and merge all segmentations and distance
    # transforms together.
    phases_num = np.unique(im * 1)
    phases_num =
np.trim_zeros(phases_num)
numpy.trim_zeros
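A minimal sketch of the label handling: np.unique returns the sorted phase labels including the ignored 0, and np.trim_zeros strips it from the front:

import numpy as np

# Hypothetical multiphase image: 0 = ignored, 1 and 2 are phases.
im = np.array([[0, 1, 1],
               [2, 2, 0],
               [1, 2, 2]])

phases_num = np.unique(im * 1)          # array([0, 1, 2]), sorted
phases_num = np.trim_zeros(phases_num)  # drop the leading 0 label
print(phases_num)  # [1 2]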
""" This module contains the functions needed to use the spherical harmonic techniques """ import numpy as np import scipy.sparse from scipy.special import lpmv, spherical_jn, spherical_yn from numba import jit #@jit(nopython=True) def sub2indSH(m,n): """ i = sub2indSH(m,n) Convert Spherical Harmonic (m,n) indices to array index i Assumes that i iterates from 0 (Python style) """ i = n**2 + n + m return i #@jit(nopython=True) def ind2subSH(i): """ (m,n) = ind2subSH(i) Convert array index i to Spherical Harmonic (m,n) indices Assumes that i iterates from 0 (Python style) Assumes that arguments are NumPy arrays """ n = np.ceil(np.sqrt(i+1)-1); m = i - n**2 - n; return (m,n) #@jit(nopython=True) def cart2sph(x,y,z): """ (r, alpha, sinbeta, cosbeta) = Cart2Sph(x,y,z) Converts Cartesian coordinates (x,y,z) into Spherical Polar Coordinates (r, alpha, beta), where alpha is azimuth angle (angle in radians from the positive x axis, with rotation around the positive z axis according to the right-hand screw rule) and beta is polar angle (angle in radians from the positive z axis). beta can alternatively be returned as two arrays of its cos and sin values. It is assumed that x, y and z are all the same size. The returned arrays will be the same size as the arguments. """ r = np.sqrt(x**2 + y**2 + z**2) rho = np.sqrt(x**2 + y**2) alpha = np.arctan2(y, x) cosbeta = z / r sinbeta = rho / r return r, alpha, sinbeta, cosbeta #@jit(nopython=True) def reflect_sh(Bnm, xFlag, yFlag, zFlag): """ Bnm = ReflectSH(Bnm, xFlag, yFlag, zFlag) Reflect an Spherical Harmonic representation of a sound-field in 1 to 3 cartesian axes. Argumments: Bnm Vector of Spherical Harmonic weights. Must have (Order+1)^2 entries, where Order is an integer. xFlag Boolean indicating whether to flip in the x-direction yFlag Boolean indicating whether to flip in the y-direction zFlag Boolean indicating whether to flip in the z-direction """ # Get lists of n and m values: (m, n) = ind2subSH(np.arange(Bnm.size)) # Reflecting in Z: if zFlag: Bnm = Bnm * ((-1)**(n+m)).reshape((np.size(m),1)) # Reflecting in X: if xFlag: Bnm = Bnm * ((-1)**m).reshape((np.size(m),1)) # Reflecting in X or Y: if xFlag**yFlag: # XOR #for n in range(int(np.ceil(np.sqrt(Bnm.size)))-1): for n in np.arange(max(n)+1): i = sub2indSH(np.arange(-n,n+1),n).astype(int) Bnm[i,0] = np.flip(Bnm[i,0]) return Bnm #@jit(nopython=True) def get_translation_matrix(t,k,OrderS,OrderR): """ T = GetTranslationMatrix(t,k,OrderS,OrderR) Computes a translation matrix T from the coefficients of a Spherical Harmonic source (outgoing spherical Hankel radial functions) to the coefficients at a Spherical Harmonic receiver (spherical Bessel radial functions) location at position t relative to the source. It is assumed that both spherical coordinate systems (source and receiver) are aligned to the same Cartesian system in which t is expressed. a is the polar angle from the postive z axis. Essentially computes equation 3.2.17 of: <NAME>., & <NAME>. (2005). Fast Multipole Methods for the Helmholtz Equation in Three Dimensions (1st ed.). Elsevier Science. Arguments: t Cartesian translation vector (1x3 real row vector) k Wavenumber (positive real scalar or vector in radians/meter) OrderS Order of the source (non-negative real integer scalar) OrderR Order of the receiver (non-negative real integer scalar) This file also contains the sub-functions GetStructuralTranslationCoefficients and Wigner3jSymbol. 
""" OrderT = OrderS + OrderR S = GetStructuralTranslationCoefficients(OrderS,OrderR) # Express t in spherical coordinates: [r,alpha,sinbeta,cosbeta] = cart2sph(t[0],t[1],t[2]) # Evaluate spherical harmonic functions: Y, dy_dbeta, dy_dalpha = spherical_harmonic_all(OrderT, np.array([[alpha]]), np.array([[sinbeta]]), np.array([[cosbeta]])) # Allocated results array: T = np.zeros(((OrderR+1)**2, (OrderS+1)**2)) # Loop over translation order & compute summation: for nT in np.arange(OrderT+1): h, dhdz = spherical_hankel_out(nT,k*r) # Compute radial function: for mT in np.arange(-nT, nT+1): iT = sub2indSH(mT,nT) T = T + h * Y[0][int(iT)] * S[int(iT),:,:] #!!! return T #@jit(nopython=True) def GetStructuralTranslationCoefficients(OrderS,OrderR): """ S = GetStructuralTranslationCoefficients(OrderS,OrderR) Computes the 'Structural Translation Coefficients' used in Spherical Harmonic translation routines, as defined in section 3.2.1 of: <NAME>., & <NAME>. (2005). Fast Multipole Methods for the Helmholtz Equation in Three Dimensions (1st ed.). Elsevier Science. Arguments: OrderS Order of the source (non-negative real integer scalar) OrderR Order of the receiver (non-negative real integer scalar) Returned variable is a 3D array of size [(OrderR+1)**2, (OrderS+1)**2, (OrderR+OrderS+1)**2]. """ # Order required for translation: OrderT = OrderS + OrderR # Allocate cell array: S =
np.zeros(((OrderT + 1)**2, (OrderR + 1)**2, (OrderS + 1)**2), dtype=np.complex64)
numpy.zeros
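A minimal sketch of the allocation, assuming the i = n**2 + n + m indexing convention defined by sub2indSH above, with toy orders:

import numpy as np

OrderS, OrderR = 1, 1
OrderT = OrderS + OrderR  # order required for translation

# One complex matrix slice per translation-order harmonic (mT, nT).
S = np.zeros(((OrderT + 1)**2, (OrderR + 1)**2, (OrderS + 1)**2),
             dtype=np.complex64)
print(S.shape)  # (9, 4, 4)
print(S.dtype)  # complex64; np.zeros initialises every entry to 0+0j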
#! /usr/bin/python3 # -*- coding: utf-8 -*- """ ******** BST file ******** """ __author__ = '<NAME>' __copyright__ = 'Copyright 2021, nenupy' __credits__ = ['<NAME>'] __maintainer__ = 'Alan' __email__ = '<EMAIL>' __status__ = 'Production' __all__ = [ "XST" ] from abc import ABC import os from itertools import islice from astropy.time import Time, TimeDelta from astropy.coordinates import SkyCoord, AltAz, Angle import astropy.units as u from astropy.io import fits from healpy.fitsfunc import write_map, read_map from healpy.pixelfunc import mask_bad, nside2resol import numpy as np import json import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colorbar import ColorbarBase from matplotlib.ticker import LinearLocator from matplotlib.colors import Normalize from matplotlib.cm import get_cmap from mpl_toolkits.axes_grid1.inset_locator import inset_axes import dask.array as da from dask.diagnostics import ProgressBar import nenupy from os.path import join, dirname from nenupy.astro.target import FixedTarget, SolarSystemTarget from nenupy.io.io_tools import StatisticsData from nenupy.io.bst import BST_Slice from nenupy.astro import wavelength, altaz_to_radec, l93_to_etrs, etrs_to_enu from nenupy.astro.uvw import compute_uvw from nenupy.astro.sky import HpxSky from nenupy.astro.pointing import Pointing from nenupy.instru import NenuFAR, MiniArray, read_cal_table, freq2sb, nenufar_miniarrays from nenupy import nenufar_position, DummyCtMgr import logging log = logging.getLogger(__name__) # ============================================================= # # ------------------------- XST_Slice ------------------------- # # ============================================================= # class XST_Slice: """ """ def __init__(self, mini_arrays, time, frequency, value): self.mini_arrays = mini_arrays self.time = time self.frequency = frequency self.value = value # --------------------------------------------------------- # # ------------------------ Methods ------------------------ # def plot_correlaton_matrix(self, mask_autocorrelations: bool = False, **kwargs): """ """ max_ma_index = self.mini_arrays.max() + 1 all_mas = np.arange(max_ma_index) matrix = np.full([max_ma_index, max_ma_index], np.nan, "complex") ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0) for ma in all_mas: if ma not in self.mini_arrays: ma1[ma1 >= ma] += 1 ma2[ma2 >= ma] += 1 mask = None if mask_autocorrelations: mask = ma1 != ma2 # cross_correlation mask matrix[ma2[mask], ma1[mask]] = np.mean(self.value, axis=(0, 1))[mask] fig = plt.figure(figsize=kwargs.get("figsize", (10, 10))) ax = fig.add_subplot(111) ax.set_aspect("equal") data = np.absolute(matrix) if kwargs.get("decibel", True): data = 10*np.log10(data) im = ax.pcolormesh( all_mas, all_mas, data, shading="nearest", cmap=kwargs.get("cmap", "YlGnBu"), vmin=kwargs.get("vmin", np.nanmin(data)), vmax=kwargs.get("vmax", np.nanmax(data)) ) ax.set_xticks(all_mas[::2]) ax.set_yticks(all_mas[::2]) ax.grid(alpha=0.5) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.3) cbar = fig.colorbar(im, cax=cax) cbar.set_label(kwargs.get("colorbar_label", "dB" if kwargs.get("decibel", True) else "Amp")) # Axis abels ax.set_xlabel(f"Mini-Array index") ax.set_ylabel(f"Mini-Array index") # Title ax.set_title(kwargs.get("title", "")) # Save or show the figure figname = kwargs.get("figname", "") if figname != "": plt.savefig( figname, dpi=300, bbox_inches="tight", transparent=True ) log.info(f"Figure '{figname}' 
saved.") else: plt.show() plt.close("all") def rephase_visibilities(self, phase_center, uvw): """ """ # Compute the zenith original phase center zenith = SkyCoord( np.zeros(self.time.size), np.ones(self.time.size)*90, unit="deg", frame=AltAz( obstime=self.time, location=nenufar_position ) ) zenith_phase_center = altaz_to_radec(zenith) # Define the rotation matrix def rotation_matrix(skycoord): """ """ ra_rad = skycoord.ra.rad dec_rad = skycoord.dec.rad if np.isscalar(ra_rad): ra_rad = np.array([ra_rad]) dec_rad = np.array([dec_rad]) cos_ra = np.cos(ra_rad) sin_ra = np.sin(ra_rad) cos_dec = np.cos(dec_rad) sin_dec = np.sin(dec_rad) return np.array([ [cos_ra, -sin_ra, np.zeros(ra_rad.size)], [-sin_ra*sin_dec, -cos_ra*sin_dec, cos_dec], [sin_ra*cos_dec, cos_ra*cos_dec, sin_dec], ]) # Transformation matrices to_origin = rotation_matrix(zenith_phase_center) # (3, 3, ntimes) to_new_center = rotation_matrix(phase_center) # (3, 3, 1) total_transformation = np.matmul( np.transpose( to_new_center, (2, 0, 1) ), to_origin ) # (3, 3, ntimes) rotUVW = np.matmul( np.expand_dims( (to_origin[2, :] - to_new_center[2, :]).T, axis=1 ), np.transpose( to_origin, (2, 1, 0) ) ) # (ntimes, 1, 3) phase = np.matmul( rotUVW, np.transpose(uvw, (0, 2, 1)) ) # (ntimes, 1, nvis) rotate_visibilities = np.exp( 2.j*np.pi*phase/wavelength(self.frequency).to(u.m).value[None, :, None] ) # (ntimes, nfreqs, nvis) new_uvw = np.matmul( uvw, # (ntimes, nvis, 3) np.transpose(total_transformation, (2, 0, 1)) ) return rotate_visibilities, new_uvw def make_image(self, resolution: u.Quantity = 1*u.deg, fov_radius: u.Quantity = 25*u.deg, phase_center: SkyCoord = None, stokes: str = "I" ): """ :Example: xst = XST("XST.fits") data = xst.get_stokes("I") sky = data.make_image( resolution=0.5*u.deg, fov_radius=27*u.deg, phase_center=SkyCoord(277.382, 48.746, unit="deg") ) sky[0, 0, 0].plot( center=SkyCoord(277.382, 48.746, unit="deg"), radius=24.5*u.deg ) """ exposure = self.time[-1] - self.time[0] # Compute XST UVW coordinates (zenith phased) uvw = compute_uvw( interferometer=NenuFAR()[self.mini_arrays], phase_center=None, # will be zenith time=self.time, ) # Prepare visibilities rephasing rephase_matrix, uvw = self.rephase_visibilities( phase_center=phase_center, uvw=uvw ) # Mask auto-correlations ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0) cross_mask = ma1 != ma2 uvw = uvw[:, cross_mask, :] # Transform to lambda units wvl = wavelength(self.frequency).to(u.m).value uvw = uvw[:, None, :, :]/wvl[None, :, None, None] # (t, f, bsl, 3) # Mean in time uvw = np.mean(uvw, axis=0) # Prepare the sky sky = HpxSky( resolution=resolution, time=self.time[0] + exposure/2, frequency=np.mean(self.frequency), polarization=np.array([stokes]), value=np.nan ) # Compute LMN coordinates image_mask = sky.visible_mask[0, 0, 0] image_mask *= sky.coordinates.separation(phase_center) <= fov_radius l, m, n = sky.compute_lmn( phase_center=phase_center, coordinate_mask=image_mask ) lmn = np.array([l, m, (n - 1)], dtype=np.float32).T n_pix = l.size lmn = da.from_array( lmn, chunks=(np.floor(n_pix/os.cpu_count()), 3) ) # Transform to Dask array n_bsl = uvw.shape[1] n_freq = self.frequency.size n_pix = l.size uvw = da.from_array( uvw.astype(np.float32), chunks=(n_freq, np.floor(n_bsl/os.cpu_count()), 3) ) # Compute the phase uvwlmn = np.sum(uvw[:, :, None, :] * lmn[None, None, :, :], axis=-1) phase =
np.exp(-2j * np.pi * uvwlmn)
numpy.exp
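A minimal sketch of the phase computation with hypothetical sizes (1 frequency, 2 baselines, 3 pixels); np.exp of the uvw·lmn dot products yields unit-modulus complex weights:

import numpy as np

uvw = np.random.rand(1, 2, 3)  # (freq, baseline, uvw) in wavelengths
lmn = np.random.rand(3, 3)     # (pixel, lmn), with n - 1 stored

# Dot product over the last axis -> (freq, baseline, pixel).
uvwlmn = np.sum(uvw[:, :, None, :] * lmn[None, None, :, :], axis=-1)
phase = np.exp(-2j * np.pi * uvwlmn)
assert np.allclose(np.abs(phase), 1.0)  # pure phase factors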
"""@package deepac.nn_train Train a NN on Illumina reads. Requires a config file describing the available devices, data loading mode, input sequence length, network architecture, paths to input files and how should be the model trained. """ import numpy as np import tensorflow as tf import re import os import sys import errno import warnings from contextlib import redirect_stdout import math from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, Dropout, Activation, Input, Lambda, Masking from tensorflow.keras.layers import concatenate, add, multiply, average, maximum, Flatten from tensorflow.keras.layers import LSTM, Bidirectional from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D, GlobalAveragePooling1D, MaxPooling1D, AveragePooling1D import tensorflow.keras.backend as K from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping, TensorBoard from tensorflow.keras.utils import plot_model from tensorflow.keras import regularizers from tensorflow.keras.optimizers import Adam from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.initializers import orthogonal from tensorflow.keras.models import load_model from deepac.utils import ReadSequence, CSVMemoryLogger, set_mem_growth, DatasetParser class RCConfig: """ RCNet configuration class. """ def __init__(self, config): """RCConfig constructor""" try: self.strategy_dict = { "MirroredStrategy": tf.distribute.MirroredStrategy, "OneDeviceStrategy": tf.distribute.OneDeviceStrategy, "CentralStorageStrategy": tf.distribute.experimental.CentralStorageStrategy, "MultiWorkerMirroredStrategy": tf.distribute.experimental.MultiWorkerMirroredStrategy, "TPUStrategy": tf.distribute.experimental.TPUStrategy, } # Devices Config # # Get the number of available GPUs try: self.strategy = config['Devices']['DistStrategy'] except KeyError: print("Unknown distribution strategy. 
Using MirroredStrategy.") self.strategy = "MirroredStrategy" self._n_gpus = 0 self.tpu_strategy = None # for using tf.device instead of strategy try: self.simple_build = config['Devices'].getboolean('Simple_build') if tf.executing_eagerly() else True except KeyError: self.simple_build = False if tf.executing_eagerly() else True self.base_batch_size = config['Training'].getint('BatchSize') self.batch_size = self.base_batch_size self.set_n_gpus() self.model_build_device = config['Devices']['Device_build'] # Data Loading Config # # If using generators to load data batch by batch, set up the number of batch workers and the queue size self.use_generators_keras = config['DataLoad'].getboolean('LoadTrainingByBatch') self.use_tf_data = config['DataLoad'].getboolean('Use_TFData') self.multiprocessing = config['DataLoad'].getboolean('Multiprocessing') self.batch_loading_workers = config['DataLoad'].getint('BatchWorkers') self.batch_queue = config['DataLoad'].getint('BatchQueue') # Input Data Config # # Set the sequence length and the alphabet self.seq_length = config['InputData'].getint('SeqLength') self.alphabet = "ACGT" self.seq_dim = len(self.alphabet) try: self.mask_zeros = config['InputData'].getboolean('MaskZeros') except KeyError: self.mask_zeros = False # subread settings (subread = first k nucleotides of a read) self.use_subreads = config['InputData'].getboolean('UseSubreads') self.min_subread_length = config['InputData'].getint('MinSubreadLength') self.max_subread_length = config['InputData'].getint('MaxSubreadLength') self.dist_subread = config['InputData']['DistSubread'] # Architecture Config # # Set the seed if config['Architecture']['Seed'] == "none" or config['Architecture']['Seed'] == "None": self.seed = None else: self.seed = config['Architecture'].getint('Seed') # Set the initializer (choose between He and Glorot uniform) self.init_mode = config['Architecture']['WeightInit'] self._initializer_dict = { "he_uniform": tf.keras.initializers.he_uniform(self.seed), # scale=2, mode=fan_in "glorot_uniform": tf.keras.initializers.glorot_uniform(self.seed), # scale=1, mode=fan_avg } self.initializers = {} if self.init_mode == 'custom': self.initializers["conv"] = self._initializer_dict[config['Architecture']['WeightInit_Conv']] self.initializers["merge"] = self._initializer_dict[config['Architecture']['WeightInit_Merge']] self.initializers["lstm"] = self._initializer_dict[config['Architecture']['WeightInit_LSTM']] self.initializers["dense"] = self._initializer_dict[config['Architecture']['WeightInit_Dense']] self.initializers["out"] = self._initializer_dict[config['Architecture']['WeightInit_Out']] else: self.initializers["conv"] = self._initializer_dict[config['Architecture']['WeightInit']] self.initializers["merge"] = self._initializer_dict[config['Architecture']['WeightInit']] self.initializers["lstm"] = self._initializer_dict[config['Architecture']['WeightInit']] self.initializers["dense"] = self._initializer_dict[config['Architecture']['WeightInit']] self.initializers["out"] = self._initializer_dict[config['Architecture']['WeightInit']] self.ortho_gain = config['Architecture'].getfloat('OrthoGain') # Define the network architecture self.rc_mode = config['Architecture']['RC_Mode'] self.n_conv = config['Architecture'].getint('N_Conv') try: self.skip_size = config['Architecture'].getint('Skip_Size') except KeyError: self.skip_size = 0 self.n_recurrent = config['Architecture'].getint('N_Recurrent') self.n_dense = config['Architecture'].getint('N_Dense') self.input_dropout = 
config['Architecture'].getfloat('Input_Dropout') self.conv_units = [int(u) for u in config['Architecture']['Conv_Units'].split(',')] self.conv_filter_size = [int(s) for s in config['Architecture']['Conv_FilterSize'].split(',')] self.conv_dilation = [int(s) for s in config['Architecture']['Conv_Dilation'].split(',')] self.conv_stride = [int(s) for s in config['Architecture']['Conv_Stride'].split(',')] self.conv_activation = config['Architecture']['Conv_Activation'] try: self.padding = config['Architecture']['Conv_Padding'] except KeyError: self.padding = "same" self.conv_bn = config['Architecture'].getboolean('Conv_BN') self.conv_pooling = config['Architecture']['Conv_Pooling'] self.conv_dropout = config['Architecture'].getfloat('Conv_Dropout') self.recurrent_units = [int(u) for u in config['Architecture']['Recurrent_Units'].split(',')] self.recurrent_bn = config['Architecture'].getboolean('Recurrent_BN') if self.n_recurrent == 1 and self.recurrent_bn: raise ValueError("RC-BN is intended for RC layers with 2D output. Use RC-Conv1D or RC-LSTM returning" " sequences.") self.recurrent_dropout = config['Architecture'].getfloat('Recurrent_Dropout') merge_dict = { # motif on fwd fuzzy OR rc (Goedel t-conorm) "maximum": maximum, # motif on fwd fuzzy AND rc (product t-norm) "multiply": multiply, # motif on fwd PLUS/"OR" rc (Shrikumar-style) "add": add, # motif on fwd PLUS/"OR" rc (Shrikumar-style), rescaled "average": average } if self.rc_mode != "none": self.dense_merge = merge_dict.get(config['Architecture']['Dense_Merge']) if self.dense_merge is None: raise ValueError('Unknown dense merge function') self.dense_units = [int(u) for u in config['Architecture']['Dense_Units'].split(',')] self.dense_activation = config['Architecture']['Dense_Activation'] self.dense_bn = config['Architecture'].getboolean('Dense_BN') self.dense_dropout = config['Architecture'].getfloat('Dense_Dropout') try: self.mc_dropout = config['Architecture'].getboolean('MC_Dropout') self.dropout_training_mode = None if not self.mc_dropout else True except KeyError: self.mc_dropout = False self.dropout_training_mode = None # If needed, weight classes self.use_weights = config['ClassWeights'].getboolean('UseWeights') if self.use_weights: try: counts = [float(x) for x in config['ClassWeights']['ClassCounts'].split(',')] except KeyError: counts = [config['ClassWeights'].getfloat('ClassCount_0'), config['ClassWeights'].getfloat('ClassCount_1')] sum_count = sum(counts) weights = [sum_count/(2*class_count) for class_count in counts] classes = range(len(counts)) self.class_weight = dict(zip(classes, weights)) self.log_init = False if self.log_init: self.output_bias = tf.keras.initializers.Constant(np.log(counts[1]/counts[0])) else: self.output_bias = 'zeros' else: self.class_weight = None self.output_bias = 'zeros' # Paths Config # # Set the input data paths self.x_train_path = config['Paths']['TrainingData'] self.y_train_path = config['Paths']['TrainingLabels'] self.x_val_path = config['Paths']['ValidationData'] self.y_val_path = config['Paths']['ValidationLabels'] # Set the run name self.runname = config['Paths']['RunName'] # Training Config # # Set the number op epochs, batch size and the optimizer self.epoch_start = config['Training'].getint('EpochStart') - 1 self.epoch_end = config['Training'].getint('EpochEnd') self.patience = config['Training'].getint('Patience') try: self.l1 = config['Training'].getfloat('Lambda_L1') except KeyError: self.l1 = 0.0 self.l2 = config['Training'].getfloat('Lambda_L2') self.regularizer = 
regularizers.L1L2(self.l1, self.l2) self.learning_rate = config['Training'].getfloat('LearningRate') self.optimization_method = config['Training']['Optimizer'] if self.optimization_method == "adam": self.optimizer = Adam(lr=self.learning_rate) else: warnings.warn("Custom learning rates implemented for Adam only. Using default Keras learning rate.") self.optimizer = self.optimization_method # If needed, log the memory usage self.log_memory = config['Training'].getboolean('MemUsageLog') self.summaries = config['Training'].getboolean('Summaries') self.log_superpath = config['Training']['LogPath'] self.log_dir = os.path.join(self.log_superpath, "{runname}-logs".format(runname=self.runname)) self.use_tb = config['Training'].getboolean('Use_TB') if self.use_tb: self.tb_hist_freq = config['Training'].getint('TBHistFreq') except KeyError as ke: sys.exit("The config file is not compatible with this version of DeePaC. " "Missing keyword: {}".format(ke)) except AttributeError as ae: sys.exit("The config file is not compatible with this version of DeePaC. " "Error: {}".format(ae)) def set_tf_session(self): """Set TF session.""" # If no GPUs, use CPUs if self._n_gpus == 0: self.model_build_device = '/cpu:0' set_mem_growth() def set_n_gpus(self): self._n_gpus = len(tf.config.get_visible_devices('GPU')) self.batch_size = self.base_batch_size * self._n_gpus if self._n_gpus > 0 else self.base_batch_size def get_n_gpus(self): return self._n_gpus def set_tpu_resolver(self, tpu_resolver): if tpu_resolver is not None: self.tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu_resolver) self.batch_size = self.base_batch_size * self.tpu_strategy.num_replicas_in_sync class RCNet: """ Reverse-complement neural network class. """ def __init__(self, config, training_mode=True, verbose_load=False): """RCNet constructor and config parsing""" self.config = config if self.config.use_tf_data and not tf.executing_eagerly(): warnings.warn("Training with TFRecordDatasets supported only in eager mode. 
Looking for .npy files...") self.config.use_tf_data = False self.config.set_tf_session() self.history = None self.verbose_load = verbose_load self._t_sequence = None self._v_sequence = None self.training_sequence = None self.x_train = None self.y_train = None self.length_train = 0 self.val_indices = None self.x_val = None self.y_val = None self.validation_data = (self.x_val, self.y_val) self.length_val = 0 self.model = None if training_mode: try: os.makedirs(self.config.log_dir) except OSError as e: if e.errno != errno.EEXIST: raise self._set_callbacks() # Set strategy if self.config.tpu_strategy is not None: self.strategy = self.config.tpu_strategy elif self.config.simple_build: self.strategy = None elif self.config.strategy == "OneDeviceStrategy": self.strategy = self.config.strategy_dict[self.config.strategy](self.config.model_build_device) else: self.strategy = self.config.strategy_dict[self.config.strategy]() if float(tf.__version__[:3]) > 2.1 and self.config.epoch_start > 0: checkpoint_name = self.config.log_dir + "/{runname}-".format(runname=self.config.runname) model_file = checkpoint_name + "e{epoch:03d}.h5".format(epoch=self.config.epoch_start) print("Loading " + model_file) with self.get_device_strategy_scope(): self.model = load_model(model_file) else: # Build the model using the CPU or GPU or TPU with self.get_device_strategy_scope(): if self.config.rc_mode == "full": self._build_rc_model() elif self.config.rc_mode == "siam": self._build_siam_model() elif self.config.rc_mode == "none": self._build_simple_model() else: raise ValueError('Unrecognized RC mode') if self.config.epoch_start > 0: print("WARNING: loading a pre-trained model will reset the optimizer state. Please update to TF>=2.2.") checkpoint_name = self.config.log_dir + "/{runname}-".format(runname=self.config.runname) model_file = checkpoint_name + "e{epoch:03d}.h5".format(epoch=self.config.epoch_start) path = re.sub("\.h5$", "", model_file) weights_path = path + "_weights.h5" print("Loading " + weights_path) self.model.load_weights(weights_path) def get_device_strategy_scope(self): if self.config.simple_build: device_strategy_scope = tf.device(self.config.model_build_device) else: device_strategy_scope = self.strategy.scope() return device_strategy_scope def load_data(self): """Load datasets""" print("Loading...") if self.config.use_tf_data: prefetch_size = tf.data.experimental.AUTOTUNE def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1)) for filename in filenames] return np.max(n) + 1 parser = DatasetParser(self.config.seq_length) train_filenames = tf.io.gfile.glob(self.config.x_train_path + "/*.tfrec") self.length_train = count_data_items(train_filenames) self.training_sequence = \ parser.read_dataset(train_filenames).shuffle(buffer_size=self.config.batch_size*self.config.batch_queue) self.training_sequence = \ self.training_sequence.repeat().batch(self.config.batch_size).prefetch(prefetch_size) val_filenames = tf.io.gfile.glob(self.config.x_val_path + "/*.tfrec") self.length_val = count_data_items(val_filenames) self.validation_data = \ parser.read_dataset(val_filenames).repeat().batch(self.config.batch_size).prefetch(prefetch_size) elif self.config.use_generators_keras: # Prepare the generators for loading data batch by batch self.x_train = np.load(self.config.x_train_path, mmap_mode='r') self.y_train = np.load(self.config.y_train_path, mmap_mode='r') self._t_sequence = ReadSequence(self.x_train, self.y_train, self.config.batch_size, self.config.use_subreads, 
self.config.min_subread_length, self.config.max_subread_length, self.config.dist_subread, verbose_id="TRAIN" if self.verbose_load else None) self.training_sequence = self._t_sequence self.length_train = len(self.x_train) # Prepare the generators for loading data batch by batch self.x_val = np.load(self.config.x_val_path, mmap_mode='r') self.y_val = np.load(self.config.y_val_path, mmap_mode='r') self._v_sequence = ReadSequence(self.x_val, self.y_val, self.config.batch_size, self.config.use_subreads, self.config.min_subread_length, self.config.max_subread_length, self.config.dist_subread, verbose_id="VAL" if self.verbose_load else None) self.validation_data = self._v_sequence self.length_val = len(self.x_val) else: # ... or load all the data to memory self.x_train = np.load(self.config.x_train_path) self.y_train = np.load(self.config.y_train_path) self.length_train = self.x_train.shape # ... or load all the data to memory self.x_val = np.load(self.config.x_val_path) self.y_val = np.load(self.config.y_val_path) self.val_indices = np.arange(len(self.y_val)) np.random.shuffle(self.val_indices) self.x_val = self.x_val[self.val_indices] self.y_val = self.y_val[self.val_indices] self.validation_data = (self.x_val, self.y_val) self.length_val = self.x_val.shape[0] def _add_lstm(self, inputs, return_sequences): # LSTM with sigmoid activation corresponds to the CuDNNLSTM if not tf.executing_eagerly() and (self.config.get_n_gpus() > 0 and re.match("cpu", self.config.model_build_device, re.IGNORECASE) is None): x = Bidirectional(tf.compat.v1.keras.layers.CuDNNLSTM(self.config.recurrent_units[0], kernel_initializer=self.config.initializers["lstm"], recurrent_initializer=orthogonal( gain=self.config.ortho_gain, seed=self.config.seed), kernel_regularizer=self.config.regularizer, return_sequences=return_sequences))(inputs) else: x = Bidirectional(LSTM(self.config.recurrent_units[0], kernel_initializer=self.config.initializers["lstm"], recurrent_initializer=orthogonal(gain=self.config.ortho_gain, seed=self.config.seed), kernel_regularizer=self.config.regularizer, return_sequences=return_sequences, recurrent_activation='sigmoid'))(inputs) return x def _add_siam_lstm(self, inputs_fwd, inputs_rc, return_sequences, units): # LSTM with sigmoid activation corresponds to the CuDNNLSTM if not tf.executing_eagerly() and (self.config.get_n_gpus() > 0 and re.match("cpu", self.config.model_build_device, re.IGNORECASE) is None): shared_lstm = Bidirectional( tf.compat.v1.keras.layers.CuDNNLSTM(units, kernel_initializer=self.config.initializers["lstm"], recurrent_initializer=orthogonal( gain=self.config.ortho_gain, seed=self.config.seed), kernel_regularizer=self.config.regularizer, return_sequences=return_sequences)) else: shared_lstm = Bidirectional(LSTM(units, kernel_initializer=self.config.initializers["lstm"], recurrent_initializer=orthogonal(gain=self.config.ortho_gain, seed=self.config.seed), kernel_regularizer=self.config.regularizer, return_sequences=return_sequences, recurrent_activation='sigmoid')) x_fwd = shared_lstm(inputs_fwd) x_rc = shared_lstm(inputs_rc) return x_fwd, x_rc def _add_rc_lstm(self, inputs, return_sequences, units): revcomp_in = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=inputs.shape[1:], name="reverse_complement_lstm_input_{n}".format(n=self._current_recurrent+1)) inputs_rc = revcomp_in(inputs) x_fwd, x_rc = self._add_siam_lstm(inputs, inputs_rc, return_sequences, units) if return_sequences: rev_axes = (1, 2) else: rev_axes = 1 revcomp_out = Lambda(lambda x: K.reverse(x, 
axes=rev_axes), output_shape=x_rc.shape[1:], name="reverse_lstm_output_{n}".format(n=self._current_recurrent + 1)) x_rc = revcomp_out(x_rc) out = concatenate([x_fwd, x_rc], axis=-1) return out def _add_siam_conv1d(self, inputs_fwd, inputs_rc, units, kernel_size, dilation_rate=1, stride=1): shared_conv = Conv1D(filters=units, kernel_size=kernel_size, dilation_rate=dilation_rate, padding=self.config.padding, kernel_initializer=self.config.initializers["conv"], kernel_regularizer=self.config.regularizer, strides=stride) x_fwd = shared_conv(inputs_fwd) x_rc = shared_conv(inputs_rc) return x_fwd, x_rc def _add_rc_conv1d(self, inputs, units, kernel_size, dilation_rate=1, stride=1): revcomp_in = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=inputs.shape[1:], name="reverse_complement_conv1d_input_{n}".format(n=self._current_conv+1)) inputs_rc = revcomp_in(inputs) x_fwd, x_rc = self._add_siam_conv1d(inputs, inputs_rc, units, kernel_size, dilation_rate, stride) revcomp_out = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=x_rc.shape[1:], name="reverse_complement_conv1d_output_{n}".format(n=self._current_conv + 1)) x_rc = revcomp_out(x_rc) out = concatenate([x_fwd, x_rc], axis=-1) return out def _add_siam_batchnorm(self, inputs_fwd, inputs_rc): input_shape = inputs_rc.shape if len(input_shape) != 3: raise ValueError("Intended for RC layers with 2D output. Use RC-Conv1D or RC-LSTM returning sequences." "Expected dimension: 3, but got: " + str(len(input_shape))) out = concatenate([inputs_fwd, inputs_rc], axis=1) out = BatchNormalization()(out) split_shape = out.shape[1] // 2 new_shape = [split_shape, input_shape[2]] fwd_out = Lambda(lambda x: x[:, :split_shape, :], output_shape=new_shape, name="split_batchnorm_fwd_output_{n}".format(n=self._current_bn+1)) rc_out = Lambda(lambda x: x[:, split_shape:, :], output_shape=new_shape, name="split_batchnorm_rc_output1_{n}".format(n=self._current_bn+1)) x_fwd = fwd_out(out) x_rc = rc_out(out) return x_fwd, x_rc def _add_rc_batchnorm(self, inputs): input_shape = inputs.shape if len(input_shape) != 3: raise ValueError("Intended for RC layers with 2D output. Use RC-Conv1D or RC-LSTM returning sequences." 
"Expected dimension: 3, but got: " + str(len(input_shape))) split_shape = inputs.shape[-1] // 2 new_shape = [input_shape[1], split_shape] fwd_in = Lambda(lambda x: x[:, :, :split_shape], output_shape=new_shape, name="split_batchnorm_fwd_input_{n}".format(n=self._current_bn+1)) rc_in = Lambda(lambda x: K.reverse(x[:, :, split_shape:], axes=(1, 2)), output_shape=new_shape, name="split_batchnorm_rc_input_{n}".format(n=self._current_bn+1)) inputs_fwd = fwd_in(inputs) inputs_rc = rc_in(inputs) x_fwd, x_rc = self._add_siam_batchnorm(inputs_fwd, inputs_rc) rc_out = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=x_rc.shape, name="split_batchnorm_rc_output2_{n}".format(n=self._current_bn+1)) x_rc = rc_out(x_rc) out = concatenate([x_fwd, x_rc], axis=-1) return out def _add_siam_merge_dense(self, inputs_fwd, inputs_rc, units, merge_function=add): shared_dense = Dense(units, kernel_initializer=self.config.initializers["merge"], kernel_regularizer=self.config.regularizer) x_fwd = shared_dense(inputs_fwd) x_rc = shared_dense(inputs_rc) out = merge_function([x_fwd, x_rc]) return out def _add_rc_merge_dense(self, inputs, units, merge_function=add): split_shape = inputs.shape[-1] // 2 fwd_in = Lambda(lambda x: x[:, :split_shape], output_shape=[split_shape], name="split_merging_dense_input_fwd_{n}".format(n=1)) rc_in = Lambda(lambda x: x[:, split_shape:], output_shape=[split_shape], name="split_merging_dense_input_rc_{n}".format(n=1)) x_fwd = fwd_in(inputs) x_rc = rc_in(inputs) rc_rev = Lambda(lambda x: K.reverse(x, axes=1), output_shape=x_rc.shape[1:], name="reverse_merging_dense_input_{n}".format(n=1)) x_rc = rc_rev(x_rc) return self._add_siam_merge_dense(x_fwd, x_rc, units, merge_function) def _add_skip(self, source, residual): stride = int(round(source.shape[1] / residual.shape[1])) if (source.shape[1] != residual.shape[1]) or (source.shape[-1] != residual.shape[-1]): source = Conv1D(filters=residual.shape[-1], kernel_size=1, strides=stride, padding=self.config.padding, kernel_initializer=self.config.initializer, kernel_regularizer=self.config.regularizer)(source) return add([source, residual]) def _add_siam_skip(self, source_fwd, source_rc, residual_fwd, residual_rc): # Cast Dimension to int for TF 1 compatibility stride_fwd = int(round(int(source_fwd.shape[1]) / int(residual_fwd.shape[1]))) stride_rc = int(round(int(source_rc.shape[1]) / int(residual_rc.shape[1]))) assert stride_fwd == stride_rc, "Fwd and rc shapes differ." 
fwd_equal = (source_fwd.shape[1] == residual_fwd.shape[1]) and (source_fwd.shape[-1] == residual_fwd.shape[-1]) rc_equal = (source_rc.shape[1] == residual_rc.shape[1]) and (source_rc.shape[-1] == residual_rc.shape[-1]) if not (fwd_equal and rc_equal): source_fwd, source_rc = self._add_siam_conv1d(source_fwd, source_rc, units=residual_fwd.shape[-1], kernel_size=1, stride=stride_fwd) return add([source_fwd, residual_fwd]), add([source_rc, residual_rc]) def _add_rc_skip(self, source, residual): equal = (source.shape[1] == residual.shape[1]) and (source.shape[-1] == residual.shape[-1]) if equal: return add([source, residual]) else: split_shape_src = source.shape[-1] // 2 new_shape_src = [source.shape[1], split_shape_src] split_shape_res = residual.shape[-1] // 2 new_shape_res = [residual.shape[1], split_shape_res] fwd_src_in = Lambda(lambda x: x[:, :, :split_shape_src], output_shape=new_shape_src, name="forward_skip_src_in_{n}".format(n=self._current_conv + 1)) rc_src_in = Lambda(lambda x: K.reverse(x[:, :, split_shape_src:], axes=(1, 2)), output_shape=new_shape_src, name="reverse_complement_skip_src_in_{n}".format(n=self._current_conv + 1)) fwd_res_in = Lambda(lambda x: x[:, :, :split_shape_res], output_shape=new_shape_res, name="forward_skip_res_in_{n}".format(n=self._current_conv + 1)) rc_res_in = Lambda(lambda x: K.reverse(x[:, :, split_shape_res:], axes=(1, 2)), output_shape=new_shape_res, name="reverse_complement_skip_res_in{n}".format(n=self._current_conv + 1)) source_fwd = fwd_src_in(source) residual_fwd = fwd_res_in(residual) source_rc = rc_src_in(source) residual_rc = rc_res_in(residual) x_fwd, x_rc = self._add_siam_skip(source_fwd, source_rc, residual_fwd, residual_rc) revcomp_out = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=x_rc.shape[1:], name="reverse_complement_skip_output_{n}".format(n=self._current_conv + 1)) x_rc = revcomp_out(x_rc) out = concatenate([x_fwd, x_rc], axis=-1) return out def _add_rc_pooling(self, inputs, pooling_layer): input_shape = inputs.shape if len(input_shape) != 3: raise ValueError("Intended for RC layers with 2D output. Use RC-Conv1D or RC-LSTM returning sequences." "Expected dimension: 3, but got: " + str(len(input_shape))) split_shape = inputs.shape[-1] // 2 new_shape = [input_shape[1], split_shape] fwd_in = Lambda(lambda x: x[:, :, :split_shape], output_shape=new_shape, name="split_pooling_fwd_input_{n}".format(n=self._current_pool+1)) rc_in = Lambda(lambda x: K.reverse(x[:, :, split_shape:], axes=(1, 2)), output_shape=new_shape, name="split_pooling_rc_input_{n}".format(n=self._current_pool+1)) inputs_fwd = fwd_in(inputs) inputs_rc = rc_in(inputs) x_fwd = pooling_layer(inputs_fwd) x_rc = pooling_layer(inputs_rc) rc_out = Lambda(lambda x: K.reverse(x, axes=(1, 2)), output_shape=x_rc.shape, name="split_pooling_rc_output_{n}".format(n=self._current_pool+1)) x_rc = rc_out(x_rc) out = concatenate([x_fwd, x_rc], axis=-1) return out def _build_simple_model(self): """Build the standard network""" print("Building model...") # Number of added recurrent layers self._current_recurrent = 0 # Initialize input inputs = Input(shape=(self.config.seq_length, self.config.seq_dim)) if self.config.mask_zeros: x = Masking()(inputs) else: x = inputs # The last recurrent layer should return the output for the last unit only. 
# Previous layers must return output for all units
        return_sequences = True if self.config.n_recurrent > 1 else False

        # Input dropout. If the rate is zero, x is left unchanged, so a preceding Masking layer is preserved.
        if not np.isclose(self.config.input_dropout, 0.0):
            x = Dropout(self.config.input_dropout, seed=self.config.seed)(x,
                                                                          training=self.config.dropout_training_mode)

        # First convolutional/recurrent layer
        if self.config.n_conv > 0:
            # Convolutional layers will always be placed before recurrent ones
            # Standard convolutional layer
            x = Conv1D(filters=self.config.conv_units[0], kernel_size=self.config.conv_filter_size[0],
                       padding=self.config.padding, kernel_initializer=self.config.initializers["conv"],
                       kernel_regularizer=self.config.regularizer, strides=self.config.conv_stride[0])(x)
            if self.config.conv_bn:
                # Standard batch normalization layer
                x = BatchNormalization()(x)
            # Add activation
            x = Activation(self.config.conv_activation)(x)
        elif self.config.n_recurrent > 0:
            # If no convolutional layers, the first layer is recurrent.
            # CuDNNLSTM requires a GPU and tensorflow with cuDNN
            x = self._add_lstm(x, return_sequences)
            if self.config.recurrent_bn and return_sequences:
                # Standard batch normalization layer
                x = BatchNormalization()(x)
            # Add dropout
            x = Dropout(self.config.recurrent_dropout, seed=self.config.seed)(x,
                                                                              training=self.config.dropout_training_mode)
            # First recurrent layer already added
            self._current_recurrent = 1
        else:
            raise ValueError('First layer should be convolutional or recurrent')

        if self.config.skip_size > 0:
            start = x
        else:
            start = None

        # For next convolutional layers
        for i in range(1, self.config.n_conv):
            # Add pooling first
            if self.config.conv_pooling == 'max':
                x = MaxPooling1D()(x)
            elif self.config.conv_pooling == 'average':
                x = AveragePooling1D()(x)
            elif self.config.conv_pooling not in ['last_max', 'last_average', 'none']:
                # Skip pooling if it should be applied to the last conv layer or skipped altogether.
                # Throw a ValueError if the pooling method is unrecognized.
raise ValueError('Unknown pooling method') # Add dropout (drops whole features) if not np.isclose(self.config.conv_dropout, 0.0): x = Dropout(self.config.conv_dropout, seed=self.config.seed)(x, training=self.config.dropout_training_mode) # Add layer # Standard convolutional layer x = Conv1D(filters=self.config.conv_units[i], kernel_size=self.config.conv_filter_size[i], padding=self.config.padding, kernel_initializer=self.config.initializers["conv"], kernel_regularizer=self.config.regularizer, strides=self.config.conv_stride[i])(x) # Pre-activation skip connections https://arxiv.org/pdf/1603.05027v2.pdf if self.config.skip_size > 0: if i % self.config.skip_size == 0: end = x x = self._add_skip(start, end) start = x # Add batch norm if self.config.conv_bn: # Standard batch normalization layer x = BatchNormalization()(x) # Add activation x = Activation(self.config.conv_activation)(x) # Pooling layer if self.config.n_conv > 0: if self.config.conv_pooling == 'max' or self.config.conv_pooling == 'last_max': if self.config.n_recurrent == 0: # If no recurrent layers, use global pooling x = GlobalMaxPooling1D()(x) else: # for recurrent layers, use normal pooling x = MaxPooling1D()(x) elif self.config.conv_pooling == 'average' or self.config.conv_pooling == 'last_average': if self.config.n_recurrent == 0: # if no recurrent layers, use global pooling x = GlobalAveragePooling1D()(x) else: # for recurrent layers, use normal pooling x = AveragePooling1D()(x) elif self.config.conv_pooling == 'none': if self.config.n_recurrent == 0: x = Flatten()(x) else: raise ValueError('No pooling ("none") is not compatible with following LSTM layers.') else: # Skip pooling if needed or throw a ValueError if the pooling method is unrecognized # (should be thrown above) raise ValueError('Unknown pooling method') # Add dropout (drops whole features) if not np.isclose(self.config.conv_dropout, 0.0): x = Dropout(self.config.conv_dropout, seed=self.config.seed)(x, training=self.config.dropout_training_mode) # Recurrent layers for i in range(self._current_recurrent, self.config.n_recurrent): if i == self.config.n_recurrent - 1: # In the last layer, return output only for the last unit return_sequences = False # Add a bidirectional recurrent layer. 
# CuDNNLSTM requires a GPU and tensorflow with cuDNN.
            # Chain from the previous layer's output x, not from the raw inputs.
            x = self._add_lstm(x, return_sequences)
            if self.config.recurrent_bn and return_sequences:
                # Standard batch normalization layer
                x = BatchNormalization()(x)
            # Add dropout
            x = Dropout(self.config.recurrent_dropout, seed=self.config.seed)(x,
                                                                              training=self.config.dropout_training_mode)

        # Dense layers
        for i in range(0, self.config.n_dense):
            x = Dense(self.config.dense_units[i], kernel_initializer=self.config.initializers["dense"],
                      kernel_regularizer=self.config.regularizer)(x)
            if self.config.dense_bn:
                # Standard batch normalization layer
                x = BatchNormalization()(x)
            x = Activation(self.config.dense_activation)(x)
            x = Dropout(self.config.dense_dropout, seed=self.config.seed)(x,
                                                                          training=self.config.dropout_training_mode)

        # Output layer for binary classification
        x = Dense(1, kernel_initializer=self.config.initializers["out"], kernel_regularizer=self.config.regularizer,
                  bias_initializer=self.config.output_bias)(x)
        x = Activation('sigmoid')(x)

        # Initialize the model
        self.model = Model(inputs, x)

    def _build_rc_model(self):
        """Build the RC network"""
        print("Building RC-model...")
        # Number of added recurrent layers
        self._current_recurrent = 0
        self._current_conv = 0
        self._current_bn = 0
        self._current_pool = 0
        # Initialize input
        inputs = Input(shape=(self.config.seq_length, self.config.seq_dim))
        if self.config.mask_zeros:
            x = Masking()(inputs)
        else:
            x = inputs
        # The last recurrent layer should return the output for the last unit only.
        # Previous layers must return output for all units
        return_sequences = True if self.config.n_recurrent > 1 else False
        # Input dropout
        if not
np.isclose(self.config.input_dropout, 0.0)
numpy.isclose
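# A minimal numpy sketch (separate from the model code above, and only an
# illustration) of the reverse-complement trick behind K.reverse(x, axes=(1, 2)):
# with the one-hot channel order A, C, G, T, complementation is exactly a channel
# flip, so reversing the sequence axis and the channel axis together yields the
# reverse complement. In the model the leading axis is the batch, hence axes (1, 2).
import numpy as np

bases = "ACGT"

def encode(seq):
    eye = np.eye(4)
    return np.stack([eye[bases.index(b)] for b in seq])  # shape: (length, 4)

x = encode("ACGGT")
x_rc = x[::-1, ::-1]  # reverse the sequence axis and the channel axis
decoded = "".join(bases[row.argmax()] for row in x_rc)
assert decoded == "ACCGT"  # the reverse complement of ACGGT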
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr. 1 2019

NCA-18650 cell

@author: shpark

Parameters from NCA-18650

Reference:
Park et al., "Optimal Experimental Design for Parameterization of an Electrochemical Lithium-ion Battery Model"
Journal of The Electrochemical Society, 165(7), 2018
"""

import numpy as np

p = {}

#==============================================================================
# Geometric params
#==============================================================================

# Thickness of each layer
p['L_n'] = 79.0e-6  # Thickness of negative electrode [m]
p['L_s'] = 80.0e-6  # Thickness of separator [m]
p['L_p'] = 61.5e-6  # Thickness of positive electrode [m]

L_ccn = 25e-6  # Thickness of negative current collector [m]
L_ccp = 25e-6  # Thickness of positive current collector [m]

# Particle Radii
p['R_s_n'] = 2.0249e-05  # Radius of solid particles in negative electrode [m]
p['R_s_p'] = 1.6973e-05  # Radius of solid particles in positive electrode [m]

# Volume fractions
p['epsilon_s_n'] = 0.543889597565723  # Volume fraction in solid for neg. electrode
p['epsilon_s_p'] = 0.666364981170368  # Volume fraction in solid for pos. electrode

p['epsilon_e_n'] = 0.347495486967184  # Volume fraction in electrolyte for neg. electrode
p['epsilon_e_s'] = 0.5                # Volume fraction in electrolyte for separator
p['epsilon_e_p'] = 0.330000000000000  # Volume fraction in electrolyte for pos. electrode

p['epsilon_f_n'] = 1 - p['epsilon_s_n'] - p['epsilon_e_n']  # Volume fraction of filler in neg. electrode
p['epsilon_f_p'] = 1 - p['epsilon_s_p'] - p['epsilon_e_p']  # Volume fraction of filler in pos. electrode

# Specific interfacial surface area
p['a_s_n'] = 3*p['epsilon_s_n'] / p['R_s_n']  # Negative electrode [m^2/m^3]
p['a_s_p'] = 3*p['epsilon_s_p'] / p['R_s_p']  # Positive electrode [m^2/m^3]

#==============================================================================
# Transport params
#==============================================================================

p['D_s_n0'] = 2.63029669224544e-14  # Diffusion coeff for solid in neg. electrode, [m^2/s]
p['D_s_p0'] = 6.81035680483463e-14  # Diffusion coeff for solid in pos. electrode, [m^2/s]

# Conductivity of solid
p['sig_n'] = 100  # Conductivity of solid in neg. electrode, [1/Ohms*m]
p['sig_p'] = 100  # Conductivity of solid in pos. electrode, [1/Ohms*m]

#==============================================================================
# Kinetic params
#==============================================================================

p['R_f_n'] = 0  # Resistivity of SEI layer, [Ohms*m^2]
p['R_f_p'] = 0  # Resistivity of SEI layer, [Ohms*m^2]
#p.R_c = 2.5e-03;%5.1874e-05/p.Area; % Contact Resistance/Current Collector Resistance, [Ohms-m^2]

# Nominal Reaction rates
p['k_n0'] = 7.50e-03  # Reaction rate in neg. electrode, [(A/m^2)*(mol^3/mol)^(1+alpha)]
p['k_p0'] = 2.30e-03  # Reaction rate in pos.
electrode, [(A/m^2)*(mol^3/mol)^(1+alpha)] #============================================================================== # Thermodynamic params #============================================================================== # Thermal dynamics p['C_p'] = 2000 # Heat capacity, [J/kg-K] p['R_th'] = 2 # Thermal resistance, [K/W] p['mth'] = 0.834 # Mass of cell [Kg] # Activation Energies # Taken from Zhang et al (2014) [Harbin] # http://dx.doi.org/10.1016/j.jpowsour.2014.07.110 # All units are [J/mol] p['E_kn'] = 37.48e+3 p['E_kp'] = 39.57e+3 p['E_Dsn'] = 42.77e+3 p['E_Dsp'] = 18.55e+3 p['E_De'] = 37.04e+3 p['E_kappa_e'] = 34.70e+3 # Ambient Temperature p['T_amb'] = 298.15 # [K] p['T_ref'] = 298.15 # [K] for ElectrolyteACT #============================================================================== # Miscellaneous #============================================================================== p['R'] = 8.314472; # Gas constant, [J/mol-K] p['Faraday'] = 96485.3329 # Faraday constant [Coulombs/mol] p['Area'] = 1.425 # Electrode current collector area [m^2] p['alph'] = 0.5 # Charge transfer coefficients p['t_plus'] = 0.45 # Transference number p['brug'] = 1.8 # Bruggeman porosity #============================================================================== # Concentrations #============================================================================== p['c_s_n_max'] = 3.71e+04 # Max concentration in anode, [mol/m^3] p['c_s_p_max'] = 5.10e+04 # Max concentration in cathode, [mol/m^3] p['n_Li_s'] = 0.1406 # Total moles of lithium in solid phase [mol] p['c_e0'] = 1.0e3 # Electrolyte concentration [mol/m^3] #============================================================================== # Discretization params #============================================================================== p['PadeOrder'] = 3 p['Nr'] = 20 p['delta_r_n'] = 1/float(p['Nr']) p['delta_r_p'] = 1/float(p['Nr']) p['Nxn'] = 10; p['Nxs'] = 5; p['Nxp'] = 10; p['Nx'] = p['Nxn']+p['Nxs']+p['Nxp'] p['delta_x_n'] = 1 / float(p['Nxn']) p['delta_x_s'] = 1 / float(p['Nxs']) p['delta_x_p'] = 1 / float(p['Nxp']) def refPotentialAnode_casadi(theta): c_n=np.array([-0.084294960339275, 0.920754744005144, -0.500066623566425, 0.062731837918546, 0.782151587417570, -0.373761901864611, 0.019988184317997, 0.543282314780430, -0.295609630222051, 0.040970248093866, 0.231152288743602, -0.217847875913234, 0.068744203951316, 0.353848415118256, -0.114753994434564, -0.028613032233089, 0.260671608316041, -0.212058177468640, -0.025506157489854, 0.211741908826122, -0.241880220004548, 0.188872027034948, 0.867520021192469, -0.225038983698359, -0.111904175370177, 0.537399173641857, -0.020780743382893, 0.108353745941168, 0.537735904911254, -0.020226723056513, 0.171375773597772, 0.729717193288193, -0.323902793428930, 0.064143152162965, 1.289849595601526, 0.704961322280748, 0.023028206444624, 0.481699223765299, -0.076233450161839, -0.182559256738691, 0.830851470359638, -0.226362977193547, -0.040952011143767, 1.626936110900125, 0.295695270567609, -1.000228763094078, 0.007914258576845, -0.016476666187381, -0.341740372496750, 0.001274961492701, -0.004879090290810, -0.930906698538900, 0.001549868904555, -0.010583717929547, 2.554274538083029, -0.012402969675540, -0.029257893810540, -0.512533408582419, 0.066122834568301, -0.077930639597751, -0.499673574757569, 0.044470609922510, -0.134483437256594, 1.904111886758372, -0.035336812622768, -0.306171040837701, -1.122974595772499, 0.028740372472439, -0.079271479637875, -0.093855421675871, 0.930843806570863, 
-0.516652668839875, -0.846383609865041, 0.012151749801329, -0.029511731110250, -0.561782895480513, 0.098392530745244, -0.109853910868333, -0.818206413176353, 0.026850808833446, -0.051805538572186, -0.525543070925015, 0.188590232596615, -0.192054642003214, -0.046580230674248, 0.002863828671823, -0.000914487593373, 2.650656293235332, -0.008182255230700, -0.117937922743741, -0.295664205008775, 0.137690106957231, -0.310460986123659, -0.835065551163236, 0.711574616090746, -0.997353098073145, 0.415746756470558, 0.423984781966332, 3.189835673119072, 0.413779708001205, 0.426343693564050, 3.190867502582611]) Uref=c_n[0]*np.exp(-((theta - c_n[1])**2/c_n[2]**2))+ \ c_n[3]*np.exp(-((theta - c_n[4])**2/c_n[5]**2))+ \ c_n[6]*np.exp(-((theta - c_n[7])**2/c_n[8]**2))+ \ c_n[9]*np.exp(-((theta - c_n[10])**2/c_n[11]**2))+ \ c_n[12]*np.exp(-((theta - c_n[13])**2/c_n[14]**2))+ \ c_n[15]*np.exp(-((theta - c_n[16])**2/c_n[17]**2))+ \ c_n[18]*np.exp(-((theta - c_n[19])**2/c_n[20]**2))+ \ c_n[21]*np.exp(-((theta - c_n[22])**2/c_n[23]**2))+ \ c_n[24]*np.exp(-((theta - c_n[25])**2/c_n[26]**2))+ \ c_n[27]*np.exp(-((theta - c_n[28])**2/c_n[29]**2))+ \ c_n[30]*np.exp(-((theta - c_n[31])**2/c_n[32]**2))+ \ c_n[33]*np.exp(-((theta - c_n[34])**2/c_n[35]**2))+ \ c_n[36]*np.exp(-((theta - c_n[37])**2/c_n[38]**2))+ \ c_n[39]*np.exp(-((theta - c_n[40])**2/c_n[41]**2))+ \ c_n[42]*np.exp(-((theta - c_n[43])**2/c_n[44]**2))+ \ c_n[45]*np.exp(-((theta - c_n[46])**2/c_n[47]**2))+ \ c_n[48]*np.exp(-((theta - c_n[49])**2/c_n[50]**2))+ \ c_n[51]*np.exp(-((theta - c_n[52])**2/c_n[53]**2))+ \ c_n[54]*np.exp(-((theta - c_n[55])**2/c_n[56]**2))+ \ c_n[57]*np.exp(-((theta - c_n[58])**2/c_n[59]**2))+ \ c_n[60]*np.exp(-((theta - c_n[61])**2/c_n[62]**2))+ \ c_n[63]*np.exp(-((theta - c_n[64])**2/c_n[65]**2))+ \ c_n[66]*np.exp(-((theta - c_n[67])**2/c_n[68]**2))+ \ c_n[69]*np.exp(-((theta - c_n[70])**2/c_n[71]**2))+ \ c_n[72]*np.exp(-((theta - c_n[73])**2/c_n[74]**2))+ \ c_n[75]*np.exp(-((theta - c_n[76])**2/c_n[77]**2))+ \ c_n[78]*
np.exp(-((theta - c_n[79])**2/c_n[80]**2))
numpy.exp
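# A compact, hedged equivalent of the 27-term Gaussian sum above (an
# illustration, not from the source): c_n is read as (amplitude, center, width)
# triplets over indices 0..80 -- the entries past index 80 are not used by the
# expression shown -- i.e.
#     Uref(theta) = sum_k c_n[3k] * exp(-(theta - c_n[3k+1])**2 / c_n[3k+2]**2)
# Scalar theta is assumed here.
import numpy as np

def ref_potential_anode_compact(theta, c_n):
    a = c_n[0:81:3]  # amplitudes c_n[3k]
    b = c_n[1:81:3]  # centers    c_n[3k+1]
    s = c_n[2:81:3]  # widths     c_n[3k+2]
    return np.sum(a * np.exp(-((theta - b) ** 2) / s ** 2))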
import os import unittest from unittest import mock from unittest.mock import MagicMock import numpy as np import pandas as pd import redback dirname = os.path.dirname(__file__) class TestTransient(unittest.TestCase): def setUp(self) -> None: self.time = np.array([1, 2, 3]) self.time_err = np.array([0.2, 0.3, 0.4]) self.y = np.array([3, 4, 2]) self.y_err = np.sqrt(self.y) self.redshift = 0.75 self.data_mode = 'counts' self.name = "GRB123456" self.photon_index = 2 self.use_phase_model = False self.transient = redback.transient.transient.Transient( time=self.time, time_err=self.time_err, counts=self.y, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=self.use_phase_model) def tearDown(self) -> None: del self.time del self.time_err del self.y del self.y_err del self.redshift del self.data_mode del self.name del self.photon_index del self.use_phase_model del self.transient def test_ttes_data_mode_setting(self): bin_ttes = MagicMock(return_value=(self.time, self.y)) ttes = np.arange(0, 1, 1000) self.data_mode = 'ttes' self.bin_size = 0.1 self.transient = redback.transient.transient.Transient( ttes=ttes, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, bin_ttes=bin_ttes) bin_ttes.assert_called_once() def test_data_mode_switches(self): self.assertTrue(self.transient.counts_data) self.assertFalse(self.transient.luminosity_data) self.assertFalse(self.transient.flux_data) self.assertFalse(self.transient.flux_density_data) self.assertFalse(self.transient.magnitude_data) self.assertFalse(self.transient.tte_data) def test_set_data_mode_switch(self): self.transient.flux_data = True self.assertTrue(self.transient.flux_data) self.assertFalse(self.transient.counts_data) def test_get_time_via_x(self): self.assertTrue(np.array_equal(self.time, self.transient.x)) self.assertTrue(np.array_equal(self.time_err, self.transient.x_err)) def test_get_time_via_x_luminosity_data(self): new_times = np.array([1, 2, 3]) new_time_errs = np.array([0.1, 0.2, 0.3]) self.transient.time_rest_frame = new_times self.transient.time_rest_frame_err = new_time_errs self.transient.data_mode = "luminosity" self.assertTrue(np.array_equal(new_times, self.transient.x)) self.assertTrue(np.array_equal(new_time_errs, self.transient.x_err)) def test_x_same_as_time(self): self.assertTrue(np.array_equal(self.transient.x, self.transient.time)) def test_xerr_same_as_time_err(self): self.assertTrue(np.array_equal(self.transient.x_err, self.transient.time_err)) def test_set_use_phase_model(self): self.assertFalse(self.transient.use_phase_model) def test_xlabel(self): self.assertEqual(r"Time since burst [days]", self.transient.xlabel) self.transient.use_phase_model = True self.assertEqual(r"Time [MJD]", self.transient.xlabel) def test_ylabel(self): self.assertEqual(r'Counts', self.transient.ylabel) self.transient.luminosity_data = True self.assertEqual(r'Luminosity [$10^{50}$ erg s$^{-1}$]', self.transient.ylabel) self.transient.magnitude_data = True self.assertEqual(r'Magnitude', self.transient.ylabel) self.transient.flux_data = True self.assertEqual(r'Flux [erg cm$^{-2}$ s$^{-1}$]', self.transient.ylabel) self.transient.flux_density_data = True self.assertEqual(r'Flux density [mJy]', self.transient.ylabel) self.transient.flux_density_data = False with self.assertRaises(ValueError): _ = self.transient.ylabel def test_use_phase_model_time_attribute(self): self.transient = redback.transient.transient.Transient( time_mjd=self.time, 
time_mjd_err=self.time_err, counts=self.y, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=True) self.assertTrue(np.array_equal(self.transient.time_mjd, self.transient.x)) self.assertTrue(np.array_equal(self.transient.time_mjd_err, self.transient.x_err)) def test_set_x(self): new_x = np.array([2, 3, 4]) self.transient.x = new_x self.assertTrue(np.array_equal(new_x, self.transient.x)) self.assertTrue(np.array_equal(new_x, self.transient.time)) def test_set_x_err(self): new_x_err = np.array([3, 4, 5]) self.transient.x_err = new_x_err self.assertTrue(np.array_equal(new_x_err, self.transient.x_err)) self.assertTrue(np.array_equal(new_x_err, self.transient.time_err)) def test_set_y(self): new_y = np.array([7, 8, 9]) self.transient.y = new_y self.assertTrue(np.array_equal(new_y, self.transient.y)) self.assertTrue(np.array_equal(new_y, self.transient.counts)) def test_set_y_err(self): new_y_err = np.array([7, 8, 9]) self.transient.y_err = new_y_err self.assertTrue(np.array_equal(new_y_err, self.transient.y_err)) self.assertTrue(np.array_equal(new_y_err, self.transient.counts_err)) def test_y_same_as_counts(self): self.assertTrue(np.array_equal(self.transient.y, self.transient.counts)) def test_yerr_same_as_counts(self): self.assertTrue(np.array_equal(self.transient.y_err, self.transient.counts_err)) def test_redshift(self): self.assertEqual(self.redshift, self.transient.redshift) def test_get_data_mode(self): self.assertEqual(self.data_mode, self.transient.data_mode) def test_set_data_mode(self): new_data_mode = "luminosity" self.transient.data_mode = new_data_mode self.assertEqual(new_data_mode, self.transient.data_mode) def test_set_illegal_data_mode(self): with self.assertRaises(ValueError): self.transient.data_mode = "abc" def test_plot_lightcurve(self): pass # self.transient.plot_lightcurve(model=None) def test_plot_data(self): pass # self.transient.plot_data() class TestOpticalTransient(unittest.TestCase): def setUp(self) -> None: self.time = np.array([1, 2, 3]) self.time_err = np.array([0.2, 0.3, 0.4]) self.y = np.array([3, 4, 2]) self.y_err = np.sqrt(self.y) self.redshift = 0.75 self.data_mode = 'flux_density' self.name = "SN2000A" self.photon_index = 2 self.use_phase_model = False self.bands = np.array(['i', 'g', 'g']) self.active_bands = np.array(['g']) self.transient = redback.transient.transient.OpticalTransient( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands) def tearDown(self) -> None: del self.time del self.time_err del self.y del self.y_err del self.redshift del self.data_mode del self.name del self.photon_index del self.use_phase_model del self.bands del self.active_bands del self.transient def test_load_data_magnitude(self): name = "optical_transient_test_data" transient_dir = f"{dirname}/data" processed_file_path = f"{transient_dir}/{name}.csv" data_mode = "magnitude" time_days, time_mjd, magnitude, magnitude_err, bands, system = \ self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode) expected_time_days = np.array([0.4813999999969383, 0.49020000000018626]) expected_time_mjd = np.array([57982.9814, 57982.9902]) expected_magnitude = np.array([17.48, 18.26]) expected_magnitude_err = np.array([0.02, 0.15]) expected_bands = np.array(["i", "H"]) expected_system = 
np.array(["AB", "AB"]) self.assertTrue(np.allclose(expected_time_days, time_days)) self.assertTrue(np.allclose(expected_time_mjd, time_mjd)) self.assertTrue(np.allclose(expected_magnitude, magnitude)) self.assertTrue(np.allclose(expected_magnitude_err, magnitude_err)) self.assertTrue(np.array_equal(expected_bands, bands)) self.assertTrue(np.array_equal(expected_system, system)) def test_load_data_flux_density(self): name = "optical_transient_test_data" transient_dir = f"{dirname}/data" data_mode = "flux_density" processed_file_path = f"{transient_dir}/{name}.csv" time_days, time_mjd, flux_density, flux_density_err, bands, system = \ self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode) expected_time_days = np.array([0.4813999999969383, 0.49020000000018626]) expected_time_mjd = np.array([57982.9814, 57982.9902]) expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559]) expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914]) expected_bands = np.array(["i", "H"]) expected_system = np.array(["AB", "AB"]) self.assertTrue(np.allclose(expected_time_days, time_days)) self.assertTrue(np.allclose(expected_time_mjd, time_mjd)) self.assertTrue(np.allclose(expected_flux_density, flux_density)) self.assertTrue(np.allclose(expected_flux_density_err, flux_density_err)) self.assertTrue(np.array_equal(expected_bands, bands)) self.assertTrue(np.array_equal(expected_system, system)) def test_load_data_all(self): name = "optical_transient_test_data" transient_dir = f"{dirname}/data" processed_file_path = f"{transient_dir}/{name}.csv" data_mode = "all" time_days, time_mjd, flux_density, flux_density_err, magnitude, magnitude_err, bands, system = \ self.transient.load_data(processed_file_path=processed_file_path, data_mode=data_mode) expected_time_days = np.array([0.4813999999969383, 0.49020000000018626]) expected_time_mjd = np.array([57982.9814, 57982.9902]) expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559]) expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914]) expected_magnitude = np.array([17.48, 18.26]) expected_magnitude_err = np.array([0.02, 0.15]) expected_bands = np.array(["i", "H"]) expected_system = np.array(["AB", "AB"]) self.assertTrue(np.allclose(expected_time_days, time_days)) self.assertTrue(np.allclose(expected_time_mjd, time_mjd)) self.assertTrue(np.allclose(expected_flux_density, flux_density)) self.assertTrue(np.allclose(expected_flux_density_err, flux_density_err)) self.assertTrue(np.allclose(expected_magnitude, magnitude)) self.assertTrue(np.allclose(expected_magnitude_err, magnitude_err)) self.assertTrue(np.array_equal(expected_bands, bands)) self.assertTrue(np.array_equal(expected_system, system)) def test_get_from_open_access_catalogue(self): with mock.patch("redback.transient.transient.OpticalTransient.load_data") as m: expected_time_days = np.array([0.4813999999969383, 0.49020000000018626]) expected_time_mjd = np.array([57982.9814, 57982.9902]) expected_flux_density = np.array([0.36982817978026444, 0.1803017740859559]) expected_flux_density_err = np.array([0.006812898591418732, 0.024911116226263914]) expected_magnitude = np.array([17.48, 18.26]) expected_magnitude_err = np.array([0.02, 0.15]) expected_bands = np.array(["i", "H"]) expected_system = np.array(["AB", "AB"]) m.return_value = \ expected_time_days, expected_time_mjd, expected_flux_density, expected_flux_density_err, \ expected_magnitude, expected_magnitude_err, expected_bands, expected_system name 
= "test" transient = redback.transient.transient.OpticalTransient.from_open_access_catalogue(name=name) self.assertTrue(transient.magnitude_data) self.assertEqual(name, transient.name) self.assertTrue(np.allclose(expected_time_days, transient.time)) self.assertTrue(np.allclose(expected_time_mjd, transient.time_mjd)) self.assertTrue(np.allclose(expected_flux_density, transient.flux_density)) self.assertTrue(np.allclose(expected_flux_density_err, transient.flux_density_err)) self.assertTrue(np.allclose(expected_magnitude, transient.magnitude)) self.assertTrue(np.allclose(expected_magnitude_err, transient.magnitude_err)) self.assertTrue(np.array_equal(expected_bands, transient.bands)) self.assertTrue(np.array_equal(expected_system, transient.system)) def test_set_active_bands(self): self.assertTrue(np.array_equal(np.array(self.active_bands), self.transient.active_bands)) def test_set_active_bands_all(self): self.transient = redback.transient.transient.OpticalTransient( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands, active_bands='all') self.assertTrue(np.array_equal(np.array(['g', 'i']), self.transient.active_bands)) def test_set_frequencies_from_bands(self): expected = [1, 2, 2] bands_to_frequency = MagicMock(return_value=expected) self.transient = redback.transient.transient.OpticalTransient( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, bands_to_frequency=bands_to_frequency) self.assertTrue(np.array_equal(expected, self.transient.frequency)) bands_to_frequency.assert_called_once() def test_set_frequencies_default(self): frequency = np.array([1, 2, 2]) self.transient = redback.transient.transient.OpticalTransient( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, redshift=self.redshift, data_mode=self.data_mode, name=self.name, photon_index=self.photon_index, use_phase_model=self.use_phase_model, bands=self.bands, frequency=frequency, active_bands=self.active_bands) self.assertTrue(np.array_equal(frequency, self.transient.frequency)) def test_get_filtered_data(self): filtered_x, filtered_x_err, filtered_y, filtered_y_err = self.transient.get_filtered_data() expected_x = self.time[1:] expected_x_err = self.time_err[1:] expected_y = self.y[1:] expected_y_err = self.y_err[1:] self.assertTrue(np.array_equal(expected_x, filtered_x)) self.assertTrue(np.array_equal(expected_x_err, filtered_x_err)) self.assertTrue(np.array_equal(expected_y, filtered_y)) self.assertTrue(np.array_equal(expected_y_err, filtered_y_err)) def test_get_filtered_data_no_x_err(self): self.transient.x_err = None _, filtered_x_err, _, _ = self.transient.get_filtered_data() self.assertIsNone(filtered_x_err) def test_get_filtered_data_illegal_data_mode(self): with self.assertRaises(ValueError): self.transient.luminosity_data = True self.transient.get_filtered_data() def test_meta_data_not_available(self): self.assertIsNone(self.transient.meta_data) @mock.patch("pandas.read_csv") def test_meta_data_from_csv(self, read_csv): self.transient.directory_structure = redback.get_data.directory.DirectoryStructure( directory_path='data', raw_file_path=None, 
processed_file_path=None) expected = dict(a=1) read_csv.return_value = expected self.transient._set_data() self.assertDictEqual(expected, self.transient.meta_data) def test_transient_dir(self): with mock.patch('redback.get_data.directory.open_access_directory_structure') as m: expected = 'expected' m.return_value = expected, '_', '_' self.assertEqual(expected, self.transient.transient_dir) def test_unique_bands(self): expected = np.array(['g', 'i']) self.assertTrue(np.array_equal(expected, self.transient.unique_bands)) def test_list_of_band_indices(self): expected = [np.array([1, 2]), np.array([0])] self.assertTrue(np.array_equal(expected[0], self.transient.list_of_band_indices[0])) self.assertTrue(np.array_equal(expected[1], self.transient.list_of_band_indices[1])) def test_default_colors(self): expected = ["g", "r", "i", "z", "y", "J", "H", "K"] self.assertListEqual(expected, self.transient.default_filters) def test_get_colors(self): with mock.patch('matplotlib.cm.rainbow') as m: expected = 'rainbow' m.return_value = expected self.assertEqual(expected, self.transient.get_colors(filters=['a', 'b'])) class TestAfterglow(unittest.TestCase): def setUp(self) -> None: self.time = np.array([1, 2, 3]) self.time_err = np.array([0.2, 0.3, 0.4]) self.y = np.array([3, 4, 2]) self.y_err = np.sqrt(self.y) self.data_mode = 'flux' self.name = "GRB070809" self.use_phase_model = False self.bands = np.array(['i', 'g', 'g']) self.active_bands = np.array(['g']) self.FluxToLuminosityConverter = MagicMock() self.Truncator = MagicMock() self.sgrb = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode=self.data_mode, name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) self.sgrb_luminosity = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode="luminosity", name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) self.sgrb_flux_density = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode="flux_density", name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) self.sgrb_not_existing = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode=self.data_mode, name="123456", use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) self.sgrb_magnitude = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, magnitude=self.y, magnitude_err=self.y_err, data_mode="magnitude", name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) self.sgrb_all_active_bands = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode=self.data_mode, name=self.name, 
use_phase_model=self.use_phase_model, bands=self.bands, active_bands='all', FluxToLuminosityConverter=self.FluxToLuminosityConverter, Truncator=self.Truncator) def tearDown(self) -> None: del self.time del self.time_err del self.y del self.y_err del self.data_mode del self.name del self.use_phase_model del self.bands del self.active_bands del self.sgrb del self.sgrb_not_existing del self.sgrb_magnitude del self.sgrb_all_active_bands del self.FluxToLuminosityConverter def test_stripped_name(self): expected = "070809" self.assertEqual(expected, self.sgrb._stripped_name) def test_truncate(self): expected_x = 0 expected_x_err = 1 expected_y = 2 expected_yerr = 3 return_value = expected_x, expected_x_err, expected_y, expected_yerr truncator = MagicMock(return_value=MagicMock(truncate=MagicMock(return_value=return_value))) self.sgrb.Truncator = truncator self.sgrb.truncate() self.assertListEqual( [expected_x, expected_x_err, expected_y, expected_yerr], [self.sgrb.x, self.sgrb.x_err, self.sgrb.y, self.sgrb.y_err]) def test_set_active_bands(self): self.assertTrue(np.array_equal(np.array(self.active_bands), self.sgrb.active_bands)) def test_set_active_bands_all(self): self.assertTrue(np.array_equal(np.array(['g', 'i']), self.sgrb_all_active_bands.active_bands)) def test_set_frequencies_from_bands(self): expected = [1, 2, 2] bands_to_frequency = MagicMock(return_value=expected) self.sgrb = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode=self.data_mode, name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, active_bands=self.active_bands, bands_to_frequency=bands_to_frequency) self.assertTrue(np.array_equal(expected, self.sgrb.frequency)) bands_to_frequency.assert_called_once() def test_set_frequencies_default(self): frequency = np.array([1, 2, 2]) self.sgrb = redback.transient.afterglow.SGRB( time=self.time, time_err=self.time_err, flux_density=self.y, flux_density_err=self.y_err, data_mode=self.data_mode, name=self.name, use_phase_model=self.use_phase_model, bands=self.bands, frequency=frequency, active_bands=self.active_bands) self.assertTrue(np.array_equal(frequency, self.sgrb.frequency)) def test_get_filtered_data(self): filtered_x, filtered_x_err, filtered_y, filtered_y_err = self.sgrb_magnitude.get_filtered_data() expected_x = self.time[1:] expected_x_err = self.time_err[1:] expected_y = self.y[1:] expected_y_err = self.y_err[1:] self.assertTrue(np.array_equal(expected_x, filtered_x)) self.assertTrue(np.array_equal(expected_x_err, filtered_x_err)) self.assertTrue(np.array_equal(expected_y, filtered_y)) self.assertTrue(np.array_equal(expected_y_err, filtered_y_err)) def test_get_filtered_data_no_x_err(self): self.sgrb_magnitude.x_err = None _, filtered_x_err, _, _ = self.sgrb_magnitude.get_filtered_data() self.assertIsNone(filtered_x_err) def test_get_filtered_data_illegal_data_mode(self): with self.assertRaises(ValueError): self.sgrb.get_filtered_data() def test_event_table(self): expected = "/tables/SGRB_table.txt" self.assertIn(expected, self.sgrb.event_table) def test_meta_data_from_csv(self): with mock.patch("pandas.read_csv") as m: field_name = 'BAT Photon Index (15-150 keV) (PL = simple power-law, CPL = cutoff power-law)' data_frame = pd.DataFrame.from_dict({field_name: [0, 1, np.nan]}) m.return_value = data_frame expected = np.array([0, 1, 0]) self.sgrb._set_data() self.assertTrue(np.array_equal(expected, np.array(self.sgrb.meta_data[field_name]))) def test_photon_index(self): 
self.assertEqual(1.69, self.sgrb.photon_index) def test_photon_index_missing(self): self.assertTrue(
np.isnan(self.sgrb_not_existing.photon_index)
numpy.isnan
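# A minimal sketch of the injection seam several of these tests rely on;
# `Example` below is a hypothetical class, not redback's API. Passing a
# callable (such as bands_to_frequency or bin_ttes) into the constructor lets
# a MagicMock stand in for it, so its call count can be asserted directly
# without patching module globals.
from unittest.mock import MagicMock

class Example:
    def __init__(self, bands, bands_to_frequency):
        self.frequency = bands_to_frequency(bands)

mock_converter = MagicMock(return_value=[1, 2, 2])
example = Example(bands=["i", "g", "g"], bands_to_frequency=mock_converter)
assert example.frequency == [1, 2, 2]
mock_converter.assert_called_once()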
# ====== #
# Eval   #
# ====== #

# Libraries
from datasets import ToyDatasetEval # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)
from model import ToyNet

import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
import pickle
import matplotlib as mpl
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2

# Eval Params
batch_size = 32
M = 64
max_logvar = 2.0

# Load Evaluation Dataset
print("Loading evaluation dataset...")
val_dataset = ToyDatasetEval()
num_val_batches = int(len(val_dataset)/batch_size)
print("num_val_batches: {}\n".format(num_val_batches))
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)

# Load Trained Models
networks = []
print("Loading trained models...")
for i in range(M):
    network = ToyNet("eval_Ensemble-Adam_1_M1024", project_dir="/workspace/evaluating_bdl/toyRegression").cuda()
    network.load_state_dict(torch.load("/workspace/evaluating_bdl/toyRegression/training_logs/model_Ensemble-Adam_1_M1024_%d/checkpoints/model_Ensemble-Adam_1_M1024_epoch_150.pth" % i))
    network.eval() # (set in evaluation mode, this affects BatchNorm and dropout)
    networks.append(network)

M = float(len(networks))
print(M)

# History
x_values = []
final_mean_values = []
final_sigma_tot_values = []   # Total Uncertainty Values
final_sigma_epi_values = []   # Epistemic Uncertainty Values
final_sigma_alea_values = []  # Aleatoric Uncertainty Values

print("Evaluating models...")
for step, (x) in enumerate(val_loader):
    # Convert to Cuda Tensor and add dimension, [batch_size] -> [batch_size, 1]
    x = Variable(x).cuda().unsqueeze(1) # (shape: (batch_size, 1))

    means = []
    log_vars = []
    for network in networks:
        # Predict
        outputs = network(x)
        mean = outputs[0] # (shape: (batch_size, ))
        log_var = outputs[1] # (shape: (batch_size, )) (log(sigma^2))
        log_var = max_logvar - F.relu(max_logvar - log_var)
        means.append(mean)
        log_vars.append(log_var)

    for i in range(x.size(0)):
        x_value = x[i].data.cpu().numpy()[0]

        # Retrieve mean, var values from the GPU tensors
        mean_values = []
        for mean in means:
            mean_value = mean[i].data.cpu().numpy()[0]
            mean_values.append(mean_value)

        sigma_alea_values = []
        for log_var in log_vars:
            sigma_alea_value = torch.exp(log_var[i]).data.cpu().numpy()[0]
            sigma_alea_values.append(sigma_alea_value)

        # Average the mean, aleatoric and epistemic uncertainty values.
        # Please refer to Appendix A - Approximating a mixture of Gaussian distributions.
        # Each of the following loops runs over the M ensemble members.
mean_value = 0.0 for value in mean_values: mean_value += value/M # Average of All the Predicted Mean(x), hat{mu}(x) sigma_epi_value = 0.0 for value in mean_values: sigma_epi_value += ((value - mean_value)**2)/M sigma_alea_value = 0.0 for value in sigma_alea_values: sigma_alea_value += value/M sigma_tot_value = sigma_epi_value + sigma_alea_value # print(sigma_tot_value) x_values.append(x_value) # (1000, ) final_mean_values.append(mean_value) # (1000, ) final_sigma_epi_values.append(sigma_epi_value) # (1000, ) final_sigma_alea_values.append(sigma_alea_value) # (1000, ) final_sigma_tot_values.append(sigma_tot_value) # (1000, ) # ============================== # # Predictive Uncertainty Plots # # ============================== # print("Preparing Plots...") plt.figure(1) plt.plot(x_values, final_mean_values, "r", label=r"Predicted: $\hat{\mu}(x)$") plt.fill_between(x_values,
np.array(final_mean_values)
numpy.array
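# A hedged sketch (not from the source) of what the per-point loops above
# compute: moment matching for a uniform mixture of M Gaussians N(mu_i, s2_i),
#     mu_hat  = (1/M) * sum_i mu_i                   (predicted mean)
#     s2_epi  = (1/M) * sum_i (mu_i - mu_hat)**2     (epistemic variance)
#     s2_alea = (1/M) * sum_i s2_i                   (aleatoric variance)
#     s2_tot  = s2_epi + s2_alea                     (total variance)
# A vectorized equivalent over arrays of shape (M, N):
import numpy as np

def mixture_moments(mus, sigma2s):
    mu_hat = mus.mean(axis=0)
    sigma2_epi = ((mus - mu_hat) ** 2).mean(axis=0)
    sigma2_alea = sigma2s.mean(axis=0)
    return mu_hat, sigma2_epi, sigma2_alea, sigma2_epi + sigma2_alea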
import numpy as np import pandas as pd import pytest from scipy import stats from locan import LocData from locan.analysis import BlinkStatistics from locan.analysis.blinking import _blink_statistics, _DistributionFits def test__blink_statistics_0(): # frame with on and off periods up to three frames and starting with one-frame on-period. frames = np.array([0, 4, 6, 7, 8, 12, 13]) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [1, 1, 3, 2]) assert np.array_equal(results["off_periods"], [3, 1, 3]) assert np.array_equal(results["on_periods_frame"], [0, 4, 6, 12]) assert np.array_equal(results["off_periods_frame"], [1, 5, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0], [1], [2, 3, 4], [5, 6]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [1, 5, 2]) assert np.array_equal(results["off_periods"], [3, 3]) assert np.array_equal(results["on_periods_frame"], [0, 4, 12]) assert np.array_equal(results["off_periods_frame"], [1, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0], [1, 2, 3, 4], [5, 6]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [14]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6]]) ] ) def test__blink_statistics_1(): # frame with on and off periods up to three frames and starting with two-frame on-period. 
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [1, 2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 3, 6, 12]) assert np.array_equal(results["off_periods_frame"], [2, 4, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [4, 3, 2]) assert np.array_equal(results["off_periods"], [2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 6, 12]) assert np.array_equal(results["off_periods_frame"], [4, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [14]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]] ) ] ) def test__blink_statistics_2(): # frame with on and off periods up to three frames and starting with two-frame on-period. 
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) + 1 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [1, 1, 2, 3]) assert np.array_equal(results["on_periods_frame"], [1, 4, 7, 13]) assert np.array_equal(results["off_periods_frame"], [0, 3, 5, 10]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [5, 3, 2]) assert np.array_equal(results["off_periods"], [2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 7, 13]) assert np.array_equal(results["off_periods_frame"], [5, 10]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [15]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]] ) ] ) def test__blink_statistics_3(): # frame with on and off periods up to three frames and starting with off-period. 
frames = np.array([0, 1, 4, 6, 7, 8, 12, 13]) + 4 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [4, 2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert np.array_equal(results["off_periods_frame"], [6, 9, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=2, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [4, 3]) assert np.array_equal(results["on_periods_frame"], [4, 16]) assert np.array_equal(results["off_periods_frame"], [0, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=2, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [3]) assert np.array_equal(results["on_periods_frame"], [4, 16]) assert np.array_equal(results["off_periods_frame"], [13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [18]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert np.array_equal(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]) def test__blink_statistics_4(): # frame with on and off periods up to three frames and starting with off-period. 
frames = np.array([0, 1, 4, 6, 12, 13]) + 2 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 1, 2]) assert np.array_equal(results["off_periods"], [2, 2, 1, 5]) assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14]) assert np.array_equal(results["off_periods_frame"], [0, 4, 7, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]] ) ] ) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 1, 2]) assert np.array_equal(results["off_periods"], [2, 1, 5]) assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14]) assert np.array_equal(results["off_periods_frame"], [4, 7, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]] ) ] ) results = _blink_statistics(frames, memory=3, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [5]) assert np.array_equal(results["on_periods_frame"], [0, 14]) assert np.array_equal(results["off_periods_frame"], [9]) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]]) ] ) results = _blink_statistics(frames, memory=3, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [7, 2]) assert np.array_equal(results["off_periods"], [5]) assert np.array_equal(results["on_periods_frame"], [2, 14]) assert np.array_equal(results["off_periods_frame"], [9]) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]]) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [16]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5]]) ] ) def test__blink_statistics_5(caplog): # frame with on and off periods including repeated frames. frames = np.array([0, 1, 4, 4, 6, 7, 8, 12, 12, 13]) + 4 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [4, 2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert
np.array_equal(results["off_periods_frame"], [0, 6, 9, 13])
numpy.array_equal
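A minimal sketch of how numpy.array_equal behaves, assuming stock NumPy (the values below are illustrative, not taken from the row above):

import numpy as np

a = np.array([1, 2, 3])
assert np.array_equal(a, [1, 2, 3])    # True only when shapes and all elements match
assert not np.array_equal(a, [1, 2])   # differing shapes compare unequal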
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 15:57:42 2019

@author: mahal
"""
import numpy as np
import scipy.stats as ss
import heapq


class Cell:
    def __init__(self, cell_coord, instantiated_motion, instantiated_iteration):
        self.coord = cell_coord
        self.coverage = 1
        self.instantiated_iteration = instantiated_iteration + 1
        self.score = 1
        self.importance = 0
        self.cntNeighbors = 0
        self.numExpansion = 1
        self.motions = [instantiated_motion]
        self.coverage_motion_cnt = -1

    def selectMotion(self):
        num_motions = len(self.motions)
        x =
np.arange(num_motions)
numpy.arange
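A minimal sketch of numpy.arange, assuming stock NumPy (illustrative values):

import numpy as np

idx = np.arange(4)                                    # array([0, 1, 2, 3]), one index per motion
assert np.array_equal(idx, [0, 1, 2, 3])
assert np.array_equal(np.arange(2, 8, 2), [2, 4, 6])  # start, stop (exclusive), step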
import os, io
import numpy as np
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D, BatchNormalization
from tensorflow.keras.layers import Activation
import cv2
from sklearn.model_selection import train_test_split
import time
from PIL import Image


def load_train_data(way_x, way_y):
    train_x, train_y = [], []
    for j in sorted(os.listdir(way_x)):
        flore_x = os.path.join(way_x, j)
        flore_y = os.path.join(way_y, j)
        for i in sorted(os.listdir(flore_x)):
            image = cv2.imread(os.path.join(flore_x, i))
            image = cv2.resize(image, (455, 256))
            image = np.asarray(image)
            if 'lost_image' in locals():
                frame_to_frame = np.concatenate([lost_image, image], axis=2)
                lost_image = image
                train_x.append(frame_to_frame)
            else:
                lost_image = image
            if os.path.isfile(os.path.join(flore_y, i)):
                print(i)
                image = cv2.imread(os.path.join(flore_y, i))
                image = cv2.resize(image, (455, 256))
                image = np.asarray(image)
                train_y.append(image)
        del lost_image
    train_x = np.asarray(train_x)
    train_y = np.asarray(train_y)
    train_x = train_x / 255
    train_y = train_y / 255
    return train_x, train_y


def model_init(input_shape):
    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape=input_shape, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(32, (2, 2), padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(ZeroPadding2D(padding=((0, 0), (1, 2))))  # Zero padding for fitting output layers shape
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(16, (2, 2), padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(16, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization(batch_size=16))
    model.add(Conv2D(3, (1, 1)))
    model.add(Activation('sigmoid'))

    model.compile(
        optimizer="adam",
        loss=tensorflow.losses.binary_crossentropy,
        metrics=["accuracy"])

    return model


def train(model, train_x, train_y, test_x, test_y, epochs=20, batch_size=16):
    model.fit(
        train_x, train_y,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(test_x, test_y)
    )
    model.save_weights('1.h5')
    return model


def write_video(file_path):
    model.load_weights(input("Path to weights file: "))
    cap = cv2.VideoCapture(file_path)
    h, w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    buf = []
    images = []
    lost_time = time.time()
    all_time = time.time()
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
    count = 0
    agr_time = 0
    avr_img = 0
    step_x = 10
    step_y = 10
    ret, control_image = cap.read()
    if ret:
        control_image = cv2.resize(control_image, (455, 256))
        control_image =
np.asarray(control_image)
numpy.asarray
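A minimal sketch of numpy.asarray, assuming stock NumPy (illustrative values):

import numpy as np

frame = [[0, 1], [2, 3]]
arr = np.asarray(frame)          # converts a nested list to an ndarray
assert arr.shape == (2, 2)
assert np.asarray(arr) is arr    # no copy when the input is already a matching ndarray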
import numpy as np


def get_distances(data, factual, counterfactual):
    """
    Computes distances 1 to 4
    :param data: Dataframe with original data
    :param factual: List of features
    :param counterfactual: List of features
    :return: Array of distances 1 to 4
    """
    d1 = d1_distance(factual, counterfactual)
    d2 = d2_distance(factual, counterfactual, data)
    d3 = d3_distance(factual, counterfactual, data)
    d4 = d4_distance(factual, counterfactual)

    return np.array([d1, d2, d3, d4])


def d1_distance(instance, cf):
    """
    Compute d1-distance
    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :return: Scalar number
    """
    # get difference between original and counterfactual
    delta = get_delta(instance, cf)

    # mark elements whose difference is non-zero
    delta_bin = [i != 0 for i in delta]
    delta_bin = delta_bin[:-1]  # lose label column

    d1 = sum(delta_bin)

    return d1


def d2_distance(instance, cf, df):
    """
    Compute d2 distance
    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :param df: Dataframe object of dataset
    :return: Scalar number
    """
    # get difference between original and counterfactual
    delta = get_delta(instance, cf)
    delta = delta[:-1]  # lose label column

    # get range of every feature
    range = get_range(df)

    d2 = [
np.abs(x[0] / x[1])
numpy.abs
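A minimal sketch of numpy.abs, assuming stock NumPy (illustrative values):

import numpy as np

delta = np.array([-1.5, 0.0, 2.0])
assert np.array_equal(np.abs(delta), [1.5, 0.0, 2.0])  # elementwise absolute value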
# features.py - feature extraction and plotting # Bregman - music information retrieval toolkit """ Overview ======== To get features from audio signals, or any signal, use a feature extractor class. Feature extractors are derived from the base Features class in module **features_base**. Instantiate any feature extractor class with a signal vector, or audio file name, and feature parameter keywrod arguments. E.g.:: :: myFeatures = featureExtractor(fileName, param1=value1, param2=value2, ...) The global default feature-extractor parameters are defined in a parameter dictionary: :: default_feature_params() { 'sample_rate': 44100, # The audio sample rate 'feature':'cqft', # Which feature to extract (automatic for Features derived classes) 'nbpo': 12, # Number of Bands Per Octave for front-end filterbank 'ncoef' : 10, # Number of cepstral coefficients to use for cepstral features 'lcoef' : 1, # Starting cepstral coefficient 'lo': 62.5, # Lowest band edge frequency of filterbank 'hi': 16000, # Highest band edge frequency of filterbank 'nfft': 16384, # FFT length for filterbank 'wfft': 8192, # FFT signal window length 'nhop': 4410, # FFT hop size 'window' : 'hamm', # FFT window type 'log10': False, # Whether to use log output 'magnitude': True, # Whether to use magnitude (False=power) 'power_ext': ".power",# File extension for power files 'intensify' : False, # Whether to use critical band masking in chroma extraction 'onsets' : False, # Whether to use onset-synchronus features 'verbosity' : 1 # How much to tell the user about extraction } Parameter keywords can be passed explicitly as formal arguments or as a keyword argument parameter dict:, e.g.: :: myFeatures = featureExtractor(fileName, nbpo=24, nhop=2205 ) myFeatures = featureExtractor(fileName, **{'nbpo':24, 'nhop':2205} ) To make a new feature extractor, just derive your new class from the Features class. New feature extractor classes might override default parameters and override the *extract* method: :: class MyExtractor(Features): def __init__(self, arg, **feature_params): feature_params['feature'] = 'hcqft' Features.__init__(self, arg, feature_params) def extract(self): Features.extract(self) self.X = do_some_extra_stuff(self.X) # further process the features self.__setattr__('new_parameter', new_value) # set up some new feature attributes Features instance members ========================= Any instance of the Features class (including all the feature extractors below) contain the feature parameters as class members. Additionally, a number of other useful class members are provided as follows: :: F = LogFrequencySpectrum(x) # An example instance of the Features class (or F=LogFrequencyCepstrum(x), etc...) F.any_feature_parameter # any keyword parameter: sample_rate, feature, nbpo, ncoef, lcoef, lo, hi, etc... 
F.X # instance features expressed as a N x T column-wise observation matrix for N-dimensional features F.STFT # complex-valued half spectrum of the STFT F.POWER # total power per frame F.Q # if log spectrum invoked, contains the constant-Q transform matrix for STFT->CQFT, else None F.CQFT # if log spectrum invoked, contains the CQFT feature matrix F.MFCC # if log cepstrum invoked, contains the MFCC feature matrix # Private (hidden) members that may be useful F._outN # size of the front-end output (F.fftN/2+1) F._cqtN # size of the log spectrum output F._fftfrqs # The center frequencies, up to F.hi, of the front-end filterbank (STFT) F._logfrqs # The center frequences, up to F.hi, of the log frequency transform (if extracted) F._logfbws # The bandwidths of the log frequency transform (if extracted) Feature Extractors ================== """ __version__ = '1.0' __author__ = '<NAME>' __copyright__ = "Copyright (C) 2010 <NAME>, Dartmouth College, All Rights Reserved" __license__ = "GPL Version 2.0 or Higher" __email__ = '<EMAIL>' import pylab as P import numpy as np import glob from . import error from . import plca from .sound import * from .audiodb import * import pdb from .features_base import Features, feature_plot, feature_scale # All features exposed as separate classes # Frequency Domain class LinearFrequencySpectrum(Features): """ Linear-frequency spectrum, the short-time Fourier transform. :: feature = 'stft' # The underlying algorithm For the STFT implementation, the following parameters control the trade-off between information in time and information in frequency: :: nfft = 16384 # default fft size wfft = 8192 # default window size nhop = 4410 # default hop size """ def __init__(self, arg=None, **feature_params): feature_params['feature']='stft' Features.__init__(self, arg, feature_params) class LogFrequencySpectrum(Features): """ Log-frequency constant-Q spectrum :: feature_params['feature']='cqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='cqft' Features.__init__(self, arg, feature_params) class MelFrequencySpectrum(Features): """ Mel-frequency constant-Q spectrum (same as log-frequency constant-Q spectrum) :: feature_params['feature']='cqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='cqft' Features.__init__(self, arg, feature_params) class Chromagram(Features): """" Chromagram :: feature_params['feature']='chroma' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='chroma' Features.__init__(self, arg, feature_params) class HighQuefrencyChromagram(Features): """" HighQuefrenyChromagram (High-Pass Liftered with MFCCs) :: feature_params['feature']='hchroma' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='hchroma' Features.__init__(self, arg, feature_params) # Cepstral Domain #class LinearFrequencyCepstrum(Features): # """ # Linear-frequency cepstrum # :: # feature_params['feature']='lcqft' # """ # def __init__(self, arg=None, **feature_params): # feature_params['feature']='lcqft' # Features.__init__(self, arg, feature_params) class LogFrequencyCepstrum(Features): """ Log-frequency cepstrum (same as mel-frequency cepstrum) :: feature_params['feature']='mfcc' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='mfcc' Features.__init__(self, arg, feature_params) class MelFrequencyCepstrum(Features): """ Log-frequency cepstrum (approximates MFCC, same as log-frequency cepstrum) :: feature_params['feature']='mfcc' """ def 
__init__(self, arg=None, **feature_params): feature_params['feature']='mfcc' Features.__init__(self, arg, feature_params) # Quefrency-Domain Liftered # class LowQuefrencyLinearSpectrum(Features): # def __init__(self, arg=None, **feature_params): # Features.__init__(self, arg, feature_params) # class HighQuefrencyLinearSpectrum(Features): # def __init__(self, arg=None, **feature_params): # Features.__init__(self, arg, feature_params) class LowQuefrencyLogFrequencySpectrum(Features): """ Low-Quefrency Log Frequency Spectrum :: feature_params['feature']='lcqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='lcqft' Features.__init__(self, arg, feature_params) class HighQuefrencyLogFrequencySpectrum(Features): """ High-Quefrency Log-Frequency Spectrum: :: feature_params['feature']='hcqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='hcqft' Features.__init__(self, arg, feature_params) class LowQuefrencyMelSpectrum(Features): """ Low-Quefrency Mel-Frequency Spectrum :: feature_params['feature']='lcqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='lcqft' Features.__init__(self, arg, feature_params) class HighQuefrencyMelSpectrum(Features): """ High-Quefrency Mel-Frequency Spectrum: :: feature_params['feature']='hcqft' """ def __init__(self, arg=None, **feature_params): feature_params['feature']='hcqft' Features.__init__(self, arg, feature_params) # Time Domain class RMS(Features): """ Root mean square (RMS) :: feature_params['feature']='power' feature_params['mantitude']=True feature_params['log10']=False """ def __init__(self, arg=None, **feature_params): feature_params['feature']='power' feature_params['mantitude']=True feature_params['log10']=False Features.__init__(self, arg, feature_params) class LinearPower(Features): """ Linear power :: feature_params['feature']='power' feature_params['mantitude']=False feature_params['log10']=False """ def __init__(self, arg=None, **feature_params): feature_params['feature']='power' feature_params['mantitude']=False feature_params['log10']=False Features.__init__(self, arg, feature_params) class dBPower(Features): """ deci-Bel power (dB power) :: feature_params['feature']='power' feature_params['mantitude']=False feature_params['log10']=True """ def __init__(self, arg=None, **feature_params): feature_params['feature']='power' feature_params['mantitude']=False feature_params['log10']=True Features.__init__(self, arg, feature_params) # Statistics and Derivatives class LinearFrequencySpectrumCentroid(Features): """ Linear-Frequency Spectrum Centroid """ def __init__(self, arg, **kwargs): kwargs['feature']='stft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = (self.X.T * self._fftfrqs).sum(1) / self.X.T.sum(1) class LogFrequencySpectrumCentroid(Features): """ Log-Frequency Spectrum Centroid """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = (self.X.T * self._logfrqs).sum(1) / self.X.T.sum(1) class MelFrequencySpectrumCentroid(Features): """ Mel-Frequency Spectrum Centroid """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = (self.X.T * self._logfrqs).sum(1) / self.X.T.sum(1) class LinearFrequencySpectrumSpread(Features): """ Linear-Frequency Spectrum Spread """ def __init__(self, arg, **kwargs): kwargs['feature']='stft' 
Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) mf = (self.X.T * self._fftfrqs).sum(1) / self.X.T.sum(1) self.X = (((self.X / self.X.T.sum(1)).T * ((P.atleast_2d(self._fftfrqs).T - mf)).T)**2).sum(1) class LogFrequencySpectrumSpread(Features): """ Log-Frequency Spectrum Spread """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) mf = (self.X.T * self._logfrqs).sum(1) / self.X.T.sum(1) self.X = (((self.X / self.X.T.sum(1)).T * ((P.atleast_2d(self._logfrqs).T - mf)).T)**2).sum(1) class MelFrequencySpectrumSpread(Features): """ Mel-Frequency Spectrum Spread """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) mf = (self.X.T * self._logfrqs).sum(1) / self.X.T.sum(1) self.X = (((self.X / self.X.T.sum(1)).T * ((P.atleast_2d(self._logfrqs).T - mf)).T)**2).sum(1) #TODO: have STFT calculate _fftfreqs so axis plots are easy # have feature_plot be intelligent about feature dimensions # check frame count and last frame behaviour # limit frames read with keyword argument class LinearFrequencySpectrumFlux(Features): """ LinearFrequencySpectrumFlux """ def __init__(self, arg, **kwargs): kwargs['feature']='stft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = P.sqrt((P.diff(self.X)**2).sum(0))/self.X.shape[0] class LogFrequencySpectrumFlux(Features): """ LogFrequencySpectrumFlux """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = P.sqrt((P.diff(self.X)**2).sum(0))/self.X.shape[0] class MelFrequencySpectrumFlux(Features): """ MelFrequencySpectrumFlux """ def __init__(self, arg, **kwargs): kwargs['feature']='cqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = P.sqrt((P.diff(self.X)**2).sum(0))/self.X.shape[0] class LowQuefrencyCepstrumFlux(Features): """ LowQuefrencyCepstrumFlux """ def __init__(self, arg, **kwargs): kwargs['feature']='lcqft' Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self.X = P.sqrt((P.diff(self.X)**2).sum(0))/self.X.shape[0] # LinearFrequencyModulationPowerSpectrum class LinearFrequencyModulationPowerSpectrum(Features): """ LinearFrequencyModulationPowerSpectrum """ _log = False _window = None _hop = None def __init__(self, arg, window=None, hop=None, logscale=False, **kwargs): kwargs['feature']='stft' self._window, self._hop, self._log = window, hop, logscale Features.__init__(self, arg, kwargs) def extract(self): Features.extract(self) self._hop = 1 if self._hop is None else self._hop window, hop = self._window, self._hop if window and hop is not None: fp = self.feature_params num_frames = int((window*fp['sample_rate'])/(1000.0*fp['nhop'])) num_hop = int((hop*fp['sample_rate'])/(1000.0*fp['nhop'])) print(num_frames, num_hop) if not num_frames and num_hop : raise ValueError("num_frames and num_hop too small for FFT window / hop") else : Y = [] for k in range(0,self.X.shape[1]-window+1,num_hop): X = log(self.X[:,np.arange(k,k+num_frames)]+np.finfo(np.float32).eps) if self._log else self.X[:,
np.arange(k,k+num_frames)
numpy.arange
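A minimal sketch of the column-slicing idiom that numpy.arange enables in the windowing loop above, assuming stock NumPy (the array contents are illustrative):

import numpy as np

X = np.arange(12).reshape(3, 4)             # stand-in for a (bins, frames) feature matrix
k, num_frames = 1, 2
block = X[:, np.arange(k, k + num_frames)]  # select num_frames consecutive columns
assert block.shape == (3, 2)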
# coding: utf-8

# This notebook provides an example code for using the IGA2D class

# In[1]:

import IGA
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().magic(u'matplotlib inline')


# In[2]:

def run_case_1(num_knots, order, delta, norm, quad_degree=10):

    h = 1.0 / num_knots

    if delta > h:
        num_boundary_elements = np.ceil(delta / h)
    else:
        num_boundary_elements = 1

    omega_p1 = np.linspace(-delta, 0, num=(num_boundary_elements + 1))
    omega = np.linspace(0, 1, num=(num_knots+1))
    omega_p2 = np.linspace(1, 1 + delta, num=(num_boundary_elements + 1))

    knot_vector = np.r_[-delta * np.ones(order), omega_p1[:-1], omega[:-1], omega_p2, np.ones(order) * (1 + delta)]

    iga = IGA.PD1D(knot_vector, order, delta)
    iga.degree = quad_degree

    u = lambda x: x * (1 - x)
    b = lambda x: np.ones(x.shape[0])

    iga.compute_solutions(u, b, num_boundary_elements)

    return iga.compute_error(norm=norm)


# In[ ]:

dofs = np.array([100,700])
errs = [ run_case_1(num_knots, order=1, delta=0.25, norm=2, quad_degree=4) for num_knots in dofs ]


# In[ ]:


# In[ ]:

#Fit a straight line
coefs = np.polyfit(
np.log10(1.0 / dofs)
numpy.log10
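A minimal sketch of numpy.log10 as used in convergence-rate fits, assuming stock NumPy (illustrative values):

import numpy as np

dofs = np.array([100, 700])
h = np.log10(1.0 / dofs)        # log10 of the mesh sizes to be fit against log-errors
assert np.isclose(h[0], -2.0)   # log10(1/100) == -2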
import numpy as np import matplotlib.pyplot as plt from FUNCS import FNS # variable class for body frame module class MapVar: def __init__(self, ax, limit, origin, ret_size): self.ax = ax self.origin = origin self.center = origin self.ret_size = ret_size self.trk_change = 0 self.offset = 0 self.ax.set_xlim(0, limit[0]) self.ax.set_ylim(0, limit[1]) self.ax.set_zlim(0, limit[2]) # target variables self.target = np.zeros(3) self.estimate = np.zeros(3) self.targ_data = np.zeros((2, 2)) self.targ, = self.ax.plot([], [], [], 'o', color='blue', markersize=6, label='veridical') self.targ_line, = self.ax.plot([], [], [], color='red', linestyle='dotted') self.left_line, = self.ax.plot([], [], [], color='blue', linestyle='dotted') self.right_line, = self.ax.plot([], [], [], color='blue', linestyle='dotted') self.cent_line, = self.ax.plot([], [], [], color='black', linestyle='dotted') # estimate variables self.est, = self.ax.plot([], [], [], 'o', color='red', markersize=6, label='estimate') self.left_est, = self.ax.plot([], [], [], color='red', linestyle='dotted') self.right_est, = self.ax.plot([], [], [], color='red', linestyle='dotted') # body frame variables self.head, = self.ax.plot([], [], [], color='black') self.head_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5) self.left_eye, = self.ax.plot([], [], [], color='black') self.right_eye, = self.ax.plot([], [], [], color='black') self.left_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5) self.right_cent, = self.ax.plot([], [], [], 'x', color='black', markersize=2.5) self.left_fov, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.right_fov, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.column, = self.ax.plot([], [], [], color='black') self.column_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.pectoral, = self.ax.plot([], [], [], color='black') self.pelvic, = self.ax.plot([], [], [], color='black') self.CoM, = self.ax.plot([], [], [], 'x', color='blue', markersize=2.5) self.left_uplimb, = self.ax.plot([], [], [], color='black') self.left_uplimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.right_uplimb, = self.ax.plot([], [], [], color='black') self.right_uplimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.left_lowlimb, = self.ax.plot([], [], [], color='black') self.left_lowlimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.right_lowlimb, = self.ax.plot([], [], [], color='black') self.right_lowlimb_jnt, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) # muscles variables self.base_frame, = self.ax.plot([], [], [], color='black') self.thorax_frame, = self.ax.plot([], [], [], color='black') self.lumbar_frame, = self.ax.plot([], [], [], color='black') self.pect_frame, = self.ax.plot([], [], [], color='black') self.humr_frame, = self.ax.plot([], [], [], color='black') self.shoul_frame, = self.ax.plot([], [], [], color='black') self.elbow_frame, = self.ax.plot([], [], [], color='black') self.pelv_frame, = self.ax.plot([], [], [], color='black') self.femr_frame, = self.ax.plot([], [], [], color='black') self.hip_frame, = self.ax.plot([], [], [], color='black') self.knee_frame, = self.ax.plot([], [], [], color='black') self.left_neck_mus, = self.ax.plot([], [], [], color='red') self.right_neck_mus, = self.ax.plot([], [], [], color='red') self.left_trunk_mus, = self.ax.plot([], [], [], color='red') self.right_trunk_mus, = self.ax.plot([], [], [], color='red') 
self.shoul_horz, = self.ax.plot([], [], [], color='red') self.shoul_vert, = self.ax.plot([], [], [], color='red') self.elbow_vert, = self.ax.plot([], [], [], color='red') self.wrist_vert, = self.ax.plot([], [], [], color='red') self.hip_horz, = self.ax.plot([], [], [], color='red') self.hip_vert, = self.ax.plot([], [], [], color='red') self.knee_vert, = self.ax.plot([], [], [], color='red') self.ankle_vert, = self.ax.plot([], [], [], color='red') self.column, = self.ax.plot([], [], [], color='black') self.left_limb, = self.ax.plot([], [], [], color='black') self.right_limb, = self.ax.plot([], [], [], color='black') self.left_joints, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.left_inserts, = self.ax.plot([], [], [], 'o', color='blue', markersize=2.5) self.right_joints, = self.ax.plot([], [], [], 'o', color='red', markersize=2.5) self.right_inserts, = self.ax.plot([], [], [], 'o', color='blue', markersize=2.5) # external force variables self.fof = np.zeros((2, 3, 2)) self.grf = np.zeros((2, 3, 2)) # method class for body frame module class MapFun: def __init__(self, eye_data, axial_data, append_data, MapVar): self.MapVar = MapVar self.FNS = FNS() self.origin = MapVar.origin self.ret_size = self.MapVar.ret_size self.magnify = 7 self.ang_rang = np.radians((-45, 45)) self.dist_rang = np.array((5, 50)) self.default = -5 # initialize eye positions and joint angles self.eye_rot = FNS().eye_init() self.neck_rot, self.trunk_rot = FNS().column_init() self.uplimb_rot = (FNS().uplimb_init(), FNS().uplimb_init()) self.lowlimb_rot = (FNS().lowlimb_init(), FNS().lowlimb_init()) # updated eye positions and joint angles self.eye_data = eye_data self.axial_data = axial_data self.append_data = append_data # draw lines of sight from target to eyes def targ_plt(self, head_cent, head_ahead, left_targ, right_targ): targ = self.MapVar.target targ_data = self.MapVar.targ_data self.MapVar.targ.set_data(targ[0], targ[1]) self.MapVar.targ.set_3d_properties(targ[2]) if np.array_equal(targ_data, np.zeros((2, 2))) != True: #targ_line = np.transpose(np.array((targ, head_cent)), (1, 0)) #self.MapVar.targ_line.set_data(targ_line[0], targ_line[1]) #self.MapVar.targ_line.set_3d_properties(targ_line[2]) left_line = np.transpose(np.array((targ, left_targ)), (1, 0)) self.MapVar.left_line.set_data(left_line[0], left_line[1]) self.MapVar.left_line.set_3d_properties(left_line[2]) right_line = np.transpose(np.array((targ, right_targ)), (1, 0)) self.MapVar.right_line.set_data(right_line[0], right_line[1]) self.MapVar.right_line.set_3d_properties(right_line[2]) #cent_line = np.transpose(np.array((head_ahead, head_cent)), (1, 0)) #self.MapVar.cent_line.set_data(cent_line[0], cent_line[1]) #self.MapVar.cent_line.set_3d_properties(cent_line[2]) # draw lines of sight from estimate to eyes def est_plt(self, est, left_fov, right_fov): self.MapVar.est.set_data(est[0], est[1]) self.MapVar.est.set_3d_properties(est[2]) left_est = np.transpose(np.array((est, left_fov)), (1, 0)) self.MapVar.left_est.set_data(left_est[0], left_est[1]) self.MapVar.left_est.set_3d_properties(left_est[2]) right_est = np.transpose(np.array((est, right_fov)), (1, 0)) self.MapVar.right_est.set_data(right_est[0], right_est[1]) self.MapVar.right_est.set_3d_properties(right_est[2]) # compute head and eye positions in the body frame and do not update if shift=0 indicates the feet are # driven into ground def head_cpt(self, shift): FNS = self.FNS magn = self.magnify size = self.ret_size left_targ_hit, right_targ_hit = self.MapVar.targ_data 
self.eye_rot = self.eye_data left_eye_rot, right_eye_rot = self.eye_rot if shift == 0: neck_rot_vert, neck_rot_horz = self.neck_rot truk_rot_vert, truk_rot_horz = self.trunk_rot else: self.neck_rot, self.trunk_rot = self.axial_data neck_rot_vert, neck_rot_horz = self.neck_rot truk_rot_vert, truk_rot_horz = self.trunk_rot column = self.column_cpt(shift)[0] base = np.array((column[0][0], column[1][0], column[2][0])) head_cent = base + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 3 * magn) head_ahead = head_cent + FNS.latr_front(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 20) left_cent = head_cent + FNS.latr_left(truk_rot_horz + neck_rot_horz, 0, 2 * magn) right_cent = head_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, 0, 2 * magn) left_rad_est, left_ang_est = FNS.polar_tran((magn / size) * left_eye_rot[0], (magn / size) * left_eye_rot[1]) left_fov = left_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, left_ang_est, left_rad_est) right_rad_est, right_ang_est = FNS.polar_tran((magn / size) * right_eye_rot[0], (magn / size) * right_eye_rot[1]) right_fov = right_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, right_ang_est, right_rad_est) left_rad_verd, left_ang_verd = FNS.polar_tran((magn / size) * left_targ_hit[0], (magn / size) * left_targ_hit[1]) left_targ = left_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, left_ang_verd, left_rad_verd) right_rad_verd, right_ang_verd = FNS.polar_tran((magn / size) * right_targ_hit[0], (magn / size) * right_targ_hit[1]) right_targ = right_cent + FNS.latr_right(truk_rot_horz + neck_rot_horz, right_ang_verd, right_rad_verd) head, left_eye, right_eye = FNS.head_plane(head_cent, truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, magn) head = np.transpose(np.array((head[0], head[1], head[3], head[2], head[0])), (1, 0)) left_eye = np.transpose(np.array((left_eye[0], left_eye[1], left_eye[3], left_eye[2], left_eye[0])), (1, 0)) right_eye = np.transpose(np.array((right_eye[0], right_eye[1], right_eye[3], right_eye[2], right_eye[0])), (1, 0)) return (head_cent, head, head_ahead), (left_eye, right_eye), (left_cent, right_cent), \ (left_fov, right_fov), (left_targ, right_targ) # draw head and eye positions def head_plt(self, head_cent, head, left_eye, right_eye, left_cent, right_cent, left_fov, right_fov): self.MapVar.head.set_data(head[0], head[1]) self.MapVar.head.set_3d_properties(head[2]) self.MapVar.head_cent.set_data(head_cent[0], head_cent[1]) self.MapVar.head_cent.set_3d_properties(head_cent[2]) self.MapVar.left_eye.set_data(left_eye[0], left_eye[1]) self.MapVar.left_eye.set_3d_properties(left_eye[2]) self.MapVar.left_cent.set_data(left_cent[0], left_cent[1]) self.MapVar.left_cent.set_3d_properties(left_cent[2]) self.MapVar.right_eye.set_data(right_eye[0], right_eye[1]) self.MapVar.right_eye.set_3d_properties(right_eye[2]) self.MapVar.right_cent.set_data(right_cent[0], right_cent[1]) self.MapVar.right_cent.set_3d_properties(right_cent[2]) self.MapVar.left_fov.set_data(left_fov[0], left_fov[1]) self.MapVar.left_fov.set_3d_properties(left_fov[2]) self.MapVar.right_fov.set_data(right_fov[0], right_fov[1]) self.MapVar.right_fov.set_3d_properties(right_fov[2]) # compute position of center of mass due to column and/or leg movements and mode=(0, 1) indicates left leg # swing and right leg stance and mode=(1, 0) the reverse situation def CoM_shift(self, mode): FNS = self.FNS origin = self.MapVar.origin dep = self.default truk_rot_vert, truk_rot_horz = self.axial_data[1] 
(left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \ (left_ankle_rot_vert, left_ankle_rot_horz) = self.append_data[1][0] (right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \ (right_ankle_rot_vert, right_ankle_rot_horz) = self.append_data[1][1] # shift of CoM due to column movement shift_col = FNS.vert_up(0, 0, 10) - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10) if mode == (0, 1): # shift of CoM due to forward left leg movement shift_limb = FNS.vert_up(0, 0, 35) - FNS.vert_up(right_hip_rot_horz, right_hip_rot_vert, 20) - \ FNS.vert_up(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15) shift = shift_col + shift_limb # check if left foot is driven into ground left_foot, right_foot = self.lowlimb_tst(shift) if left_foot[2] < dep: shift = np.zeros(3) self.MapVar.center = origin - shift return 0 else: shift = shift * np.array((1, -1, 1)) self.MapVar.offset = shift * self.MapVar.trk_change + self.MapVar.offset * (1 - self.MapVar.trk_change) # update CoM position self.MapVar.center = origin - shift + self.MapVar.offset return 1 if mode == (1, 0): # shift of CoM due to forward right leg movement shift_limb = FNS.vert_up(0, 0, 35) - FNS.vert_up(left_hip_rot_horz, left_hip_rot_vert, 20) - \ FNS.vert_up(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15) shift = shift_col + shift_limb # check if right foot is driven into ground left_foot, right_foot = self.lowlimb_tst(shift) if right_foot[2] < dep: shift = np.zeros(3) self.MapVar.center = origin - shift return 0 else: shift = shift * np.array((1, 1, 1)) self.MapVar.offset = shift * self.MapVar.trk_change + self.MapVar.offset * (1 - self.MapVar.trk_change) # update CoM position self.MapVar.center = origin - shift + self.MapVar.offset return 1 # compute positions of base of head, cervic (neck), thorax (for pectoral girdle), lumbar (CoM), and sacrum # (pelvic and for pelvic girdle) def column_cpt(self, shift): FNS = self.FNS if shift == 0: neck_rot_vert, neck_rot_horz = self.neck_rot truk_rot_vert, truk_rot_horz = self.trunk_rot else: self.neck_rot, self.trunk_rot = self.axial_data neck_rot_vert, neck_rot_horz = self.neck_rot truk_rot_vert, truk_rot_horz = self.trunk_rot center = self.MapVar.center lumbar = center sacrum = lumbar - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10) thorax = lumbar + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30) cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10) base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5) left_pectoral = thorax + FNS.latr_left(truk_rot_horz, 0, 10) right_pectoral = thorax + FNS.latr_right(truk_rot_horz, 0, 10) left_pelvic = sacrum + FNS.latr_left(0, 0, 5) right_pelvic = sacrum + FNS.latr_right(0, 0, 5) column = np.transpose(np.array((base, cervic, thorax, lumbar, sacrum)), (1, 0)) pectoral = np.transpose(np.array((left_pectoral, thorax, right_pectoral)), (1, 0)) pelvic = np.transpose(np.array((left_pelvic, sacrum, right_pelvic)), (1, 0)) return column, pectoral, pelvic # draw positions of column segments def column_plt(self, column, pectoral, pelvic): self.MapVar.column.set_data(column[0], column[1]) self.MapVar.column.set_3d_properties(column[2]) self.MapVar.pectoral.set_data(pectoral[0], pectoral[1]) self.MapVar.pectoral.set_3d_properties(pectoral[2]) self.MapVar.pelvic.set_data(pelvic[0], pelvic[1]) self.MapVar.pelvic.set_3d_properties(pelvic[2]) cervic = (column[0][1], column[1][1], column[2][1]) sacrum = (column[0][4], column[1][4], column[2][4]) CoM = 
(column[0][3], column[1][3], column[2][3]) column_jnt = np.transpose(np.array((cervic, sacrum)), (1, 0)) self.MapVar.column_jnt.set_data(column_jnt[0], column_jnt[1]) self.MapVar.column_jnt.set_3d_properties(column_jnt[2]) self.MapVar.CoM.set_data(CoM[0], CoM[1]) self.MapVar.CoM.set_3d_properties(CoM[2]) # compute positions of shoulders elbows and wrists of upper limbs def uplimb_cpt(self, shift): FNS = self.FNS pectoral = self.column_cpt(shift)[1] left_shoulder = np.array((pectoral[0][0], pectoral[1][0], pectoral[2][0])) right_shoulder = np.array((pectoral[0][2], pectoral[1][2], pectoral[2][2])) if shift == 0: (left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \ (left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0] (right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \ (right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1] else: self.uplimb_rot = self.append_data[0] (left_shoul_rot_vert, left_shoul_rot_horz), (left_elbow_rot_vert, left_elbow_rot_horz), \ (left_wrist_rot_vert, left_wrist_rot_horz) = self.uplimb_rot[0] (right_shoul_rot_vert, right_shoul_rot_horz), (right_elbow_rot_vert, right_elbow_rot_horz), \ (right_wrist_rot_vert, right_wrist_rot_horz) = self.uplimb_rot[1] left_elbow = left_shoulder + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert, 15) left_wrist = left_elbow + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert, 10) left_hand = left_wrist + FNS.vert_down(left_shoul_rot_horz, left_shoul_rot_vert + left_elbow_rot_vert + left_wrist_rot_vert, 5) right_elbow = right_shoulder + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert, 15) right_wrist = right_elbow + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert, 10) right_hand = right_wrist + FNS.vert_down(right_shoul_rot_horz, right_shoul_rot_vert + right_elbow_rot_vert + right_wrist_rot_vert, 5) left_limb = np.transpose(np.array((left_shoulder, left_elbow, left_wrist, left_hand)), (1, 0)) right_limb = np.transpose(np.array((right_shoulder, right_elbow, right_wrist, right_hand)), (1, 0)) return left_limb, right_limb # draw positions of upper limbs def uplimb_plt(self, left_uplimb, right_uplimb): self.MapVar.left_uplimb.set_data(left_uplimb[0], left_uplimb[1]) self.MapVar.left_uplimb.set_3d_properties(left_uplimb[2]) left_shoul = (left_uplimb[0][0], left_uplimb[1][0], left_uplimb[2][0]) left_elbow = (left_uplimb[0][1], left_uplimb[1][1], left_uplimb[2][1]) left_wrist = (left_uplimb[0][2], left_uplimb[1][2], left_uplimb[2][2]) left_uplimb_jnt = np.transpose(np.array((left_shoul, left_elbow, left_wrist)), (1, 0)) self.MapVar.left_uplimb_jnt.set_data(left_uplimb_jnt[0], left_uplimb_jnt[1]) self.MapVar.left_uplimb_jnt.set_3d_properties(left_uplimb_jnt[2]) self.MapVar.right_uplimb.set_data(right_uplimb[0], right_uplimb[1]) self.MapVar.right_uplimb.set_3d_properties(right_uplimb[2]) right_shoul = (right_uplimb[0][0], right_uplimb[1][0], right_uplimb[2][0]) right_elbow = (right_uplimb[0][1], right_uplimb[1][1], right_uplimb[2][1]) right_wrist = (right_uplimb[0][2], right_uplimb[1][2], right_uplimb[2][2]) right_uplimb_jnt = np.transpose(np.array((right_shoul, right_elbow, right_wrist)), (1, 0)) self.MapVar.right_uplimb_jnt.set_data(right_uplimb_jnt[0], right_uplimb_jnt[1]) self.MapVar.right_uplimb_jnt.set_3d_properties(right_uplimb_jnt[2]) # compute positions of hips, knees and ankles of lower limbs def lowlimb_cpt(self, shift): FNS = self.FNS pelvic = 
self.column_cpt(shift)[2] left_hip = np.array((pelvic[0][0], pelvic[1][0], pelvic[2][0])) right_hip = np.array((pelvic[0][2], pelvic[1][2], pelvic[2][2])) if shift == 0: (left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \ (left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0] (right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \ (right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1] else: self.lowlimb_rot = self.append_data[1] (left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \ (left_ankle_rot_vert, left_ankle_rot_horz) = self.lowlimb_rot[0] (right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \ (right_ankle_rot_vert, right_ankle_rot_horz) = self.lowlimb_rot[1] left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20) left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15) left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert + left_ankle_rot_vert + np.pi / 2, 5) left_limb = np.transpose(np.array((left_hip, left_knee, left_ankle, left_foot)), (1, 0)) right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20) right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15) right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert + right_ankle_rot_vert + np.pi / 2, 5) right_limb = np.transpose(np.array((right_hip, right_knee, right_ankle, right_foot)), (1, 0)) return left_limb, right_limb # draw positions of lower limbs def lowlimb_plt(self, left_lowlimb, right_lowlimb): self.MapVar.left_lowlimb.set_data(left_lowlimb[0], left_lowlimb[1]) self.MapVar.left_lowlimb.set_3d_properties(left_lowlimb[2]) left_hip = (left_lowlimb[0][0], left_lowlimb[1][0], left_lowlimb[2][0]) left_knee = (left_lowlimb[0][1], left_lowlimb[1][1], left_lowlimb[2][1]) left_ankle = (left_lowlimb[0][2], left_lowlimb[1][2], left_lowlimb[2][2]) left_lowlimb_jnt = np.transpose(np.array((left_hip, left_knee, left_ankle)), (1, 0)) self.MapVar.left_lowlimb_jnt.set_data(left_lowlimb_jnt[0], left_lowlimb_jnt[1]) self.MapVar.left_lowlimb_jnt.set_3d_properties(left_lowlimb_jnt[2]) self.MapVar.right_lowlimb.set_data(right_lowlimb[0], right_lowlimb[1]) self.MapVar.right_lowlimb.set_3d_properties(right_lowlimb[2]) right_hip = (right_lowlimb[0][0], right_lowlimb[1][0], right_lowlimb[2][0]) right_knee = (right_lowlimb[0][1], right_lowlimb[1][1], right_lowlimb[2][1]) right_ankle = (right_lowlimb[0][2], right_lowlimb[1][2], right_lowlimb[2][2]) right_lowlimb_jnt = np.transpose(np.array((right_hip, right_knee, right_ankle)), (1, 0)) self.MapVar.right_lowlimb_jnt.set_data(right_lowlimb_jnt[0], right_lowlimb_jnt[1]) self.MapVar.right_lowlimb_jnt.set_3d_properties(right_lowlimb_jnt[2]) # test if shift of CoM would cause either feet into ground def lowlimb_tst(self, shift): FNS = self.FNS neck_rot_vert, neck_rot_horz = self.axial_data[0] truk_rot_vert, truk_rot_horz = self.axial_data[1] center = self.MapVar.origin - shift sacrum = center - FNS.vert_up(truk_rot_horz, truk_rot_vert, 10) lumbar = center thorax = center + FNS.vert_up(truk_rot_horz, truk_rot_vert, 30) cervic = thorax + FNS.vert_up(truk_rot_horz, truk_rot_vert, 10) base = cervic + FNS.vert_up(truk_rot_horz + neck_rot_horz, truk_rot_vert + neck_rot_vert, 5) left_hip = sacrum + FNS.latr_left(0, 0, 5) right_hip = sacrum 
+ FNS.latr_right(0, 0, 5) (left_hip_rot_vert, left_hip_rot_horz), (left_knee_rot_vert, left_knee_rot_horz), \ (left_ankle_rot_vert, left_ankle_rot_horz) = self.append_data[1][0] (right_hip_rot_vert, right_hip_rot_horz), (right_knee_rot_vert, right_knee_rot_horz), \ (right_ankle_rot_vert, right_ankle_rot_horz) = self.append_data[1][1] left_knee = left_hip + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert, 20) left_ankle = left_knee + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert, 15) left_foot = left_ankle + FNS.vert_down(left_hip_rot_horz, left_hip_rot_vert + left_knee_rot_vert + left_ankle_rot_vert + np.pi / 2, 5) right_knee = right_hip + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert, 20) right_ankle = right_knee + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert, 15) right_foot = right_ankle + FNS.vert_down(right_hip_rot_horz, right_hip_rot_vert + right_knee_rot_vert + right_ankle_rot_vert + np.pi / 2, 5) return left_foot, right_foot # compute external torque for force of gravity and ground reaction force from joint positions of lower limbs def ext_forc(self, shift): FNS = self.FNS dep = 0.2 * self.default fof = np.zeros((2, 3, 2)) grf = np.zeros((2, 3, 2)) base, cervic, thorax, lumbar, sacrum = np.transpose(self.column_cpt(shift)[0], (1, 0)) left_hip, left_knee, left_ankle, left_foot = np.transpose(self.lowlimb_cpt(shift)[0], (1, 0)) right_hip, right_knee, right_ankle, right_foot = np.transpose(self.lowlimb_cpt(shift)[1], (1, 0)) # magnitude of external force mass = (50 + 5 + 20) * 0.001 # moment arm of force of gravity CoM = np.array((lumbar[0], lumbar[1], left_hip[2])) moment = np.linalg.norm(left_hip - CoM) fof[0][0][0] = moment * mass CoM = np.array((lumbar[0], lumbar[1], left_knee[2])) moment = np.linalg.norm(left_knee - CoM) fof[0][1][0] = moment * mass CoM = np.array((lumbar[0], lumbar[1], left_ankle[2])) moment = np.linalg.norm(left_ankle - CoM) fof[0][2][0] = moment * mass CoM = np.array((lumbar[0], lumbar[1], right_hip[2])) moment = np.linalg.norm(right_hip - CoM) fof[1][0][0] = moment * mass CoM = np.array((lumbar[0], lumbar[1], right_knee[2])) moment = np.linalg.norm(right_knee - CoM) fof[1][1][0] = moment * mass CoM = np.array((lumbar[0], lumbar[1], right_ankle[2])) moment = np.linalg.norm(right_ankle - CoM) fof[1][2][0] = moment * mass self.MapVar.fof = fof # moment arm of ground reaction force left_cond = FNS.delta_fn(FNS.cond_fn(left_ankle[2], -dep), 1) right_cond = FNS.delta_fn(FNS.cond_fn(right_ankle[2], -dep), 1) # both feet on ground if left_cond == 1 and right_cond == 1: mid_dist = np.linalg.norm(left_ankle - right_ankle) / 2 cent = left_ankle + 0.5 * (right_ankle - left_ankle) CoP = np.array((cent[0], cent[1], left_ankle[2])) moment = np.linalg.norm(left_ankle - CoP) grf[0][2][0] = moment * mass CoP = np.array((cent[0], cent[1], left_knee[2])) moment = np.linalg.norm(left_knee - CoP) grf[0][1][0] = moment * mass CoP = np.array((cent[0], cent[1], left_hip[2])) moment =
np.linalg.norm(left_hip - CoP)
numpy.linalg.norm
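A minimal sketch of numpy.linalg.norm as a moment-arm/distance computation, assuming stock NumPy (the coordinates are illustrative):

import numpy as np

joint = np.array([3.0, 4.0, 0.0])
ref = np.zeros(3)
assert np.linalg.norm(joint - ref) == 5.0   # Euclidean distance between two points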
import numpy as np
from typing import Callable, Optional


class KalmanFilter(object):
    def __init__(self, x0, P0, F, G, N, H):
        self.x = np.array(x0)
        self.P = np.array(P0)
        self.F = np.array(F)
        self.G =
np.array(G)
numpy.array
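A minimal sketch of numpy.array, assuming stock NumPy (illustrative values):

import numpy as np

P0 = [[1.0, 0.0], [0.0, 1.0]]
P = np.array(P0)                 # builds a fresh ndarray (copies its input by default)
assert P.shape == (2, 2) and P.dtype == np.float64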
import sys, os
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.realpath(this_dir + '/../magphase/src'))
import numpy as np
from matplotlib import pyplot as plt
import libutils as lu
import libaudio as la
import magphase as mp
from scikits.talkbox import lpc
from scipy.signal import lfilter
from scipy import interpolate


def lpc_to_mag(v_lpc, fft_len=4096):
    '''
    Computes the magnitude spectrum from LPC coefficients using approximation by FFT method.
    '''
    v_imp = np.r_[1, np.zeros(fft_len-1)]
    v_imp_filt = lfilter(np.array([1.0]), v_lpc, v_imp)

    v_mag = np.absolute(np.fft.fft(v_imp_filt))
    v_mag = la.remove_hermitian_half(v_mag[None,:])[0]

    return v_mag


def get_formant_locations_from_spec_env(v_sp_env):
    '''
    v_sp_env could be in db, log, or absolute value.
    '''
    v_mag_diff = np.diff(v_sp_env)
    v_mag_diff[v_mag_diff>=0.0] = 1.0
    v_mag_diff[v_mag_diff<0.0] = -1.0

    v_mag_diff_diff = np.diff(v_mag_diff)
    v_frmnts_bins = np.where(v_mag_diff_diff<0.0)[0] + 1
    v_frmnts_gains = v_sp_env[v_frmnts_bins]

    return v_frmnts_bins, v_frmnts_gains


def get_formant_locations_from_raw_long_frame(v_sig, v_pm, nx, fft_len):
    '''
    nx: frame index
    '''
    #v_sig, fs = la.read_audio_file(wavfile)

    # Epoch detection:
    #v_pm_sec, v_voi = la.reaper_epoch_detection(wavfile)
    #v_pm = lu.round_to_int(v_pm_sec * fs)

    # Raw-long Frame extraction:
    v_frm_long = v_sig[v_pm[nx-2]:v_pm[nx+2]+1]

    # Win:
    left_len = v_pm[nx] - v_pm[nx-2]
    right_len = v_pm[nx+2] - v_pm[nx]
    v_win = la.gen_non_symmetric_win(left_len, right_len, np.hanning, b_norm=False)
    v_frm_long_win = v_frm_long * v_win

    # Spectrum:
    v_mag = np.absolute(np.fft.fft(v_frm_long_win, n=fft_len))
    v_mag_db = la.db(la.remove_hermitian_half(v_mag[None,:])[0])

    # Formant extraction - LPC method:--------------------------------------------------
    v_lpc, v_e, v_refl = lpc(v_frm_long_win, 120)

    b_use_lpc_roots = False
    if b_use_lpc_roots:
        v_lpc_roots = np.roots(v_lpc)
        v_lpc_angles = np.angle(v_lpc_roots)
        v_lpc_angles = v_lpc_angles[v_lpc_angles>=0]
        v_lpc_angles = np.sort(v_lpc_angles)
        fft_len_half = 1 + fft_len / 2
        v_lpc_roots_bins = v_lpc_angles * fft_len_half / np.pi

    v_lpc_mag = lpc_to_mag(v_lpc, fft_len=fft_len)
    v_lpc_mag_db = la.db(v_lpc_mag)
    v_lpc_mag_db = v_lpc_mag_db -
np.mean(v_lpc_mag_db)
numpy.mean
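A minimal sketch of numpy.mean, assuming stock NumPy (illustrative values):

import numpy as np

v = np.array([1.0, 2.0, 3.0, 6.0])
assert np.mean(v) == 3.0         # arithmetic mean over all elements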
"""Test becquerel's Spectrum.""" import pytest import datetime import numpy as np from uncertainties import ufloat, UFloat, unumpy import becquerel as bq TEST_DATA_LENGTH = 256 TEST_COUNTS = 4 TEST_GAIN = 8.23 TEST_EDGES_KEV = np.arange(TEST_DATA_LENGTH + 1) * TEST_GAIN def make_data(lam=TEST_COUNTS, size=TEST_DATA_LENGTH): """Build a vector of random counts.""" floatdata = np.random.poisson(lam=lam, size=size) return floatdata.astype(int) def make_spec(t, lt=None, lam=TEST_COUNTS): """Get spectrum to use in parameterized tests. Pytest Note: one might think you could do: @pytest.mark.parametrize('spec1, spec2', [ (uncal_spec, uncal_spec), (cal_spec, cal_spec) ]) def test_add(spec1, spec2): ... but you can't put fixtures inside parametrize(). Thus the fixtures call this function for simplicity. """ if t == "uncal": return bq.Spectrum(make_data(lam=lam), livetime=lt) elif t == "cal": return bq.Spectrum( make_data(lam=lam), bin_edges_kev=TEST_EDGES_KEV, livetime=lt ) elif t == "cal_new": return bq.Spectrum( make_data(lam=lam), livetime=lt, bin_edges_kev=np.arange(TEST_DATA_LENGTH + 1) * 0.67, ) elif t == "applied_energy_cal": spec = bq.Spectrum( make_data(lam=lam), livetime=lt, ) cal = bq.Calibration("p[0] * x", [0.67]) spec.apply_calibration(cal) return spec elif t == "cal_cps": return bq.Spectrum( cps=make_data(lam=lam), bin_edges_kev=TEST_EDGES_KEV, livetime=lt ) elif t == "uncal_long": return bq.Spectrum(make_data(lam=lam, size=TEST_DATA_LENGTH * 2), livetime=lt) elif t == "uncal_cps": return bq.Spectrum(cps=make_data(lam=lam), livetime=lt) elif t == "data": return make_data() else: return t @pytest.fixture def spec_data(): """Build a vector of random counts.""" return make_data() @pytest.fixture def uncal_spec(spec_data): """Generate an uncalibrated spectrum.""" return make_spec("uncal") @pytest.fixture def uncal_spec_2(spec_data): """Generate an uncalibrated spectrum (2nd instance).""" return make_spec("uncal") @pytest.fixture def uncal_spec_cps(spec_data): """Generate an uncalibrated spectrum with cps data.""" return make_spec("uncal_cps") @pytest.fixture def uncal_spec_long(spec_data): """Generate an uncalibrated spectrum, of longer length.""" return make_spec("uncal_long") @pytest.fixture def cal_spec(spec_data): """Generate a calibrated spectrum.""" return make_spec("cal") @pytest.fixture def cal_spec_2(spec_data): """Generate a calibrated spectrum (2nd instance).""" return make_spec("cal") # ---------------------------------------------- # Test Spectrum.__init__() # ---------------------------------------------- def test_uncal(uncal_spec): """Test simple uncalibrated construction.""" assert len(uncal_spec.counts) == TEST_DATA_LENGTH assert not uncal_spec.is_calibrated assert uncal_spec.energy_cal is None def test_uncal_cps(uncal_spec_cps): """Test simple uncalibrated construction w CPS. 
More CPS tests later""" assert len(uncal_spec_cps.cps) == TEST_DATA_LENGTH assert not uncal_spec_cps.is_calibrated assert uncal_spec_cps.energy_cal is None def test_cal(cal_spec): """Test simple calibrated construction.""" assert len(cal_spec.counts) == TEST_DATA_LENGTH assert len(cal_spec.bin_edges_kev) == TEST_DATA_LENGTH + 1 assert len(cal_spec.bin_centers_kev) == TEST_DATA_LENGTH assert cal_spec.is_calibrated def test_init_exceptions(spec_data): """Test errors on initialization.""" with pytest.raises(bq.SpectrumError): bq.Spectrum([]) with pytest.raises(bq.SpectrumError): bq.Spectrum(cps=[]) with pytest.raises(bq.SpectrumError): bq.Spectrum(spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1]) with pytest.raises(bq.SpectrumError): bq.Spectrum(cps=spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1]) with pytest.raises(bq.SpectrumError): bq.Spectrum(spec_data, cps=spec_data) with pytest.raises(bq.SpectrumError): bq.Spectrum(bin_edges_kev=TEST_EDGES_KEV) bad_edges = TEST_EDGES_KEV.copy() bad_edges[12] = bad_edges[9] with pytest.raises(ValueError): bq.Spectrum(spec_data, bin_edges_kev=bad_edges) def test_uncalibrated_exception(uncal_spec): """Test UncalibratedError.""" with pytest.raises(bq.UncalibratedError): uncal_spec.bin_centers_kev def test_negative_input(spec_data): """Make sure negative values in counts throw an exception, and exception is not raised if uncs are provided.""" neg_spec = spec_data[:] neg_spec[::2] *= -1 neg_uncs = np.where(neg_spec < 0, np.nan, 1) with pytest.raises(bq.SpectrumError): spec = bq.Spectrum(neg_spec) spec = bq.Spectrum(neg_spec, uncs=neg_uncs) assert np.any(spec.counts_vals < 0) assert np.any(np.isnan(spec.counts_uncs)) @pytest.mark.parametrize("spec_type", ["uncal", "cal", "uncal_long", "cal"]) def test_init_with_lists(spec_type): spec = make_spec(spec_type) assert np.all( bq.Spectrum( counts=list(spec.counts_vals), bin_edges_raw=spec.bin_edges_raw, ).counts_vals == spec.counts_vals ) # ---------------------------------------------- # Test Spectrum.from_listmode behavior # ---------------------------------------------- NBINS = 100 NEDGES = NBINS + 1 MEAN = 1000.0 STDDEV = 50.0 NSAMPLES = 10000 XMIN, XMAX = 0.0, 2000.0 BW = (XMAX - XMIN) / (1.0 * NBINS) lmd = np.random.normal(MEAN, STDDEV, NSAMPLES) log_bins = np.logspace(1, 4, num=NEDGES, base=10.0) def make_spec_listmode(t, is_cal=False, apply_cal=False): if t == "uniform": spec = bq.Spectrum.from_listmode( lmd, is_cal=is_cal, bins=NBINS, xmin=XMIN, xmax=XMAX ) elif t == "log": spec = bq.Spectrum.from_listmode(lmd, is_cal=is_cal, bins=log_bins) elif t == "default": spec = bq.Spectrum.from_listmode(lmd, is_cal=is_cal) else: return t if apply_cal: cal = bq.Calibration.from_linear([0.0, TEST_GAIN]) spec.apply_calibration(cal) assert spec.energy_cal is not None return spec @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_listmode_uniform(is_cal, apply_cal): """Test listmode spectra with uniform binning. It's easy to introduce off-by-one errors in histogramming listmode data, so run quite a few sanity checks here and in the following tests. 
""" if is_cal and apply_cal: return spec = make_spec_listmode("uniform", is_cal, apply_cal) xmin, xmax, bw = XMIN, XMAX, BW if apply_cal and not is_cal: xmin *= TEST_GAIN xmax *= TEST_GAIN bw *= TEST_GAIN edges, widths, _ = spec.get_bin_properties() assert len(spec) == NBINS assert np.all(np.isclose(widths, bw)) assert edges[0] == xmin assert edges[-1] == xmax assert len(edges) == NBINS + 1 assert spec.has_uniform_bins() @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_listmode_non_uniform(is_cal, apply_cal): """Test listmode spectra with non-uniform bins.""" if is_cal and apply_cal: return spec = make_spec_listmode("log", is_cal, apply_cal) assert len(spec) == NBINS assert spec.has_uniform_bins() is False @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_listmode_no_args(is_cal, apply_cal): """Test listmode spectra without args.""" spec = make_spec_listmode("default", is_cal, apply_cal) assert len(spec) == int(np.ceil(max(lmd))) def test_listmode_is_cal(): """Test that initially-calibrated listmode data matches uncal data.""" spec = make_spec_listmode("default", is_cal=True) e0, w0, c0 = spec.get_bin_properties(use_kev=True) e1, w1, c1 = spec.get_bin_properties(use_kev=False) assert np.allclose(e0, e1) assert np.allclose(w0, w1) assert np.allclose(c0, c1) @pytest.mark.parametrize("spec_str", ["uniform", "log"]) @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_find_bin_index(spec_str, is_cal, apply_cal): """Test that find_bin_index works for various spectrum objects.""" spec = make_spec_listmode(spec_str, is_cal, apply_cal) edges, widths, _ = spec.get_bin_properties() xmin, xmax = edges[0], edges[-1] assert spec.find_bin_index(xmin) == 0 assert spec.find_bin_index(xmin + widths[0] / 4.0) == 0 assert spec.find_bin_index(xmax - widths[-1] / 4.0) == len(spec) - 1 assert np.all(spec.find_bin_index(edges[:-1]) == np.arange(len(spec))) @pytest.mark.parametrize("spec_str", ["uniform", "default", "log"]) @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_index_out_of_bounds(spec_str, is_cal, apply_cal): """Raise a SpectrumError when we look for a bin index out of bounds, or an UncalibratedError when we ask to search bin_edges_kev in an uncal spectrum. 
""" spec = make_spec_listmode(spec_str, is_cal, apply_cal) edges, widths, _ = spec.get_bin_properties() xmin, xmax = edges[0], edges[-1] # out of histogram bounds with pytest.raises(bq.SpectrumError): spec.find_bin_index(xmax) with pytest.raises(bq.SpectrumError): spec.find_bin_index(xmin - widths[0] / 4.0) # UncalibratedError if not calibrated and we ask for calibrated if not spec.is_calibrated: with pytest.raises(bq.UncalibratedError): spec.find_bin_index(xmin, use_kev=True) @pytest.mark.parametrize("is_cal", [False, True]) @pytest.mark.parametrize("apply_cal", [None, False, True]) def test_bin_index_types(is_cal, apply_cal): """Additional bin index type checking.""" spec = make_spec_listmode("uniform", is_cal, apply_cal) assert isinstance(spec.find_bin_index(XMIN), (int, np.integer)) assert isinstance(spec.find_bin_index([XMIN]), np.ndarray) # ---------------------------------------------- # Test Spectrum repr behavior # ---------------------------------------------- def test_repr(cal_spec): repr(cal_spec) def test_str(cal_spec): str(cal_spec) # ---------------------------------------------- # Test Spectrum livetime properties # ---------------------------------------------- @pytest.fixture(params=[86400, 300.6, 0.88]) def livetime(request): return request.param def test_livetime_arg(spec_data, livetime): """Test manual livetime input.""" spec = bq.Spectrum(spec_data, livetime=livetime) assert spec.livetime == livetime def test_livetime_arg_cps(spec_data, livetime): """Test manual livetime input with CPS.""" cps = spec_data / float(livetime) spec = bq.Spectrum(cps=cps, livetime=livetime) assert spec.livetime == livetime def test_no_livetime(spec_data): """Test livetime property when not specified.""" spec = bq.Spectrum(spec_data) assert spec.livetime is None cps_spec = bq.Spectrum(cps=spec_data / 300.6) assert cps_spec.livetime is None # ---------------------------------------------- # Test start_time, stop_time, realtime # ---------------------------------------------- @pytest.mark.parametrize( "start, stop", [ ( datetime.datetime(2017, 1, 1, 17, 0, 3), datetime.datetime(2017, 1, 1, 18, 0, 3), ), ("2017-01-19 17:21:00", "2017-01-20 14:19:32"), (datetime.datetime(2017, 1, 1, 0, 30, 0, 385), "2017-01-01 12:44:22"), ], ) @pytest.mark.parametrize("rt", [3600, 2345.6]) def test_acqtime_construction(spec_data, start, stop, rt): """Test construction with 2 out of 3 of start, stop, and realtime.""" bq.Spectrum(spec_data, start_time=start, stop_time=stop) bq.Spectrum(spec_data, start_time=start, realtime=rt) bq.Spectrum(spec_data, realtime=rt, stop_time=stop) @pytest.mark.parametrize( "start, stop, rt, expected_err", [ ("2017-01-19 17:21:00", "2017-01-20 17:21:00", 86400, bq.SpectrumError), ("2017-01-19 17:21:00", "2017-01-18 17:21:00", None, ValueError), ], ) def test_bad_acqtime_construction(spec_data, start, stop, rt, expected_err): """Test bad construction of a spectrum with start, stop, or realtimes.""" with pytest.raises(expected_err): bq.Spectrum(spec_data, start_time=start, stop_time=stop, realtime=rt) def test_bad_realtime_livetime(spec_data): """Test error of livetime > realtime.""" with pytest.raises(ValueError): bq.Spectrum(spec_data, livetime=300, realtime=290) # ---------------------------------------------- # Test uncertainties in Spectrum # ---------------------------------------------- def test_construct_float_int(spec_data): """Construct spectrum with non-UFloats (float and int).""" spec = bq.Spectrum(spec_data) assert isinstance(spec.counts[0], UFloat) spec = 
bq.Spectrum(spec_data.astype(float)) assert isinstance(spec.counts[0], UFloat) def test_construct_ufloat(spec_data): """Construct spectrum with UFloats""" ucounts = unumpy.uarray(spec_data, np.ones_like(spec_data)) spec = bq.Spectrum(ucounts) assert isinstance(spec.counts[0], UFloat) assert spec.counts[0].std_dev == 1 def test_construct_float_int_uncs(spec_data): """Construct spectrum with non-UFloats and specify uncs.""" uncs = np.ones_like(spec_data) spec = bq.Spectrum(spec_data, uncs=uncs) assert isinstance(spec.counts[0], UFloat) uncs2 = np.array([c.std_dev for c in spec.counts]) assert np.allclose(uncs2, 1) def test_construct_errors(spec_data): """Construct spectrum with UFloats plus uncs and get an error.""" uncs = np.ones_like(spec_data) ucounts = unumpy.uarray(spec_data, uncs) with pytest.raises(bq.core.utils.UncertaintiesError): bq.Spectrum(ucounts, uncs=uncs) ucounts[0] = 1 with pytest.raises(bq.core.utils.UncertaintiesError): bq.Spectrum(ucounts) def test_properties(spec_data): """Test counts_vals and counts_uncs.""" spec = bq.Spectrum(spec_data) assert isinstance(spec.counts[0], UFloat) assert np.allclose(spec.counts_vals, spec_data) expected_uncs = np.sqrt(spec_data) expected_uncs[expected_uncs == 0] = 1 assert np.allclose(spec.counts_uncs, expected_uncs) uncs = spec_data ucounts = unumpy.uarray(spec_data, uncs) spec = bq.Spectrum(ucounts) assert np.allclose(spec.counts_vals, spec_data) assert np.allclose(spec.counts_uncs, uncs) uncs = np.ones_like(spec_data) spec = bq.Spectrum(spec_data, uncs=uncs) assert np.allclose(spec.counts_uncs, uncs) # ---------------------------------------------- # Test Spectrum.bin_widths # ---------------------------------------------- def test_bin_widths_kev(cal_spec): """Test Spectrum.bin_widths_kev""" cal_spec.bin_widths_kev assert len(cal_spec.bin_widths_kev) == len(cal_spec.counts) assert np.allclose(cal_spec.bin_widths_kev, TEST_GAIN) def test_bin_widths_uncal(uncal_spec): """Test Spectrum.bin_widths_raw""" uncal_spec.bin_widths_raw assert len(uncal_spec.bin_widths_raw) == len(uncal_spec.counts) # ---------------------------------------------- # Test Spectrum CPS and CPS/keV # ---------------------------------------------- @pytest.mark.parametrize( "construction_kwargs", [ {"livetime": 300.0}, {"livetime": 300.0, "bin_edges_kev": TEST_EDGES_KEV}, ], ) def test_cps(spec_data, construction_kwargs): """Test cps property and uncertainties on uncal and cal spectrum.""" spec = bq.Spectrum(spec_data, **construction_kwargs) spec.cps spec.cps_vals spec.cps_uncs assert np.all(spec.counts_vals == spec_data) assert np.allclose(spec.cps_vals, spec_data / spec.livetime) assert np.allclose(spec.cps_uncs, spec.counts_uncs / spec.livetime) def test_cpskev(spec_data, livetime): """Test cpskev property and uncertainties""" spec = bq.Spectrum(spec_data, livetime=livetime, bin_edges_kev=TEST_EDGES_KEV) spec.cpskev spec.cpskev_vals spec.cpskev_uncs assert np.allclose( spec.cpskev_vals, spec_data / spec.bin_widths_kev / float(livetime) ) assert np.allclose( spec.cpskev_uncs, spec.counts_uncs / spec.bin_widths_kev / float(livetime) ) def test_cps_cpsspec(spec_data, livetime): """Test cps property of CPS-style spectrum.""" spec = bq.Spectrum(cps=spec_data / float(livetime)) assert spec.cps is not None assert np.all(spec.cps_vals == spec_data / float(livetime)) assert np.all(np.isnan(spec.cps_uncs)) with pytest.raises(bq.SpectrumError): spec.counts with pytest.raises(bq.SpectrumError): spec.counts_vals with pytest.raises(bq.SpectrumError): spec.counts_uncs def 
test_cps_errors(uncal_spec):
    """Test errors in CPS."""

    with pytest.raises(bq.SpectrumError):
        uncal_spec.cps


def test_cpskev_errors(spec_data):
    """Test errors in CPS/keV."""

    spec = bq.Spectrum(spec_data, livetime=300.9)
    with pytest.raises(bq.UncalibratedError):
        spec.cpskev


# ----------------------------------------------
#    Test addition and subtraction of spectra
# ----------------------------------------------


@pytest.mark.parametrize(
    "lt1, lt2", [(300, 600), (12.6, 0.88), (300, 12.6), (12.6, None), (None, None)]
)
@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_add(type1, type2, lt1, lt2):
    """Test addition of spectra"""

    spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2))

    if lt1 and lt2:
        tot = spec1 + spec2
        assert tot.livetime == lt1 + lt2
    else:
        with pytest.warns(bq.SpectrumWarning):
            tot = spec1 + spec2
        assert tot.livetime is None
    assert np.all(tot.counts == spec1.counts + spec2.counts)
    assert np.all(tot.counts_vals == spec1.counts_vals + spec2.counts_vals)


@pytest.mark.parametrize(
    "type1, type2, expected_error",
    [
        ("uncal", "cal", bq.SpectrumError),
        ("uncal", "uncal_long", bq.SpectrumError),
        ("uncal", "data", TypeError),
        ("data", "uncal", TypeError),
        ("uncal", 5, TypeError),
        (5, "cal", TypeError),
        ("cal", "asdf", TypeError),
        ("asdf", "uncal", TypeError),
        ("cal", "cal_new", NotImplementedError),
    ],
)
def test_add_sub_errors(type1, type2, expected_error):
    """Test addition and subtraction that causes errors"""

    spec1, spec2 = make_spec(type1), make_spec(type2)
    with pytest.raises(expected_error):
        spec1 + spec2
    with pytest.raises(expected_error):
        spec1 - spec2


@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_add_uncs(type1, type2):
    """Test uncertainties on addition of uncal spectra"""

    spec1, spec2 = make_spec(type1), make_spec(type2)
    with pytest.warns(bq.SpectrumWarning):
        tot = spec1 + spec2

    uncs = np.sqrt(spec1.counts_uncs**2 + spec2.counts_uncs**2)
    assert np.allclose(tot.counts_uncs, uncs)


@pytest.mark.parametrize(
    "type1, type2, lt1, lt2",
    [
        ("uncal_cps", "uncal_cps", 300, 12.6),
        ("uncal_cps", "uncal_cps", None, 12.6),
        ("uncal_cps", "uncal_cps", None, None),
    ],
)
def test_add_sub_cps(type1, type2, lt1, lt2):
    """Test addition and subtraction of CPS spectra"""

    spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2))

    tot = spec1 + spec2
    assert np.all(tot.cps_vals == spec1.cps_vals + spec2.cps_vals)
    assert tot.livetime is None

    diff = spec1 - spec2
    assert diff.livetime is None
    assert np.all(diff.cps_vals == spec1.cps_vals - spec2.cps_vals)


@pytest.mark.parametrize(
    "type1, type2, lt1, lt2",
    [
        ("uncal", "uncal_cps", None, None),
        ("uncal_cps", "uncal", None, None),
        ("uncal", "uncal_cps", 300, None),
        ("uncal_cps", "uncal", None, 300),
        ("uncal", "uncal_cps", 300, 600),
        ("uncal_cps", "uncal", 600, 300),
    ],
)
def test_addition_errors(type1, type2, lt1, lt2):
    """Test errors during addition of mixed spectra"""

    spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2))
    with pytest.raises(bq.SpectrumError):
        spec1 + spec2


@pytest.mark.parametrize("lt1, lt2", [(300, 600), (12.6, 0.88), (300, 12.6)])
@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_subtract_counts(type1, type2, lt1, lt2):
    """Test Spectrum subtraction with counts"""

    spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2))
    with pytest.warns(bq.SpectrumWarning):
        diff = spec1 - spec2
    assert diff.livetime is None
    assert np.allclose(diff.cps_vals,
spec1.cps_vals - spec2.cps_vals) assert np.all(diff.cps_uncs > spec1.cps_uncs) assert np.all(diff.cps_uncs > spec2.cps_uncs) @pytest.mark.parametrize( "type1, type2, lt1, lt2", [ ("uncal", "uncal_cps", None, None), ("uncal_cps", "uncal", None, None), ("uncal", "uncal_cps", None, 300), ("uncal_cps", "uncal", 300, None), ("uncal", "uncal_cps", 300, None), ("uncal_cps", "uncal", None, 300), ], ) def test_subtract_errors(type1, type2, lt1, lt2): """Test errors/warnings during subtraction of mixed spectra""" spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2)) if lt1 is None and lt2 is None: with pytest.raises(bq.SpectrumError): diff = spec1 - spec2 else: with pytest.warns(bq.SpectrumWarning): diff = spec1 - spec2 assert diff.livetime is None # ---------------------------------------------- # Test multiplication and division of spectra # ---------------------------------------------- @pytest.mark.parametrize("factor", [0.88, 1, 2, 43.6]) @pytest.mark.parametrize("spectype", ["uncal", "cal"]) def test_basic_mul_div(spectype, factor): """ Basic multiplication/division of uncalibrated spectrum by a scalar. """ spec = make_spec(spectype) mult_left = spec * factor assert np.allclose(mult_left.counts_vals, factor * spec.counts_vals) assert np.allclose(mult_left.counts_uncs, factor * spec.counts_uncs) assert mult_left.livetime is None mult_right = factor * spec assert np.allclose(mult_right.counts_vals, factor * spec.counts_vals) assert np.allclose(mult_right.counts_uncs, factor * spec.counts_uncs) assert mult_right.livetime is None div = spec / factor assert np.allclose(div.counts_vals, spec.counts_vals / factor) assert np.allclose(div.counts_uncs, spec.counts_uncs / factor) assert div.livetime is None @pytest.mark.parametrize("factor", [0.88, 1, 2, 43.6]) def test_cps_mul_div(uncal_spec_cps, factor): """Multiplication/division of a CPS spectrum.""" mult_left = uncal_spec_cps * factor assert np.allclose(mult_left.cps_vals, factor * uncal_spec_cps.cps_vals) assert mult_left.livetime is None mult_right = factor * uncal_spec_cps assert np.allclose(mult_right.cps_vals, factor * uncal_spec_cps.cps_vals) assert mult_right.livetime is None div = uncal_spec_cps / factor assert np.allclose(div.cps_vals, uncal_spec_cps.cps_vals / factor) assert div.livetime is None @pytest.mark.parametrize("factor", [ufloat(0.88, 0.01), ufloat(1, 0.1), ufloat(43, 1)]) @pytest.mark.parametrize("spectype", ["uncal", "cal"]) def test_uncal_mul_div_uncertainties(spectype, factor): """ Multiplication/division of uncal spectrum by a scalar with uncertainty. 
""" spec = make_spec(spectype) mult_left = spec * factor assert np.allclose(mult_left.counts_vals, factor.nominal_value * spec.counts_vals) assert np.all( (mult_left.counts_uncs > factor.nominal_value * spec.counts_uncs) | (spec.counts_vals == 0) ) assert mult_left.livetime is None mult_right = factor * spec assert np.allclose(mult_right.counts_vals, factor.nominal_value * spec.counts_vals) assert np.all( (mult_right.counts_uncs > factor.nominal_value * spec.counts_uncs) | (spec.counts_vals == 0) ) assert mult_right.livetime is None div = spec / factor assert np.allclose(div.counts_vals, spec.counts_vals / factor.nominal_value) assert np.all( (div.counts_uncs > spec.counts_uncs / factor.nominal_value) | (spec.counts_vals == 0) ) assert div.livetime is None @pytest.mark.parametrize( "type1, type2, error", [ ("uncal", "uncal", TypeError), ("uncal", "asdf", TypeError), ("uncal", "data", TypeError), ("uncal", 0, ValueError), ("uncal", np.inf, ValueError), ("uncal", np.nan, ValueError), ("uncal", ufloat(0, 1), ValueError), ("uncal", ufloat(np.inf, np.nan), ValueError), ], ) def test_mul_div_errors(type1, type2, error): """Multiplication/division errors.""" spec, bad_factor = make_spec(type1), make_spec(type2) with pytest.raises(error): spec * bad_factor with pytest.raises(error): bad_factor * spec with pytest.raises(error): spec / bad_factor # ---------------------------------------------- # Test Spectrum.calibrate_like # ---------------------------------------------- def test_calibrate_like(uncal_spec, cal_spec): """Test calibrate_like with an uncalibrated spectrum.""" uncal_spec.calibrate_like(cal_spec) assert uncal_spec.is_calibrated assert np.all(uncal_spec.bin_edges_kev == cal_spec.bin_edges_kev) def test_recalibrate_like(cal_spec): """Test calibrate_like with an already calibrated spectrum.""" cal_new = make_spec("cal_new") edges1 = cal_spec.bin_edges_kev cal_spec.calibrate_like(cal_new) assert cal_spec.is_calibrated assert np.all(cal_spec.bin_edges_kev == cal_new.bin_edges_kev) assert cal_spec.bin_edges_kev[-1] != edges1[-1] def test_calibrate_like_error(uncal_spec, uncal_spec_2): """Test that calibrate_like raises an error if arg is uncalibrated""" with pytest.raises(bq.UncalibratedError): uncal_spec.calibrate_like(uncal_spec_2) def test_calibrate_like_copy(uncal_spec, cal_spec): """Test that calibrate_like makes a copy of the bin edges""" uncal_spec.calibrate_like(cal_spec) assert uncal_spec.bin_edges_kev is not cal_spec.bin_edges_kev cal_spec.rm_calibration() assert uncal_spec.is_calibrated # ---------------------------------------------- # Test Spectrum.combine_bins # ---------------------------------------------- @pytest.mark.parametrize("spectype", ["uncal", "cal", "uncal_cps"]) def test_combine_bins(spectype): """Test combine_bins with no padding.""" spec = make_spec(spectype) f = 8 combined = spec.combine_bins(f) assert len(combined) == TEST_DATA_LENGTH / f if spec._counts is not None: assert combined.counts_vals[0] == np.sum(spec.counts_vals[:f]) assert np.sum(combined.counts_vals) == np.sum(spec.counts_vals) else: assert combined.cps_vals[0] == np.sum(spec.cps_vals[:f]) assert np.sum(combined.cps_vals) == np.sum(spec.cps_vals) @pytest.mark.parametrize("spectype", ["uncal", "cal", "uncal_cps"]) def test_combine_bins_padding(spectype): """Test combine_bins with padding (an uneven factor).""" spec = make_spec(spectype) f = 10 combined = spec.combine_bins(f) assert len(combined) == np.ceil(float(TEST_DATA_LENGTH) / f) if spec._counts is not None: assert combined.counts_vals[0] == 
np.sum(spec.counts_vals[:f]) assert np.sum(combined.counts_vals) == np.sum(spec.counts_vals) else: assert combined.cps_vals[0] == np.sum(spec.cps_vals[:f]) assert np.sum(combined.cps_vals) == np.sum(spec.cps_vals) # calibration methods tested in energycal_test.py # ---------------------------------------------- # Test Spectrum.downsample # ---------------------------------------------- @pytest.mark.parametrize("spectype", ["uncal", "cal"]) @pytest.mark.parametrize("f", [2, 1.5, 999.99]) def test_downsample(spectype, f): """Test Spectrum.downsample on uncalibrated and calibrated spectra""" spec = make_spec(spectype, lam=1000) s1 = np.sum(spec.counts_vals) spec2 = spec.downsample(f) s2 = np.sum(spec2.counts_vals) r = float(s2) / s1 five_sigma = 5 * np.sqrt(s1 / f) / (s1 / f) assert np.isclose(r, 1.0 / f, atol=five_sigma) def test_no_downsample(cal_spec): """Test that downsample(1) doesn't do anything""" s1 = np.sum(cal_spec.counts_vals) spec2 = cal_spec.downsample(1.0) s2 = np.sum(spec2.counts_vals) assert s1 == s2 def test_zero_downsample(cal_spec): """Test that downsample(very large number) gives 0""" spec2 = cal_spec.downsample(10**10) s2 = np.sum(spec2.counts_vals) assert s2 == 0 def test_downsample_handle_livetime(cal_spec): """Test handle_livetime behavior""" f = 2 test_livetime = 300.0 cal_spec.livetime = test_livetime spec2 = cal_spec.downsample(f) assert spec2.livetime is None spec3 = cal_spec.downsample(f, handle_livetime="preserve") assert spec3.livetime == cal_spec.livetime spec4 = cal_spec.downsample(f, handle_livetime="reduce") assert spec4.livetime == cal_spec.livetime / f def test_downsample_error(cal_spec): """Test that downsample(<1) raises ValueError""" with pytest.raises(ValueError): cal_spec.downsample(0.5) def test_downsample_cps_error(uncal_spec_cps): """Test that downsampling a CPS spectrum gives a SpectrumError""" with pytest.raises(bq.SpectrumError): uncal_spec_cps.downsample(12) def test_downsample_handle_livetime_error(uncal_spec): """Test bad value of handle_livetime""" with pytest.raises(ValueError): uncal_spec.downsample(5, handle_livetime="asdf") # ---------------------------------------------- # Test Spectrum.__len__ # ---------------------------------------------- @pytest.fixture(params=[1, 8, 256, 16384]) def length(request): return request.param def test_len(length): """Test len(spectrum)""" floatdata = np.random.poisson(lam=TEST_COUNTS, size=length) spec = bq.Spectrum(floatdata.astype(int)) assert len(spec) == length def test_len_cps(length, livetime): """Test len(spectrum) for a CPS-based spectrum""" floatdata = np.random.poisson(lam=TEST_COUNTS, size=length) spec = bq.Spectrum(cps=floatdata / livetime) assert len(spec) == length # ---------------------------------------------- # Test Spectrum.copy # ---------------------------------------------- def test_copy_uncal(uncal_spec): """Test copy method on uncal spectrum""" uncal2 = uncal_spec.copy() assert np.all(uncal2.counts_vals == uncal_spec.counts_vals) assert np.all(uncal2.counts_uncs == uncal_spec.counts_uncs) assert uncal2 is not uncal_spec assert uncal2.counts is not uncal_spec.counts assert uncal2.counts[0] is not uncal_spec.counts[0] def test_copy_cal(cal_spec): """Test copy method on cal spectrum""" cal2 = cal_spec.copy() assert np.all(cal2.counts_vals == cal_spec.counts_vals) assert np.all(cal2.counts_uncs == cal_spec.counts_uncs) assert np.all(cal2.bin_edges_kev == cal_spec.bin_edges_kev) assert cal2 is not cal_spec assert cal2.counts is not cal_spec.counts assert cal2.counts[0] is not 
cal_spec.counts[0] assert cal2.bin_edges_kev is not cal_spec.bin_edges_kev # ---------------------------------------------- # Test Spectrum.rebin # ---------------------------------------------- @pytest.fixture( params=[ TEST_EDGES_KEV.copy(), TEST_EDGES_KEV.copy()[1:-2], np.linspace( TEST_EDGES_KEV.min(), TEST_EDGES_KEV.max(), len(TEST_EDGES_KEV) + 10 ), ], ids=["same edges", "subset of edges", "same bounds more bins"], ) def rebin_new_edges(request): return request.param.astype(float) @pytest.fixture( params=["interpolation", "listmode"], ids=["interpolation method", "listmode method"], ) def rebin_method(request): return request.param @pytest.fixture( params=[("uncal", 300), ("uncal", None), ("cal_cps", None)], ids=[ "uncalibrated spectrum with livetime", "uncalibrated spectrum without livetime", "calibrated spectrum with cps", ], ) def rebin_spectrum_failure(request): return make_spec(request.param[0], lt=request.param[1]) def test_spectrum_rebin_failure(rebin_spectrum_failure, rebin_new_edges, rebin_method): with pytest.raises(bq.SpectrumError): rebin_spectrum_failure.rebin( rebin_new_edges, method=rebin_method, zero_pad_warnings=False ) @pytest.fixture( params=[("cal", 300), ("cal", None), ("cal_cps", 300)], ids=[ "calibrated spectrum with livetime", "calibrated spectrum without livetime", "calibrated spectrum with cps and livetime", ], ) def rebin_spectrum_success(request): return make_spec(request.param[0], lt=request.param[1]) def test_spectrum_rebin_success(rebin_spectrum_success, rebin_new_edges, rebin_method): kwargs = dict( out_edges=rebin_new_edges, method=rebin_method, zero_pad_warnings=False ) if (rebin_spectrum_success._counts is None) and (rebin_method == "listmode"): with pytest.warns(bq.SpectrumWarning): spec = rebin_spectrum_success.rebin(**kwargs) else: spec = rebin_spectrum_success.rebin(**kwargs) assert np.isclose(rebin_spectrum_success.counts_vals.sum(), spec.counts_vals.sum()) if rebin_spectrum_success.livetime is None: assert spec.livetime is None else: assert
np.isclose(rebin_spectrum_success.livetime, spec.livetime)
numpy.isclose
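# A minimal standalone sketch of the counting-statistics convention the
# spectrum tests above repeatedly assert: each bin gets a Poisson uncertainty
# of sqrt(N), with empty bins assigned an uncertainty of 1, and independent
# uncertainties add in quadrature (as in test_add_uncs). This uses only numpy
# and the `uncertainties` package; `counts` and `uncs` are illustrative names,
# not part of the library API.
import numpy as np
from uncertainties import unumpy

counts = np.array([0, 1, 4, 9, 100])
uncs = np.sqrt(counts).astype(float)
uncs[uncs == 0] = 1  # empty bins still carry an uncertainty of 1

a = unumpy.uarray(counts, uncs)
b = unumpy.uarray(counts, uncs)  # an independent second "spectrum"

# Independent uncertainties propagate in quadrature through a sum:
tot = a + b
assert np.allclose(unumpy.nominal_values(tot), 2 * counts)
assert np.allclose(unumpy.std_devs(tot), np.sqrt(uncs**2 + uncs**2))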
# -*- coding: utf-8 -*-
"""
Spyder Editor

Code written by <NAME> with modifications by <NAME> and <NAME>

This file produces plots comparing our first-order sensitivity with BS vega.
"""

# %%
# To run this file, you need the plotly package in your Anaconda
# environment: "conda install plotly"
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
init_notebook_mode()
pio.renderers.default = 'svg'

import numpy as np
import numpy.random
import pandas as pd
from scipy.stats import norm, multivariate_normal
from scipy.optimize import minimize
import time

_tstart_stack = []

def tic():
    _tstart_stack.append(time.time())

def toc(fmt="Elapsed: %s s"):
    print(fmt % (time.time() - _tstart_stack.pop()))

# %%
# We first provide the computation of a call option according to BS
# (we assume a log-normal distribution): definitions of the dplus and
# dminus functions, followed by the BS formula itself.

def dplus(S, K, T, sigma):
    sigmaT = sigma * T ** 0.5
    return np.log(S/K)/sigmaT + sigmaT/2

def dminus(S, K, T, sigma):
    sigmaT = sigma * T ** 0.5
    return np.log(S/K)/sigmaT - sigmaT/2

def BS(S, K, T, sigma, Type=1):
    factor1 = S * norm.cdf(Type * dplus(S, K, T, sigma))
    factor2 = K * norm.cdf(Type * dminus(S, K, T, sigma))
    return Type * (factor1 - factor2)

# Now we provide the computation of the exact robust call price following
# the computations in BDT. We take p = 2.

def Robust_Call_Exact_fun(S, K, T, sigma, delta):
    def fun(v):
        # v[0] = a, v[1] = lambda
        price = BS(S, max(K - (2 * v[0] + 1) / (2 * v[1]), 0.000001), T, sigma)
        return price + v[0] ** 2 / (2 * v[1]) + 0.5 * v[1] * delta ** 2

    def cons_fun(v):
        # the value of v[0] should be constrained to keep the strike positive
        tmp = K - (2 * v[0] + 1) / (2 * v[1])
        return tmp

    cons = ({'type': 'ineq', 'fun': cons_fun})
    guess =
np.array([0, 1])
numpy.array
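# The script above compares a first-order sensitivity against BS vega. A
# minimal sketch of the closed-form vega for the undiscounted BS price used
# there: vega = S * phi(dplus) * sqrt(T). The helpers `_dplus`, `_bs`, and
# `bs_vega` are illustrative additions mirroring the definitions above, not
# part of the original script; the finite-difference check is a sanity test.
import numpy as np
from scipy.stats import norm

def _dplus(S, K, T, sigma):
    sigmaT = sigma * T ** 0.5
    return np.log(S / K) / sigmaT + sigmaT / 2

def _dminus(S, K, T, sigma):
    sigmaT = sigma * T ** 0.5
    return np.log(S / K) / sigmaT - sigmaT / 2

def _bs(S, K, T, sigma, Type=1):
    # same undiscounted Black-Scholes formula as in the script above
    return Type * (S * norm.cdf(Type * _dplus(S, K, T, sigma))
                   - K * norm.cdf(Type * _dminus(S, K, T, sigma)))

def bs_vega(S, K, T, sigma):
    # derivative of the undiscounted call price with respect to sigma
    return S * norm.pdf(_dplus(S, K, T, sigma)) * np.sqrt(T)

# Verify against a central finite difference:
S, K, T, sigma, h = 1.0, 1.1, 1.0, 0.2, 1e-5
fd = (_bs(S, K, T, sigma + h) - _bs(S, K, T, sigma - h)) / (2 * h)
assert abs(fd - bs_vega(S, K, T, sigma)) < 1e-8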
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import math
import onnx
from onnx import helper, TensorProto, mapping
import torch
import torchvision
import topi
import topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
import scipy


def get_input_data_shape_dict(graph_def, input_data):
    if isinstance(input_data, list):
        input_names = {}
        shape_dict = {}
        for i, _ in enumerate(input_data):
            input_names[i] = graph_def.graph.input[i].name
            shape_dict[input_names[i]] = input_data[i].shape
    else:
        input_names = graph_def.graph.input[0].name
        shape_dict = {input_names: input_data.shape}

    return input_names, shape_dict


def get_tvm_output_with_vm(graph_def, input_data, target, ctx, opset=None):
    """ Generic function to execute and get tvm output with vm executor"""

    _, shape_dict = get_input_data_shape_dict(graph_def, input_data)

    mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)

    ex = relay.create_executor('vm', mod=mod, ctx=ctx, target=target)
    indata = tvm.nd.array(input_data)
    result = ex.evaluate()(indata)
    return result.asnumpy()


def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None,
                   output_dtype='float32', opset=None):
    """ Generic function to execute and get tvm output"""
    # NOTE: the target and ctx arguments are overridden below, so this path
    # always compiles for llvm and runs on the CPU.
    target = 'llvm'

    input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)

    mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
    with tvm.transform.PassContext(opt_level=1):
        graph, lib, params = relay.build(mod, target, params=params)

    ctx = tvm.cpu(0)
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    if isinstance(input_data, list):
        for i, e in enumerate(input_names):
            # It's possible for some onnx inputs to not be needed in the tvm
            # module; confirm it's present before setting.
try: m.set_input(input_names[i], tvm.nd.array( input_data[i].astype(input_data[i].dtype))) except: continue else: m.set_input(input_names, tvm.nd.array( input_data.astype(input_data.dtype))) m.set_input(**params) # execute m.run() # get outputs if isinstance(output_shape, list) and isinstance(output_dtype, list): tvm_output_list = [] for i, _ in enumerate(output_shape): tvm_output = m.get_output(i) tvm_output_list.append(tvm_output.asnumpy()) return tvm_output_list else: tvm_output = m.get_output(0) return tvm_output.asnumpy() def get_onnxruntime_output(model, inputs, dtype='float32'): import onnxruntime.backend rep = onnxruntime.backend.prepare(model, 'CPU') if isinstance(inputs, list) and len(inputs) > 1: ort_out = rep.run(inputs) else: x = inputs.astype(dtype) ort_out = rep.run(x)[0] return ort_out def verify_onnx_forward_impl(graph_file, data_shape, out_shape): dtype = 'float32' x = np.random.uniform(size=data_shape) model = onnx.load_model(graph_file) c2_out = get_onnxruntime_output(model, x, dtype) for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype) tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5) def test_reshape(): in_shape = (4, 3, 3, 4) ref_shape = (6, 2, 4, 3) ref_array = np.array(ref_shape) ref_node = onnx.helper.make_node('Constant', inputs=[], outputs=['ref_in'], value=onnx.helper.make_tensor(name='const_tensor', data_type=onnx.TensorProto.INT32, dims=ref_array.shape, vals=ref_array.flatten().astype(int))) reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"]) graph = helper.make_graph([ref_node, reshape_node], "reshape_test", inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))]) model = helper.make_model(graph, producer_name='reshape_test') for target, ctx in ctx_list(): x = np.random.uniform(size=in_shape).astype('int32') tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32') tvm.testing.assert_allclose(ref_shape, tvm_out.shape) def test_expand(): def _test_expand(name, data, shape, ref_data): shape_array = np.array(shape) shape_node = onnx.helper.make_node('Constant', inputs=[], outputs=['shape'], value=onnx.helper.make_tensor(name = 'const_tensor', data_type = onnx.TensorProto.INT32, dims = shape_array.shape, vals = shape_array.flatten().astype('int32'))) expand_node = helper.make_node("Expand", ["in", "shape"], ["out"]) graph = helper.make_graph([shape_node, expand_node], "expand_test", inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(data.shape))], outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_data.shape))]) model = helper.make_model(graph, producer_name=name) for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, data, target, ctx, ref_data.shape, 'float32') tvm.testing.assert_allclose(ref_data, tvm_out) in_shape = (3, 1) shape = (3, 4) data = np.random.uniform(size=in_shape).astype(np.float32) ref_data = np.tile(data, 4) _test_expand('expand_with_dim_unchanged_test', data, shape, ref_data) in_shape = (3, 1) shape = (2, 1, 6) data = np.random.uniform(size=in_shape).astype(np.float32) ref_data = data * np.ones(shape, dtype=np.float32) _test_expand('expand_with_dim_changed_test', data, shape, ref_data) def verify_depth_to_space(inshape, outshape, mode, blockSize): node = onnx.helper.make_node('DepthToSpace', inputs=['x'], outputs=['y'], blocksize=blockSize) graph = helper.make_graph([node], "depth_to_space_test", 
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))], outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))]) model = helper.make_model(graph, producer_name='depth_to_space_test') for target, ctx in ctx_list(): x = np.random.uniform(size=inshape).astype('float32') tvm_out = get_tvm_output(model, x, target, ctx, outshape, 'float32') onnx_out = get_onnxruntime_output(model, x, 'float32') tvm.testing.assert_allclose(onnx_out, tvm_out) def test_depth_to_space(): # current onnx.checker use OpSet-1 version of DepthToSpace, which doesn't have a mode argument. # TO-DO, we can add mode arguement to test CRD mode and DCR mode # in the future when we update to a newer onnx version. verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2) def verify_space_to_depth(inshape, outshape, blockSize): node = onnx.helper.make_node('SpaceToDepth', inputs=['x'], outputs=['y'], blocksize=blockSize) graph = helper.make_graph([node], "space_to_depth_test", inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))], outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))]) model = helper.make_model(graph, producer_name='space_to_depth_test') for target, ctx in ctx_list(): x = np.random.uniform(size=inshape).astype('float32') tvm_out = get_tvm_output(model, x, target, ctx, outshape, 'float32') onnx_out = get_onnxruntime_output(model, x, 'float32') tvm.testing.assert_allclose(onnx_out, tvm_out) def test_space_to_depth(): verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2) def test_shape(): in_shape = (4, 3, 3, 4) ref_shape = (6, 2, 4, 3) ref_array = np.array(ref_shape) ref_node = onnx.helper.make_node('Constant', inputs=[], outputs=['ref_in'], value=onnx.helper.make_tensor(name='const_tensor', data_type=onnx.TensorProto.INT32, dims=ref_array.shape, vals=ref_array.flatten().astype(int))) reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"]) shape_node = helper.make_node("Shape", ['out'], ['final_out']) graph = helper.make_graph([ref_node, reshape_node, shape_node], "shape_test", inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("final_out", TensorProto.FLOAT, list(ref_shape))]) model = helper.make_model(graph, producer_name='shape_test') for target, ctx in ctx_list(): x = np.random.uniform(size=in_shape).astype('int32') tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32') tvm.testing.assert_allclose(ref_shape, tvm_out) def _test_power_iteration(x_shape, y_shape): if isinstance(y_shape, int): y_shape = [y_shape] x = np.random.uniform(size=x_shape).astype(np.float32) y = np.random.uniform(size=y_shape).astype(np.float32) np_res = np.power(x, y).astype(np.float32) res = helper.make_node("Pow", ['x', 'y'], ['out']) graph = helper.make_graph([res], 'power_test', inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)), helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(np_res.shape))]) model = helper.make_model(graph, producer_name='power_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape) tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5) def test_power(): _test_power_iteration((1, 3), (1)) _test_power_iteration((2, 3), (2, 3)) _test_power_iteration((2, 3), (1, 3)) def test_squeeze(): in_shape = (1, 3, 1, 3, 1, 1) out_shape = (3, 3) y = 
helper.make_node("Squeeze", ['in'], ['out'], axes=[0, 2, 4, 5]) graph = helper.make_graph([y], 'squeeze_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='squeeze_test') for target, ctx in ctx_list(): x = np.random.uniform(size=in_shape).astype('float32') tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_shape, tvm_out.shape) def test_flatten(): in_shape = (1, 3, 4, 4) axis = 1 ref_shape = (1, 48) flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis) graph = helper.make_graph([flatten_node], "flatten_test", inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))]) model = helper.make_model(graph, producer_name='flatten_test') for target, ctx in ctx_list(): x = np.random.uniform(size=in_shape).astype('int32') tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32') tvm.testing.assert_allclose(ref_shape, tvm_out.shape) def test_unsqueeze(): in_shape = (3, 3) axis = (0, 3, 4) out_shape = (1, 3, 3, 1, 1) y = helper.make_node("Unsqueeze", ['in'], ['out'], axes=list(axis)) graph = helper.make_graph([y], 'squeeze_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='squeeze_test') for target, ctx in ctx_list(): x = np.random.uniform(size=in_shape).astype('float32') tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_shape, tvm_out.shape) def verify_gather(in_shape, indices, axis, dtype): x = np.random.uniform(size=in_shape).astype(dtype) indices = np.array(indices, dtype="int32") out_np = np.take(x, indices, axis=axis) y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis) graph = helper.make_graph([y], 'gather_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape)), helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='gather_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [x, indices], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out) def test_gather(): verify_gather((4,), [1], 0, 'int32') verify_gather((1, 4), [0], 0, 'int32') verify_gather((4,), [[[1, 0], [0, 1]]], 0, 'float32') verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, 'int32') verify_gather((3, 3, 3), [[[1, 0]]], -1, 'int32') verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, 'float32') def verify_scatter(in_shape, indices, axis): x = np.random.uniform(size=in_shape).astype("float32") indices = np.array(indices, dtype="int32") updates = np.random.uniform(size=indices.shape).astype("float32") y = helper.make_node("ScatterElements", ['data', 'indices', 'updates'], ['output'], axis=axis) graph = helper.make_graph([y], 'scatter_test', inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)), helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)), helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape))], outputs=[helper.make_tensor_value_info("output", 
TensorProto.FLOAT, list(in_shape))]) model = helper.make_model(graph, producer_name='scatter_test') onnx_out = get_onnxruntime_output(model, [x, indices, updates]) for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [x, indices, updates], target, ctx, onnx_out[0].shape) tvm.testing.assert_allclose(onnx_out[0], tvm_out) def test_scatter(): verify_scatter((4,), [1], 0) verify_scatter((1, 4), [[0]], 0) verify_scatter((4,), [2, 3], 0) verify_scatter((2, 2), [[1, 0], [0, 1]], 1) verify_scatter((3, 3, 3), [[[-1, -3]]], -1) verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0) def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None): if axes: y = helper.make_node( "Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends) else: y = helper.make_node( "Slice", ['in'], ['out'], starts=starts, ends=ends) graph = helper.make_graph([y], 'slice_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='slice_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, indata, target, ctx, outdata.shape, 'float32', opset=1) tvm.testing.assert_allclose(outdata, tvm_out) def _test_slice_iteration_v10(indata, outdata, starts, ends, axes=None): if isinstance(starts, int): starts = (starts, ) if isinstance(ends, int): ends = (ends, ) if isinstance(axes, int): axes = (axes, ) starts = np.asarray(starts) ends = np.asarray(ends) inputs = [ helper.make_tensor_value_info("data", TensorProto.FLOAT, list(indata.shape)), helper.make_tensor_value_info("starts", TensorProto.INT32, list(starts.shape)), helper.make_tensor_value_info("ends", TensorProto.INT32, list(ends.shape)) ] initializer = [ helper.make_tensor("starts", TensorProto.INT32, list(starts.shape), starts), helper.make_tensor("ends", TensorProto.INT32, list(ends.shape), ends) ] if axes: axes = np.asarray(axes) y = helper.make_node("Slice", ["data", "starts", "ends", "axes"], ["out"]) inputs.append( helper.make_tensor_value_info("axes", TensorProto.INT32, list(axes.shape))) initializer.append( helper.make_tensor("axes", TensorProto.INT32, list(axes.shape), axes)) else: y = helper.make_node("Slice", ["data", "starts", "ends"], ["out"]) graph = helper.make_graph([y], 'slice_test', inputs=inputs, outputs=[ helper.make_tensor_value_info( "out", TensorProto.FLOAT, list(outdata.shape)) ], initializer=initializer) model = helper.make_model(graph, producer_name='slice_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32', opset=10) tvm.testing.assert_allclose(outdata, tvm_out) def test_slice(): x = np.random.randn(20, 10, 5).astype(np.float32) _test_slice_iteration_v1(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) _test_slice_iteration_v1(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) _test_slice_iteration_v1(x, x[:, 1:1000], (1), (1000), (1)) _test_slice_iteration_v1(x, x[:, 0:-1], (0), (-1), (1)) _test_slice_iteration_v10(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) _test_slice_iteration_v10(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) _test_slice_iteration_v10(x, x[:, 1:1000], (1), (1000), (1)) _test_slice_iteration_v10(x, x[:, 0:-1], (0), (-1), (1)) def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs): indata = np.random.uniform(-1, 1, size=inshape).astype(dtype) outdata = outfunc(indata, **npargs) y = helper.make_node(opname, ['in'], ['out'], **kwargs) graph = helper.make_graph([y], 
opname+'_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name=opname+'_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, indata, target, ctx, outdata.shape, dtype) tvm.testing.assert_allclose(outdata, tvm_out) def test_floor(): _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, 'float32', 'Floor', {}) def test_ceil(): _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, 'float32', 'Ceil', {}) def test_clip(): _test_onnx_op_elementwise((2, 4, 5, 6), np.clip, {'a_min': -1.0, 'a_max': 1.0}, 'float32', 'Clip', {'min': -1.0, 'max': 1.0}) def test_round(): _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, 'float32', 'Round', {}) def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs): indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype) outdata = outfunc(indata, **npargs) y = helper.make_node(opname, ['in'], ['out'], **kwargs) graph = helper.make_graph([y], opname+'_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))]) model = helper.make_model(graph, producer_name=opname+'_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, indata, target, ctx, outdata.shape, dtype) tvm.testing.assert_allclose(outdata, tvm_out) def test_isinf(): _test_finite_ops((2, 4, 5, 6), np.isinf, {}, 'float32', 'IsInf', {}) def test_isnan(): _test_finite_ops((2, 4, 5, 6), np.isnan, {}, 'float32', 'IsNaN', {}) def verify_gather_nd(in_shape, indices, dtype): x = np.random.uniform(size=in_shape).astype(dtype) indices = np.array(indices, dtype="int32") out_np = topi.testing.gather_nd_python(x, indices) y = helper.make_node("GatherND", ['in', 'indices'], ['out']) graph = helper.make_graph([y], 'gather_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape)), helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='gather_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [x, indices], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out) def test_gather_nd(): verify_gather_nd((2, 2), [[0,0],[1,1]], 'int32') verify_gather_nd((3, 3, 3), [[0,1],[1,0]] , 'float32') verify_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], 'float32') def test_onehot(): indices_shape = [10] indices_array = np.random.randint( low=0, high=9, size=indices_shape, dtype='int32') depth = 10 values = np.asarray([0, 1]) out_np = np.eye(depth)[indices_array.reshape(-1)] onehot_node = helper.make_node( "OneHot", ["indices", "depth", "values"], ["out"]) graph = helper.make_graph([onehot_node], "onehot_test", inputs=[helper.make_tensor_value_info("indices", TensorProto.INT32, indices_shape), helper.make_tensor_value_info("depth", TensorProto.INT32, [1]), helper.make_tensor_value_info("values", TensorProto.INT32, values.shape)], initializer=[helper.make_tensor("depth", TensorProto.INT32, [1], [depth]), helper.make_tensor("values", TensorProto.INT32, values.shape, values)], outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)]) model = helper.make_model(graph, producer_name="onehot_test") for target, ctx in ctx_list(): tvm_out = 
get_tvm_output( model, [indices_array], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) def test_matmul(): a_shape = (4, 3) b_shape = (3, 4) a_array = np.random.uniform(size=a_shape).astype('float32') b_array = np.random.uniform(size=b_shape).astype('float32') out_np = np.matmul(a_array, b_array) mul_node = helper.make_node("MatMul", ["a", "b"], ["out"]) graph = helper.make_graph([mul_node], "matmul_test", inputs=[helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)), helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='matmul_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_array, b_array], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) def verify_batch_matmul(a_shape, b_shape): a_array = np.random.uniform(size=a_shape).astype('float32') b_array = np.random.uniform(size=b_shape).astype('float32') out_np = np.matmul(a_array, b_array) mul_node = helper.make_node("MatMul", ["a", "b"], ["out"]) graph = helper.make_graph([mul_node], "matmul_test", inputs=[helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)), helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='matmul_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_array, b_array], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) def test_batch_matmul(): verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4)) verify_batch_matmul((2, 4, 3), (3, 4)) verify_batch_matmul((2, 3, 4, 3), (3, 4)) def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None): in_array = np.random.uniform(size=shape).astype(dtype) if alpha == None and beta == None and bias == None: alpha = 0.0001 beta = 0.75 bias = 1.0 node = onnx.helper.make_node( 'LRN', inputs=['in'], outputs=['out'], size=nsize) else: node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha, beta=beta, bias=bias, size=nsize) graph = helper.make_graph([node], "lrn_test", inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))]) model = helper.make_model(graph, producer_name='lrn_test') def _get_python_lrn(): square_sum = np.zeros(shape).astype(dtype) for n, c, h, w in np.ndindex(in_array.shape): square_sum[n, c, h, w] = sum(in_array[n, max(0, c - int(math.floor((nsize - 1) / 2))): min(5, c + int(math.ceil((nsize - 1) / 2)) + 1), h, w] ** 2) py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta) return py_out for target, ctx in ctx_list(): input_name = model.graph.input[0].name py_out = _get_python_lrn() tvm_out = get_tvm_output( model, in_array, target, ctx, py_out.shape, 'float32') tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5) def test_lrn(): verify_lrn((5, 5, 5, 5), 3, 'float32') verify_lrn((5, 5, 5, 5), 3, 'float32', alpha=0.0002, beta=0.5, bias=2.0) def verify_instance_norm(shape, axis=1): def _get_python_instance_norm(x, gamma, beta, epsilon=1e-5): dims_x = len(x.shape) axis = tuple(range(2, dims_x)) mean = np.mean(x, axis=axis, keepdims=True) var = np.var(x, axis=axis, keepdims=True) dim_ones = (1,) * (dims_x - 2) gamma = 
gamma.reshape(-1, *dim_ones) beta = beta.reshape(-1, *dim_ones) return gamma * (x - mean) / np.sqrt(var + epsilon) + beta x = np.random.randn(*shape).astype(np.float32) gamma = np.random.randn(shape[1]).astype(np.float32) beta = np.random.randn(shape[1]).astype(np.float32) epsilon = 1e-5 y = _get_python_instance_norm(x, gamma, beta, epsilon).astype(np.float32) node = onnx.helper.make_node( 'InstanceNormalization', inputs=['x', 'gamma', 'beta'], outputs=['y'], epsilon=epsilon, ) graph = helper.make_graph([node], "instance_norm_test", inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)), helper.make_tensor_value_info( "gamma", TensorProto.FLOAT, (shape[1],)), helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],))], outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))]) model = helper.make_model(graph, producer_name='instance_norm_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [x, gamma, beta], target, ctx, shape, 'float32') tvm.testing.assert_allclose(y, tvm_out, rtol=1e-5, atol=1e-5) def test_instance_norm(): verify_instance_norm((2, 3, 4, 5)) verify_instance_norm((32, 64, 80, 64)) verify_instance_norm((8, 6, 5)) verify_instance_norm((8, 7, 6, 5, 4)) def _test_upsample_nearest(): scale = 2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) y = helper.make_node("Upsample", ['in'], [ 'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) out_array = topi.testing.upsampling_python( in_array, (scale, scale), "NCHW") graph = helper.make_graph([y], 'upsample_nearest_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='upsample_nearest_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out) def _test_upsample3d_nearest(): scale = 2 in_shape = (1, 1, 3, 3, 3) out_shape = (1, 1, 3*scale, 3*scale, 3*scale) y = helper.make_node("Upsample", ['in'], [ 'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) out_array = topi.testing.upsampling3d_python( in_array, (scale, scale, scale), "NCDHW") graph = helper.make_graph([y], 'upsample_nearest_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='upsample_nearest_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out) def _test_upsample_bilinear(): scale = 2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) y = helper.make_node("Upsample", ['in'], [ 'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) out_array = topi.testing.bilinear_resize_python( in_array, (3*scale, 3*scale), "NCHW") graph = helper.make_graph([y], 'upsample_bilinear_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='upsample_bilinear_test') for target, ctx in ctx_list(): tvm_out = 
get_tvm_output( model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5) def _test_upsample_bilinear_opset9(): scale = 2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear') scales = [1, 1, 2, 2] in_array = np.random.uniform(size=in_shape).astype(np.float32) out_array = topi.testing.bilinear_resize_python( in_array, (3*scale, 3*scale), "NCHW") ref_node = helper.make_node('Constant', inputs=[], outputs=['const'], value=onnx.helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=scales, vals=np.random.random(scales).flatten().astype(float))) shape_node = helper.make_node("Shape", ['const'], ['scales']) graph = helper.make_graph([ref_node, shape_node, y], 'upsample_bilinear_opset9_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model( graph, producer_name='upsample_bilinear_opset9_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5) def _test_upsample3d_trilinear(): scale = 2 in_shape = (1, 1, 3, 3, 3) out_shape = (1, 1, 3*scale, 3*scale, 3*scale) y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear') scales = [1.0, 1.0, 2.0, 2.0, 2.0] in_array = np.random.uniform(size=in_shape).astype(np.float32) out_array = topi.testing.trilinear_resize3d_python( in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel") ref_array = np.array(scales) ref_node = helper.make_node('Constant', inputs=[], outputs=['scales'], value=onnx.helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=ref_array.shape, vals=ref_array.flatten().astype(float))) graph = helper.make_graph([ref_node, y], 'upsample_trilinear_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model( graph, producer_name='upsample_trilinear_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5) def test_upsample(): _test_upsample_nearest() _test_upsample_bilinear() _test_upsample_bilinear_opset9() _test_upsample3d_nearest() _test_upsample3d_trilinear() def _test_softmax(inshape, axis): opname = 'Softmax' indata = np.random.uniform(size=inshape).astype(np.float32) outshape = inshape outdata = topi.testing.softmax_python(indata) if isinstance(axis, int): y = helper.make_node(opname, ['in'], ['out'], axis=axis) elif axis is None: y = helper.make_node(opname, ['in'], ['out']) graph = helper.make_graph([y], opname+'_test', inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name=opname+'_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, indata, target, ctx, outshape, 'float32') tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) def test_softmax(): _test_softmax((1, 10), None) _test_softmax((1, 10), 1) def verify_min(input_dim): dtype = 'float32' a_np1 = 
np.random.uniform(size=input_dim).astype(dtype) a_np2 = np.random.uniform(size=input_dim).astype(dtype) a_np3 = np.random.uniform(size=input_dim).astype(dtype) b_np = np.min((a_np1, a_np2, a_np3), axis=0) min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"]) graph = helper.make_graph([min_node], "Min_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Min_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def test_forward_min(): verify_min((1, 3, 20, 20)) verify_min((20, 20)) def verify_max(input_dim): dtype = 'float32' a_np1 = np.random.uniform(size=input_dim).astype(dtype) a_np2 = np.random.uniform(size=input_dim).astype(dtype) a_np3 = np.random.uniform(size=input_dim).astype(dtype) b_np = np.max((a_np1, a_np2, a_np3), axis=0) max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"]) graph = helper.make_graph([max_node], "Max_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Max_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def test_forward_max(): verify_max((1, 3, 20, 20)) verify_max((20, 20)) def verify_mean(input_dim): dtype = 'float32' a_np1 = np.random.uniform(size=input_dim).astype(dtype) a_np2 = np.random.uniform(size=input_dim).astype(dtype) a_np3 = np.random.uniform(size=input_dim).astype(dtype) b_np = np.mean((a_np1, a_np2, a_np3), axis=0) mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"]) graph = helper.make_graph([mean_node], "Mean_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)), helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Mean_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def test_forward_mean(): verify_mean((1, 3, 20, 20)) verify_mean((20, 20)) def verify_hardsigmoid(input_dim, alpha, beta): dtype = 'float32' a_np1 = np.random.uniform(size=input_dim).astype(dtype) b_np = np.clip(a_np1 * alpha + beta, 0, 1) hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], [ "out"], alpha=alpha, beta=beta) graph = helper.make_graph([hardsigmoid_node], "HardSigmoid_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='HardSigmoid_test') 
for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def test_forward_hardsigmoid(): verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6) verify_hardsigmoid((20, 20), 0.3, 0.4) def verify_argmin(input_dim, axis=None, keepdims=None): def _argmin_numpy(data, axis=0, keepdims=True): result = np.argmin(data, axis=axis) if (keepdims == 1): result = np.expand_dims(result, axis) return result.astype(data.dtype) a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32) if keepdims is None and axis is None: b_np = _argmin_numpy(a_np1) node = onnx.helper.make_node('ArgMin', inputs=['a_np1'], outputs=['out']) elif axis is None: b_np = _argmin_numpy(a_np1, keepdims=keepdims) node = onnx.helper.make_node('ArgMin', inputs=['a_np1'], outputs=['out'], keepdims=keepdims) elif keepdims is None: b_np = _argmin_numpy(a_np1, axis=axis) node = onnx.helper.make_node('ArgMin', inputs=['a_np1'], outputs=['out'], axis=axis) else: b_np = _argmin_numpy(a_np1, axis=axis, keepdims=keepdims) node = onnx.helper.make_node('ArgMin', inputs=['a_np1'], outputs=['out'], axis=axis, keepdims=keepdims) graph = helper.make_graph([node], "argmin_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, list(b_np.shape))]) model = helper.make_model(graph, producer_name='argmin_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_np1], target, ctx, b_np.shape, b_np.dtype) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def verify_argmax(input_dim, axis=None, keepdims=None): def _argmax_numpy(data, axis=0, keepdims=True): result = np.argmax(data, axis=axis) if (keepdims == 1): result = np.expand_dims(result, axis) return result.astype(data.dtype) a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32) if keepdims is None and axis is None: b_np = _argmax_numpy(a_np1) node = onnx.helper.make_node('ArgMax', inputs=['a_np1'], outputs=['out']) elif axis is None: b_np = _argmax_numpy(a_np1, keepdims=keepdims) node = onnx.helper.make_node('ArgMax', inputs=['a_np1'], outputs=['out'], keepdims=keepdims) elif keepdims is None: b_np = _argmax_numpy(a_np1, axis=axis) node = onnx.helper.make_node('ArgMax', inputs=['a_np1'], outputs=['out'], axis=axis) else: b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims) node = onnx.helper.make_node('ArgMax', inputs=['a_np1'], outputs=['out'], axis=axis, keepdims=keepdims) graph = helper.make_graph([node], "argmax_test", inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, list(b_np.shape))]) model = helper.make_model(graph, producer_name='argmax_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [a_np1], target, ctx, b_np.shape, b_np.dtype) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) def test_forward_arg_min_max(): '''Verify argmin and argmax''' verify_argmin([3, 4, 4]) verify_argmax([3, 4, 4]) verify_argmin([3, 4, 4], axis=1) verify_argmax([3, 4, 4], axis=0) verify_argmin([3, 4, 4], keepdims=0) verify_argmax([3, 4, 4], keepdims=1) for axis in [None, 0, 1, 2]: for keepdims in [None, True, False]: verify_argmin([3, 4, 4], axis, keepdims) verify_argmax([3, 4, 4], axis, keepdims) def verify_constantofshape(input_dim, value, dtype): out = np.empty(shape=input_dim, dtype=dtype) out.fill(value) fill_node = 
helper.make_node("ConstantOfShape", ["input"], ["output"], value=helper.make_tensor( 'value', mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1, ), (value, ))) inputs = [ helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim) ] graph = helper.make_graph( [fill_node], "fill_test", inputs, outputs=[ helper.make_tensor_value_info("output", TensorProto.FLOAT, list(out.shape)) ], initializer=[ helper.make_tensor("input", TensorProto.INT32, (len(input_dim), ), input_dim) ]) model = helper.make_model(graph, producer_name='fill_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [], target, ctx, out.shape) tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5) def test_constantofshape(): verify_constantofshape((2, 3, 4, 5), 10, 'float32') verify_constantofshape((3, 3), 0, 'int32') verify_constantofshape((1, 2, 3), -1, 'float32') def verify_pad(indata, pads, mode='constant', value=0.0): indata = np.array(indata).astype(np.float32) # numpy expect result len_dim = len(pads) // 2 np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)] # onnx graph if mode in ['edge', 'reflect']: outdata = np.pad(indata, pad_width=np_pads, mode=mode) node = helper.make_node( 'Pad', inputs=['input'], outputs=['output'], mode=mode, pads=pads, ) else: outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value) node = helper.make_node( 'Pad', inputs=['input'], outputs=['output'], mode='constant', pads=pads, value=value ) graph = helper.make_graph([node], 'pad_test', inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='pad_test') # tvm result for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, indata, target, ctx, outdata.shape, 'float32', opset=2) tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) def verify_pad_v11(indata, pads, mode='constant', value=0.0): indata = np.array(indata).astype(np.float32) # numpy expect result len_dim = len(pads) // 2 np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)] pads = np.array(pads) # onnx graph if mode in ['edge', 'reflect']: inputs = [indata, pads] outdata = np.pad(indata, pad_width=np_pads, mode=mode) node = helper.make_node( 'Pad', inputs=['input', 'pads'], outputs=['output'], mode=mode ) graph = helper.make_graph([node], 'pad_test', inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)), helper.make_tensor_value_info("pads", TensorProto.INT64,(len(pads),))], initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)], outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))]) else: inputs = [indata, pads, np.array([value])] outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value) node = helper.make_node( 'Pad', inputs=['input', 'pads', 'constant_value'], outputs=['output'], mode='constant' ) graph = helper.make_graph([node], 'pad_test', inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)), helper.make_tensor_value_info("pads", TensorProto.INT64,(len(pads),)), helper.make_tensor_value_info("constant_value", TensorProto.INT64,(1,)), ], initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads), helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value])], outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, 
list(outdata.shape))]) model = helper.make_model(graph, producer_name='pad_test') # tvm result for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, inputs, target, ctx, outdata.shape, 'float32', opset=11) tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) def test_pad(): verify_pad(np.random.randn(2, 2).astype( np.float32), [0, 1, 0, 0], 'constant', 0.0) verify_pad(np.random.randn(2, 3).astype( np.float32), [1, 0, 0, 1], 'constant', 0.0) verify_pad(np.random.randn(3, 2).astype( np.float32), [0, 0, 1, 0], 'constant', 5.0) verify_pad(np.random.randn(1, 3, 4, 5).astype( np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge') verify_pad(np.random.randn(1, 3, 4, 5).astype( np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect') verify_pad_v11(np.random.randn(2, 2).astype( np.float32), [0, 1, 0, 0], 'constant', 0.0) verify_pad_v11(np.random.randn(2, 3).astype( np.float32), [1, 0, 0, 1], 'constant', 0.0) verify_pad_v11(np.random.randn(3, 2).astype( np.float32), [0, 0, 1, 0], 'constant', 5.0) verify_pad_v11(np.random.randn(1, 3, 4, 5).astype( np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge') verify_pad_v11(np.random.randn(1, 3, 4, 5).astype( np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect') def verify_reduce_func(func, data, axis, keepdims): inshape = data.shape outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape if axis: node = onnx.helper.make_node(func, inputs=['x'], outputs=['y'], axes=axis, keepdims=keepdims) else: node = onnx.helper.make_node(func, inputs=['x'], outputs=['y'], keepdims=keepdims) graph = helper.make_graph([node], "reduce_test", inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))], outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))]) model = helper.make_model(graph, producer_name='reduce_test') onnx_out = get_onnxruntime_output(model, data, 'float32') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, data, target, ctx, outshape, 'float32') tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5) def test_all_reduce_funcs(): funcs = ["ReduceMax", "ReduceMean", "ReduceMin", "ReduceProd", "ReduceSum", 'ReduceSumSquare', "ReduceLogSum", "ReduceLogSumExp", "ReduceL1", "ReduceL2"] for func in funcs: for keepdims in [True, False]: verify_reduce_func(func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims) verify_reduce_func(func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims) verify_reduce_func(func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims) verify_reduce_func(func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims) verify_reduce_func(func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims) verify_reduce_func(func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims) def verify_split(indata, outdatas, split, axis=0): indata = np.array(indata).astype(np.float32) outdatas = [np.array(o).astype(np.float32) for o in outdatas] if split: split_index = range(len(split)) else: split_index = range(len(outdatas)) node = helper.make_node( 'Split', inputs=['input'], outputs=['output_{}'.format(i) for i in range(len(split_index))], axis=axis, split=split ) graph = helper.make_graph([node], 'split_test', inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("output_{}".format(i), TensorProto.FLOAT, list(outdatas[i].shape)) for i in range(len(split_index)) ]) model 
= helper.make_model(graph, producer_name='split_test') for target, ctx in ctx_list(): output_shape = [o.shape for o in outdatas] output_type = ['float32', 'float32', 'float32'] tvm_out = get_tvm_output( model, indata, target, ctx, output_shape, output_type) for o, t in zip(outdatas, tvm_out): tvm.testing.assert_allclose(o, t) def test_split(): # 1D verify_split([1., 2., 3., 4., 5., 6.], [ [1., 2.], [3., 4.], [5., 6.]], [2, 2, 2], 0) verify_split([1., 2., 3., 4., 5., 6.], [ [1., 2.], [3.], [4., 5., 6.]], [2, 1, 3], 0) # 2D verify_split([[1., 2., 3., 4.], [7., 8., 9., 10.]], [[[1., 2.], [7., 8.]], [[3., 4.], [9., 10.]]], [2, 2], 1) # Split evenly (unstack) verify_split([1, 2, 3], [[1], [2], [3]], False) def test_binary_ops(): in_shape = (1, 2, 3, 3) dtype = "float32" out_shape = in_shape def verify_binary_ops(op, x, y, out_np, x_name='in1', y_name='in2', broadcast=None): if broadcast is None: z = helper.make_node(op, [x_name, y_name], ['out']) else: z = helper.make_node(op, [x_name, y_name], ['out'], broadcast=1) graph = helper.make_graph([z], '_test', inputs=[helper.make_tensor_value_info(x_name, TensorProto.FLOAT, list(in_shape)), helper.make_tensor_value_info(y_name, TensorProto.FLOAT, list(in_shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x, y], target, ctx) tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) x = np.random.uniform(size=in_shape).astype(dtype) y = np.random.uniform(size=in_shape).astype(dtype) z = np.random.uniform(size=(3,)).astype(dtype) verify_binary_ops("Add", x, y, x + y, broadcast=None) verify_binary_ops("Add", x, z, x + z, broadcast=True) verify_binary_ops("Sub", x, y, x - y, broadcast=None) verify_binary_ops("Sub", x, z, x - z, broadcast=True) verify_binary_ops("Mul", x, y, x * y, broadcast=None) verify_binary_ops("Mul", x, z, x * z, broadcast=True) verify_binary_ops("Mul", x, x, x * x, x_name='in1', y_name='in1', broadcast=None) verify_binary_ops("Div", x, y, x / y, broadcast=None) verify_binary_ops("Div", x, z, x / z, broadcast=True) verify_binary_ops("Sum", x, y, x + y, broadcast=None) verify_binary_ops("Greater", x, y, x > y, broadcast=True) verify_binary_ops("Less", x, y, x < y, broadcast=True) verify_binary_ops("Equal", x, y, x == y, broadcast=True) def test_single_ops(): in_shape = (1, 2, 3, 3) dtype = "float32" out_shape = in_shape def verify_single_ops(op, x, out_np, rtol=1e-5, atol=1e-5): z = helper.make_node(op, ['in1'], ['out']) graph = helper.make_graph([z], '_test', inputs=[helper.make_tensor_value_info("in1", TensorProto.FLOAT, list(in_shape)), ], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x], target, ctx) tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol) x = np.random.uniform(size=in_shape).astype(dtype) verify_single_ops("Neg", x, -x) verify_single_ops("Abs", x, np.abs(x)) verify_single_ops("Reciprocal", x, 1/x) verify_single_ops("Sqrt", x, np.sqrt(x)) verify_single_ops("Relu", x, np.maximum(x, 0)) verify_single_ops("Exp", x, np.exp(x)) verify_single_ops("Log", x, np.log(x)) verify_single_ops("Log", x, np.log(x)) verify_single_ops("ACos", x, np.arccos(x)) verify_single_ops("ACosh", x, np.arccosh(x)) verify_single_ops("ASin", x, np.arcsin(x)) verify_single_ops("ASinh", x, np.arcsinh(x)) 
verify_single_ops("ATan", x, np.arctan(x)) verify_single_ops("ATanh", x, np.arctanh(x)) verify_single_ops("Cos", x, np.cos(x)) verify_single_ops("Cosh", x, np.cosh(x)) verify_single_ops("Sin", x, np.sin(x)) verify_single_ops("Sinh", x, np.sinh(x)) verify_single_ops("Tan", x, np.tan(x)) verify_single_ops("Tanh", x, np.tanh(x)) verify_single_ops("Sigmoid", x, 1 / (1 + np.exp(-x))) verify_single_ops("Softsign", x, x / (1 + np.abs(x))) verify_single_ops("SoftPlus", x, np.log(1 + np.exp(x))) def test_leaky_relu(): def leaky_relu_x(x, alpha): return np.where(x >= 0, x, x * alpha) _test_onnx_op_elementwise((2, 4, 5, 6), leaky_relu_x, {'alpha': 0.25}, 'float32', 'LeakyRelu', {'alpha': 0.25}) def test_elu(): def elu_x(x, alpha): return np.where(x > 0, x, alpha * (np.exp(x) - 1.0)) _test_onnx_op_elementwise((2, 4, 5, 6), elu_x, {'alpha': 0.25}, 'float32', 'Elu', {'alpha': 0.25}) def test_selu(): def selu_x(x, alpha, gamma): return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0)) _test_onnx_op_elementwise((2, 4, 5, 6), selu_x, {'alpha': 0.25, 'gamma': 0.3}, 'float32', 'Selu', {'alpha': 0.25, 'gamma': 0.3}) def test_prelu(): def verify_prelu(x_shape, a_shape): node = helper.make_node('PRelu', inputs=['X', 'slope'], outputs=['Y']) graph = helper.make_graph([node], "prelu_test", inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)), helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape))], outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))]) model = helper.make_model(graph, producer_name='prelu_test') indata = np.random.uniform(-10, 10, x_shape).astype(np.float32) slopedata = np.random.uniform(-10, 10, a_shape).astype(np.float32) onnx_out = get_onnxruntime_output(model, [indata, slopedata]) for target, ctx in [('llvm', tvm.cpu())]: tvm_out = get_tvm_output(model, [indata, slopedata], target, ctx, list(x_shape), output_dtype='float32') tvm.testing.assert_allclose(onnx_out[0], tvm_out, rtol=1e-05, atol=1e-05) verify_prelu([3,4,5,6], [1, 4, 1, 1]) verify_prelu([1,8,5,6], [1, 8, 1, 1]) verify_prelu([2,12,16,16], [1, 12, 1, 1]) def test_ThresholdedRelu(): def ThresholdedRelu_x(x, alpha): out_np = np.clip(x, alpha, np.inf) out_np[out_np == alpha] = 0 return out_np _test_onnx_op_elementwise((2, 4, 5, 6), ThresholdedRelu_x, {'alpha': 0.25}, 'float32', 'ThresholdedRelu', {'alpha': 0.25}) def test_ScaledTanh(): def ScaledTanh_x(x, alpha, beta): return alpha * np.tanh(beta * x) _test_onnx_op_elementwise((2, 4, 5, 6), ScaledTanh_x, {'alpha': 0.25, 'beta': 0.3}, 'float32', 'ScaledTanh', {'alpha': 0.25, 'beta': 0.3}) def test_ParametricSoftplus(): def ParametricSoftplus_x(x, alpha, beta): return alpha * np.log(np.exp(beta * x) + 1) _test_onnx_op_elementwise((2, 4, 5, 6), ParametricSoftplus_x, {'alpha': 0.25, 'beta': 0.3}, 'float32', 'ParametricSoftplus', {'alpha': 0.25, 'beta': 0.3}) def test_Scale(): def Scale_x(x, scale): return scale * x _test_onnx_op_elementwise((2, 4, 5, 6), Scale_x, {'scale': 0.25}, 'float32', 'Scale', {'scale': 0.25}) def test_LogSoftmax(): _test_onnx_op_elementwise((1, 4), topi.testing.log_softmax_python, {}, 'float32', 'LogSoftmax', {'axis': 1}) def check_torch_conversion(model, input_size): dummy_input = torch.randn(*input_size) file_name = '{}.onnx'.format(model.__name__) # Set verbose=True for more output torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False) onnx_model = onnx.load(file_name) for target, ctx in ctx_list(): input_data = np.random.uniform(size=input_size).astype('int32') 
c2_out = get_onnxruntime_output(onnx_model, input_data) tvm_out = get_tvm_output(onnx_model, input_data, target, ctx) tvm.testing.assert_allclose(c2_out, tvm_out) def test_resnet(): check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224)) # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224)) # def test_alexnet(): # Torch's ONNX export does not support the adaptive pooling used by AlexNet? # check_torch_conversion(torchvision.models.alexnet, (1,3,224,224)) # Torch's ONNX export does not support the adaptive pooling used by vgg16? # def test_vgg16(): # check_torch_conversion(torchvision.models.vgg16, (1,3,224,224)) # TODO(@jroesch): Update Torch + ONNX to support this import. # def test_squeezenet(): # # Torch's ONNX export does not support the max pooling used by Squezenet # check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224)) def test_densenet(): check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224)) def test_inception(): check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224)) # TODO(@jroesch): Update Torch + ONNX to support this import. # def test_googlenet(): # check_torch_conversion(torchvision.models.googlenet, (1,3,224,224)) # TODO(@jroesch): Update Torch + ONNX to support this import. # def test_shufflenetv2(): # check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224)) def test_sign(): def Sign_x(x): return np.sign(x) _test_onnx_op_elementwise((3, 4, 5, 6), Sign_x, {}, 'float32', 'Sign', {}) def verify_not(indata, dtype): x = indata.astype(dtype) outdata = np.logical_not(x) node = helper.make_node('Not', inputs=['in'], outputs=['out'],) graph = helper.make_graph([node], 'not_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.BOOL, list(x.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))]) model = helper.make_model(graph, producer_name='not_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) def test_not(): # 2d verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool) # 3d verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool) # 4d verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool) def verify_and(indata, dtype): x = indata[0].astype(dtype) y = indata[1].astype(dtype) outdata = np.logical_and(x, y) node = helper.make_node('And', inputs=['in1', 'in2'], outputs=['out'], ) graph = helper.make_graph([node], 'and_test', inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)), helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))]) model = helper.make_model(graph, producer_name='and_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) def test_and(): # 2d x = (np.random.randn(3, 4) > 0) y = (np.random.randn(3, 4) > 0) verify_and(indata=[x, y], dtype=bool) # 3d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(3, 4, 5) > 0) verify_and(indata=[x, y], dtype=bool) # 4d x = (np.random.randn(3, 4, 5, 6) > 0) y = (np.random.randn(3, 4, 5, 6) > 0) verify_and(indata=[x, y], dtype=bool) # 3d vs 1d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(5) > 0) verify_and(indata=[x, y], dtype=bool) # 3d vs 2d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(4, 5) > 0) verify_and(indata=[x, y], 
dtype=bool) def verify_tile_v1(indata, outdata, **kwargs): node = helper.make_node('Tile', inputs=['in'], outputs=['out'], **kwargs) graph = helper.make_graph([node], 'tile_test', inputs=[helper.make_tensor_value_info( "in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='tile_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output( model, [indata], target, ctx, outdata.shape, opset=1) tvm.testing.assert_allclose(outdata, tvm_out) def verify_tile_v6(indata, repeats, outdata): node = helper.make_node('Tile', inputs=['input', 'repeats'], outputs=['out']) graph = helper.make_graph( [node], 'tile_test', inputs=[ helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)), helper.make_tensor_value_info("repeats", TensorProto.INT64, list(repeats.shape)) ], outputs=[ helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape)) ], initializer=[ helper.make_tensor("repeats", TensorProto.INT64, list(repeats.shape), repeats) ]) model = helper.make_model(graph, producer_name='tile_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=6) tvm.testing.assert_allclose(outdata, tvm_out) def test_tile(): x = np.random.rand(2, 3, 4, 5).astype(np.float32) repeats = np.random.randint( low=1, high=10, size=(np.ndim(x),)).astype(np.int64) z = np.tile(x, repeats) verify_tile_v1(x, z, repeats=repeats) verify_tile_v6(x, repeats, z) def verify_erf(indata, outdata): node = helper.make_node('Erf', inputs=['in'], outputs=['out']) graph = helper.make_graph([node], 'erf_test', inputs=[helper.make_tensor_value_info( 'in', TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info('out', TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='erf_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) def test_erf(): x = np.random.rand(2, 3, 4, 6).astype(np.float32) z = scipy.special.erf(x) verify_erf(x, z) def verify_where(condition, x, y, dtype, outdata): node = helper.make_node('Where', inputs=['condition', 'x', 'y'], outputs=['out']) graph = helper.make_graph([node], 'where_test', inputs=[helper.make_tensor_value_info('condition', TensorProto.BOOL, list(condition.shape)), helper.make_tensor_value_info('x', dtype, list(x.shape)), helper.make_tensor_value_info('y', dtype, list(y.shape))], outputs=[helper.make_tensor_value_info('out', dtype, list(outdata.shape))]) model = helper.make_model(graph, producer_name='where_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [condition, x, y], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) def test_where(): condition = np.array([[1, 0], [1, 1]], dtype=np.bool) x = np.array([[1, 2], [3, 4]], dtype=np.int64) y = np.array([[9, 8], [7, 6]], dtype=np.int64) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.INT64, outdata) x = np.array([[1, 2], [3, 4]], dtype=np.float32) y = np.array([[9, 8], [7, 6]], dtype=np.float32) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.FLOAT, outdata) x = np.array(1, dtype=np.float32) y = np.array([2], dtype=np.float32) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.FLOAT, outdata) x = np.array([2], dtype=np.float32) y = np.array(1, 
dtype=np.float32) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.FLOAT, outdata) condition = np.array(1, dtype=np.bool) x = np.array([[1, 2], [3, 4]], dtype=np.float32) y = np.array([[5, 6], [7, 8]], dtype=np.float32) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.FLOAT, outdata) x = np.array([[1, 2], [3, 4]], dtype=np.float32) y = np.array([[1], [7]], dtype=np.float32) outdata = np.where(condition, x, y) verify_where(condition, x, y, TensorProto.FLOAT, outdata) def verify_or(indata, dtype): x = indata[0].astype(dtype) y = indata[1].astype(dtype) outdata = np.logical_or(x, y) node = helper.make_node('Or', inputs=['in1', 'in2'], outputs=['out'], ) graph = helper.make_graph([node], 'or_test', inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)), helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))]) model = helper.make_model(graph, producer_name='or_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) def test_or(): # 2d x = (np.random.randn(3, 4) > 0) y = (np.random.randn(3, 4) > 0) verify_or(indata=[x, y], dtype=bool) # 3d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(3, 4, 5) > 0) verify_or(indata=[x, y], dtype=bool) # 4d x = (np.random.randn(3, 4, 5, 6) > 0) y = (np.random.randn(3, 4, 5, 6) > 0) verify_or(indata=[x, y], dtype=bool) # 3d vs 1d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(5) > 0) verify_or(indata=[x, y], dtype=bool) # 3d vs 2d x = (np.random.randn(3, 4, 5) > 0) y = (np.random.randn(4, 5) > 0) verify_or(indata=[x, y], dtype=bool) def test_batch_norm(): def verify_batch_norm(in_shape): batchnorm = onnx.helper.make_node('BatchNormalization', inputs=["x", "scale", "B", "mean", "var"], outputs=['Y']) graph = helper.make_graph([batchnorm], "batchnorm_test", inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)), helper.make_tensor_value_info("scale", TensorProto.FLOAT, [in_shape[1]]), helper.make_tensor_value_info("B", TensorProto.FLOAT, [in_shape[1]]), helper.make_tensor_value_info("mean", TensorProto.FLOAT, [in_shape[1]]), helper.make_tensor_value_info("var", TensorProto.FLOAT, [in_shape[1]]), ], outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))]) model = helper.make_model(graph, producer_name='batchnorm_test') for target, ctx in ctx_list(): x =
np.random.uniform(size=in_shape)
numpy.random.uniform
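# --- Editor's sketch, not part of the original dataset row. ---
# Every test in the snippet above follows one pattern: build a single-node
# ONNX graph with onnx.helper, compute a NumPy reference, and compare the
# compiled output. A minimal standalone version of that pattern is below,
# checked with onnxruntime (which the file's own get_onnxruntime_output
# helper already assumes) instead of TVM; compatible onnx/onnxruntime
# versions are assumed.
import numpy as np
import onnx
from onnx import helper, TensorProto

a = np.random.uniform(size=(2, 3)).astype('float32')
b = np.random.uniform(size=(2, 3)).astype('float32')
ref = np.minimum(a, b)  # NumPy reference for the ONNX Min op

node = helper.make_node("Min", ["a", "b"], ["out"])
graph = helper.make_graph(
    [node], "min_sketch",
    inputs=[helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a.shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b.shape))],
    outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref.shape))])
model = helper.make_model(graph, producer_name="min_sketch")

import onnxruntime  # assumed available, as in the file's helpers
sess = onnxruntime.InferenceSession(model.SerializeToString(),
                                    providers=["CPUExecutionProvider"])
(out,) = sess.run(None, {"a": a, "b": b})
np.testing.assert_allclose(ref, out, rtol=1e-5, atol=1e-5)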
from itertools import product

import numpy as np
from numpy.linalg import lstsq
from numpy.testing import assert_allclose
import pandas as pd
import pytest

from linearmodels.panel.data import PanelData
from linearmodels.panel.model import FamaMacBeth
from linearmodels.shared.exceptions import (
    InferenceUnavailableWarning,
    MissingValueWarning,
)
from linearmodels.tests.panel._utility import (
    access_attributes,
    assert_frame_similar,
    datatypes,
    generate_data,
)

pytestmark = pytest.mark.filterwarnings(
    "ignore::linearmodels.shared.exceptions.MissingValueWarning"
)

missing = [0.0, 0.20]
has_const = [True, False]
perms = list(product(missing, datatypes, has_const))
ids = ["-".join(str(param) for param in perm) for perm in perms]


@pytest.fixture(params=perms, ids=ids)
def data(request):
    missing, datatype, const = request.param
    return generate_data(
        missing, datatype, const=const, other_effects=1, ntk=(25, 200, 5)
    )


def test_fama_macbeth(data):
    res = FamaMacBeth(data.y, data.x).fit(debiased=True)
    y = PanelData(data.y)
    x = PanelData(data.x)
    missing = y.isnull | x.isnull
    y.drop(missing)
    x.drop(missing)
    y = y.dataframe
    x = x.dataframe
    times = y.index.levels[1]
    params = []
    for t in times:
        _y = y.xs(t, level=1)
        _x = x.xs(t, level=1)
        if _x.shape[0] < _x.shape[1]:
            continue
        _x = _x.loc[_y.index]
        params.append(lstsq(_x.values, _y.values, rcond=None)[0])
    params = np.array(params).squeeze()
    all_params = params
    params = params.mean(0)
    assert_allclose(params.squeeze(), res.params)
    assert_allclose(all_params, res.all_params.dropna(how="all"))
    e_params = all_params - params[None, :]
    ntime = e_params.shape[0]
    cov = e_params.T @ e_params / ntime / (ntime - 1)
    assert_allclose(cov, np.asarray(res.cov))
    access_attributes(res)


def test_unknown_cov_type(data):
    with pytest.raises(ValueError):
        FamaMacBeth(data.y, data.x).fit(cov_type="unknown")


@pytest.mark.smoke
def test_fama_macbeth_kernel_smoke(data):
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel")
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="bartlett")
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="newey-west")
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="parzen")
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="qs")
    FamaMacBeth(data.y, data.x).fit(cov_type="kernel", bandwidth=3)
    res = FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="andrews")
    access_attributes(res)


def test_fitted_effects_residuals(data):
    mod = FamaMacBeth(data.y, data.x)
    res = mod.fit()

    expected = mod.exog.values2d @ res.params.values
    expected = pd.DataFrame(expected, index=mod.exog.index, columns=["fitted_values"])
    assert_allclose(res.fitted_values, expected)
    assert_frame_similar(res.fitted_values, expected)

    expected.iloc[:, 0] = mod.dependent.values2d - expected.values
    expected.columns = ["idiosyncratic"]
    assert_allclose(res.idiosyncratic, expected)
    assert_frame_similar(res.idiosyncratic, expected)

    expected.iloc[:, 0] = np.nan
    expected.columns = ["estimated_effects"]
    assert_allclose(res.estimated_effects, expected)
    assert_frame_similar(res.estimated_effects, expected)


@pytest.mark.filterwarnings(
    "always::linearmodels.shared.exceptions.MissingValueWarning"
)
def test_block_size_warnings():
    y = np.arange(12.0)[:, None]
    x = np.ones((12, 3))
    x[:, 1] = np.arange(12.0)
    x[:, 2] = np.arange(12.0) ** 2
    idx = pd.MultiIndex.from_product(
        [["a", "b", "c"], pd.date_range("2000-1-1", periods=4)]
    )
    y = pd.DataFrame(y, index=idx, columns=["y"])
    x = pd.DataFrame(x, index=idx, columns=["x1", "x2", "x3"])
    with pytest.warns(MissingValueWarning):
        FamaMacBeth(y.iloc[:11], x.iloc[:11])
    with pytest.warns(InferenceUnavailableWarning):
        FamaMacBeth(y.iloc[::4], x.iloc[::4])


def test_block_size_error():
    y = np.arange(12.0)[:, None]
    x =
np.ones((12, 2))
numpy.ones
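# --- Editor's sketch, not part of the original dataset row. ---
# The reference computation inside test_fama_macbeth above, in isolation:
# run one OLS cross-section per time period, average the coefficients, and
# use the dispersion of the per-period estimates for the covariance
# (cov = e'e / (T * (T - 1)), exactly as in the test). Data is synthetic.
import numpy as np
from numpy.linalg import lstsq

rs = np.random.RandomState(0)
ntime, nentity, k = 50, 100, 3
params = []
for t in range(ntime):
    x_t = rs.standard_normal((nentity, k))
    y_t = x_t @ np.ones(k) + rs.standard_normal(nentity)
    params.append(lstsq(x_t, y_t, rcond=None)[0])
all_params = np.array(params)            # (ntime, k) per-period estimates
fmb_params = all_params.mean(0)          # Fama-MacBeth point estimates
e = all_params - fmb_params              # deviations from the mean
fmb_cov = e.T @ e / ntime / (ntime - 1)  # covariance of the mean estimate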
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt


def activation_statistics(init_func=lambda fan_in, fan_out:
np.random.randn(fan_in, fan_out)
numpy.random.randn
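# --- Editor's sketch, not part of the original dataset row. ---
# The row above is cut off mid-signature. A function with this name and an
# init_func(fan_in, fan_out) parameter typically forward-propagates random
# data through stacked layers and records per-layer activation statistics;
# the layer count, width, and tanh nonlinearity below are illustrative
# assumptions, not recovered from the source.
import numpy as np

def activation_statistics_sketch(init_func=lambda fan_in, fan_out:
                                 np.random.randn(fan_in, fan_out),
                                 n_layers=10, width=500):
    x = np.random.randn(1000, width)
    means, stds = [], []
    for _ in range(n_layers):
        w = init_func(width, width)
        x = np.tanh(x @ w)          # assumed nonlinearity
        means.append(x.mean())      # per-layer activation mean
        stds.append(x.std())        # per-layer activation spread
    return means, stds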
# -*- mode: python; coding: utf-8 -* # Copyright (c) 2018 Radio Astronomy Software Group # Licensed under the 2-clause BSD License from __future__ import absolute_import, division, print_function import numpy as np import warnings import copy from .uvbase import UVBase from . import parameter as uvp from . import utils as uvutils class UVCal(UVBase): """ A class defining calibration solutions Currently supported file types: calfits Attributes: UVParameter objects: For full list see UVCal Parameters (http://pyuvdata.readthedocs.io/en/latest/uvcal.html). Some are always required, some are required for certain cal_types and others are always optional. """ def __init__(self): self._Nfreqs = uvp.UVParameter('Nfreqs', description='Number of frequency channels', expected_type=int) self._Njones = uvp.UVParameter('Njones', description='Number of Jones calibration' 'parameters (Number of Jones matrix elements ' 'calculated in calibration).', expected_type=int) desc = ('Number of times with different calibrations calculated ' '(if a calibration is calculated over a range of integrations, ' 'this gives the number of separate calibrations along the time axis).') self._Ntimes = uvp.UVParameter('Ntimes', description=desc, expected_type=int) self._history = uvp.UVParameter('history', description='String of history, units English', form='str', expected_type=str) self._Nspws = uvp.UVParameter('Nspws', description='Number of spectral windows ' '(ie non-contiguous spectral chunks). ' 'More than one spectral window is not ' 'currently supported.', expected_type=int) desc = ('Time range (in JD) that cal solutions are valid for.' 'list: [start_time, end_time] in JD.') self._time_range = uvp.UVParameter('time_range', description=desc, form=2, expected_type=float) desc = ('Name of telescope. e.g. HERA. String.') self._telescope_name = uvp.UVParameter('telescope_name', description=desc, form='str', expected_type=str) desc = ('Number of antennas that have data associated with them ' '(i.e. length of ant_array), which may be smaller than the number' 'of antennas in the telescope (i.e. length of antenna_numbers).') self._Nants_data = uvp.UVParameter('Nants_data', description=desc, expected_type=int) desc = ('Number of antennas in the antenna_numbers array. May be larger ' 'than the number of antennas with gains associated with them.') self._Nants_telescope = uvp.UVParameter('Nants_telescope', description=desc, expected_type=int) desc = ('Array of integer antenna numbers that appear in self.gain_array, with shape (Nants_data,). ' 'This array is ordered to match the inherent ordering of the zeroth axis of self.gain_array.') self._ant_array = uvp.UVParameter('ant_array', description=desc, expected_type=int, form=('Nants_data',)) desc = ('Array of antenna names with shape (Nants_telescope,). ' 'Ordering of elements matches ordering of antenna_numbers.') self._antenna_names = uvp.UVParameter('antenna_names', description=desc, form=('Nants_telescope',), expected_type=str) desc = ('Array of all integer-valued antenna numbers in the telescope with shape (Nants_telescope,). ' 'Ordering of elements matches that of antenna_names. 
This array is not necessarily identical ' 'to ant_array, in that this array holds all antenna numbers associated with the telescope, not ' 'just antennas with data, and has an in principle non-specific ordering.') self._antenna_numbers = uvp.UVParameter('antenna_numbers', description=desc, form=('Nants_telescope',), expected_type=int) self._spw_array = uvp.UVParameter('spw_array', description='Array of spectral window ' 'numbers, shape (Nspws).', form=('Nspws',), expected_type=int) desc = 'Array of frequencies, center of the channel, shape (Nspws, Nfreqs), units Hz.' self._freq_array = uvp.UVParameter('freq_array', description=desc, form=('Nspws', 'Nfreqs'), expected_type=np.float, tols=1e-3) # mHz desc = ('Channel width of of a frequency bin. Units Hz.') self._channel_width = uvp.UVParameter('channel_width', description=desc, expected_type=np.float, tols=1e-3) desc = ('Array of antenna polarization integers, shape (Njones). ' 'linear pols -5:-8 (jxx, jyy, jxy, jyx).' 'circular pols -1:-4 (jrr, jll. jrl, jlr).') self._jones_array = uvp.UVParameter('jones_array', description=desc, expected_type=int, acceptable_vals=list(np.arange(-8, 0)), form=('Njones',)) desc = ('Array of calibration solution times, center of integration, ' 'shape (Ntimes), units Julian Date') self._time_array = uvp.UVParameter('time_array', description=desc, form=('Ntimes',), expected_type=np.float, tols=1e-3 / (60.0 * 60.0 * 24.0)) desc = ('Integration time of a time bin, units seconds.') self._integration_time = uvp.UVParameter('integration_time', description=desc, expected_type=np.float, tols=1e-3) # 1ms desc = ('The convention for applying the calibration solutions to data.' 'Values are "divide" or "multiply", indicating that to calibrate ' 'one should divide or multiply uncalibrated data by gains. ' 'Mathematically this indicates the alpha exponent in the equation: ' 'calibrated data = gain^alpha * uncalibrated data. A value of ' '"divide" represents alpha=-1 and "multiply" represents alpha=1.') self._gain_convention = uvp.UVParameter('gain_convention', form='str', expected_type=str, description=desc, acceptable_vals=['divide', 'multiply']) desc = ('Array of flags to be applied to calibrated data (logical OR ' 'of input and flag generated by calibration). True is flagged. ' 'Shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), type = bool.') self._flag_array = uvp.UVParameter('flag_array', description=desc, form=('Nants_data', 'Nspws', 'Nfreqs', 'Ntimes', 'Njones'), expected_type=np.bool) desc = ('Array of qualities of calibration solutions. ' 'The shape depends on cal_type, if the cal_type is "gain" or ' '"unknown", the shape is: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), ' 'if the cal_type is "delay", the shape is (Nants_data, Nspws, 1, Ntimes, Njones), ' 'type = float.') self._quality_array = uvp.UVParameter('quality_array', description=desc, form=('Nants_data', 'Nspws', 'Nfreqs', 'Ntimes', 'Njones'), expected_type=np.float) desc = ('Orientation of the physical dipole corresponding to what is ' 'labelled as the x polarization. Options are "east" ' '(indicating east/west orientation) and "north" (indicating ' 'north/south orientation)') self._x_orientation = uvp.UVParameter('x_orientation', description=desc, expected_type=str, acceptable_vals=['east', 'north']) # --- cal_type parameters --- desc = ('cal type parameter. 
Values are delay, gain or unknown.') self._cal_type = uvp.UVParameter('cal_type', form='str', expected_type=str, value='unknown', description=desc, acceptable_vals=['delay', 'gain', 'unknown']) desc = ('Required if cal_type = "gain". Array of gains, ' 'shape: (Nants_data, Nspws, Nfreqs, Ntimes, Njones), type = complex float.') self._gain_array = uvp.UVParameter('gain_array', description=desc, required=False, form=('Nants_data', 'Nspws', 'Nfreqs', 'Ntimes', 'Njones'), expected_type=np.complex) desc = ('Required if cal_type = "delay". Array of delays with units of seconds. ' 'Shape: (Nants_data, Nspws, 1, Ntimes, Njones), type = float.') self._delay_array = uvp.UVParameter('delay_array', description=desc, required=False, form=('Nants_data', 'Nspws', 1, 'Ntimes', 'Njones'), expected_type=np.float) desc = ('Required if cal_type = "delay". Frequency range that solutions are valid for.' 'list: [start_frequency, end_frequency] in Hz.') self._freq_range = uvp.UVParameter('freq_range', required=False, description=desc, form=2, expected_type=float, tols=1e-3) # --- cal_style parameters --- desc = ('Style of calibration. Values are sky or redundant.') self._cal_style = uvp.UVParameter('cal_style', form='str', expected_type=str, description=desc, acceptable_vals=['sky', 'redundant']) desc = ('Required if cal_style = "sky". Short string describing field ' 'center or dominant source.') self._sky_field = uvp.UVParameter('sky_field', form='str', required=False, expected_type=str, description=desc) desc = ('Required if cal_style = "sky". Name of calibration catalog.') self._sky_catalog = uvp.UVParameter('sky_catalog', form='str', required=False, expected_type=str, description=desc) desc = ('Required if cal_style = "sky". Phase reference antenna.') self._ref_antenna_name = uvp.UVParameter('ref_antenna_name', form='str', required=False, expected_type=str, description=desc) desc = ('Number of sources used.') self._Nsources = uvp.UVParameter('Nsources', required=False, expected_type=np.int, description=desc) desc = ('Range of baselines used for calibration.') self._baseline_range = uvp.UVParameter('baseline_range', form=2, required=False, expected_type=np.float, description=desc) desc = ('Name of diffuse model.') self._diffuse_model = uvp.UVParameter('diffuse_model', form='str', required=False, expected_type=str, description=desc) # --- truly optional parameters --- desc = ('The gain scale of the calibration, which indicates the units of the ' 'calibrated visibilities. For example, Jy or K str.') self._gain_scale = uvp.UVParameter('gain_scale', form='str', expected_type=str, description=desc, required=False) desc = ('Array of input flags, True is flagged. shape: (Nants_data, Nspws, ' 'Nfreqs, Ntimes, Njones), type = bool.') self._input_flag_array = uvp.UVParameter('input_flag_array', description=desc, required=False, form=('Nants_data', 'Nspws', 'Nfreqs', 'Ntimes', 'Njones'), expected_type=np.bool) desc = ('Origin (on github for e.g) of calibration software. 
Url and branch.') self._git_origin_cal = uvp.UVParameter('git_origin_cal', form='str', expected_type=str, description=desc, required=False) desc = ('Commit hash of calibration software (from git_origin_cal) used ' 'to generate solutions.') self._git_hash_cal = uvp.UVParameter('git_hash_cal', form='str', expected_type=str, description=desc, required=False) desc = ('Name of observer who calculated solutions in this file.') self._observer = uvp.UVParameter('observer', form='str', description=desc, expected_type=str, required=False) desc = ('Array of qualities of the calibration for entire arrays. ' 'The shape depends on cal_type, if the cal_type is "gain" or ' '"unknown", the shape is: (Nspws, Nfreqs, Ntimes, Njones), ' 'if the cal_type is "delay", the shape is (Nspws, 1, Ntimes, Njones), ' 'type = float.') self._total_quality_array = uvp.UVParameter('total_quality_array', description=desc, form=('Nspws', 'Nfreqs', 'Ntimes', 'Njones'), expected_type=np.float, required=False) desc = ('Any user supplied extra keywords, type=dict. Keys should be ' '8 character or less strings if writing to calfits files. ' 'Use the special key "comment" for long multi-line string comments.') self._extra_keywords = uvp.UVParameter('extra_keywords', required=False, description=desc, value={}, spoof_val={}, expected_type=dict) super(UVCal, self).__init__() def check(self, check_extra=True, run_check_acceptability=True): """ Check that all required parameters are set reasonably. Check that required parameters exist and have appropriate shapes. Optionally check if the values are acceptable. Args: run_check_acceptability: Option to check if values in required parameters are acceptable. Default is True. """ # Make sure requirements are set properly for cal_style if self.cal_style == 'sky': self.set_sky() elif self.cal_style == 'redundant': self.set_redundant() # check for deprecated x_orientation strings and convert to new values (if possible) if self.x_orientation is not None: if self.x_orientation not in self._x_orientation.acceptable_vals: warn_string = ('x_orientation {xval} is not one of [{vals}], ' .format(xval=self.x_orientation, vals=(', ').join(self._x_orientation.acceptable_vals))) if self.x_orientation.lower() == 'e': self.x_orientation = 'east' warn_string += 'converting to "east".' elif self.x_orientation.lower() == 'n': self.x_orientation = 'north' warn_string += 'converting to "north".' else: warn_string += 'cannot be converted.' warnings.warn(warn_string + ' Only [{vals}] will be supported ' 'starting in version 1.5' .format(vals=(', ').join(self._x_orientation.acceptable_vals)), DeprecationWarning) # first run the basic check from UVBase super(UVCal, self).check(check_extra=check_extra, run_check_acceptability=run_check_acceptability) # require that all entries in ant_array exist in antenna_numbers if not all(ant in self.antenna_numbers for ant in self.ant_array): raise ValueError('All antennas in ant_array must be in antenna_numbers.') # issue warning if extra_keywords keys are longer than 8 characters for key in self.extra_keywords.keys(): if len(key) > 8: warnings.warn('key {key} in extra_keywords is longer than 8 ' 'characters. 
It will be truncated to 8 if written ' 'to a calfits file format.'.format(key=key)) # issue warning if extra_keywords values are lists, arrays or dicts for key, value in self.extra_keywords.items(): if isinstance(value, (list, dict, np.ndarray)): warnings.warn('{key} in extra_keywords is a list, array or dict, ' 'which will raise an error when writing calfits ' 'files'.format(key=key)) return True def set_gain(self): """Set cal_type to 'gain' and adjust required parameters.""" self.cal_type = 'gain' self._gain_array.required = True self._delay_array.required = False self._freq_range.required = False self._quality_array.form = self._gain_array.form self._total_quality_array.form = self._gain_array.form[1:] def set_delay(self): """Set cal_type to 'delay' and adjust required parameters.""" self.cal_type = 'delay' self._gain_array.required = False self._delay_array.required = True self._freq_range.required = True self._quality_array.form = self._delay_array.form self._total_quality_array.form = self._delay_array.form[1:] def set_unknown_cal_type(self): """Set cal_type to 'unknown' and adjust required parameters.""" self.cal_type = 'unknown' self._gain_array.required = False self._delay_array.required = False self._freq_range.required = False self._quality_array.form = self._gain_array.form self._total_quality_array.form = self._gain_array.form[1:] def set_sky(self): """Set cal_style to 'sky' and adjust required parameters.""" self.cal_style = 'sky' self._sky_field.required = True self._sky_catalog.required = True self._ref_antenna_name.required = True def set_redundant(self): """Set cal_style to 'redundant' and adjust required parameters.""" self.cal_style = 'redundant' self._sky_field.required = False self._sky_catalog.required = False self._ref_antenna_name.required = False def select(self, antenna_nums=None, antenna_names=None, frequencies=None, freq_chans=None, times=None, jones=None, run_check=True, check_extra=True, run_check_acceptability=True, inplace=True): """ Select specific antennas, frequencies, times and jones polarization terms to keep in the object while discarding others. The history attribute on the object will be updated to identify the operations performed. Args: antenna_nums: The antennas numbers to keep in the object (antenna positions and names for the removed antennas will be retained). This cannot be provided if antenna_names is also provided. antenna_names: The antennas names to keep in the object (antenna positions and names for the removed antennas will be retained). This cannot be provided if antenna_nums is also provided. frequencies: The frequencies to keep in the object. freq_chans: The frequency channel numbers to keep in the object. times: The times to keep in the object. jones: The jones polarization terms to keep in the object. run_check: Option to check for the existence and proper shapes of required parameters after downselecting data on this object. Default is True. check_extra: Option to check shapes and types of optional parameters as well as required ones. Default is True. run_check_acceptability: Option to check acceptable range of the values of required parameters after downselecting data on this object. Default is True. 
inplace: Option to perform the select directly on self (True, default) or return a new UVCal object, which is a subselection of self (False) """ if inplace: cal_object = self else: cal_object = copy.deepcopy(self) # build up history string as we go history_update_string = ' Downselected to specific ' n_selects = 0 if antenna_names is not None: if antenna_nums is not None: raise ValueError('Only one of antenna_nums and antenna_names can be provided.') antenna_names = uvutils._get_iterable(antenna_names) antenna_nums = [] for s in antenna_names: if s not in cal_object.antenna_names: raise ValueError('Antenna name {a} is not present in the antenna_names array'.format(a=s)) ind = np.where(np.array(cal_object.antenna_names) == s)[0][0] antenna_nums.append(cal_object.antenna_numbers[ind]) if antenna_nums is not None: antenna_nums = uvutils._get_iterable(antenna_nums) history_update_string += 'antennas' n_selects += 1 ant_inds = np.zeros(0, dtype=np.int) for ant in antenna_nums: if ant in cal_object.ant_array: ant_inds = np.append(ant_inds, np.where(cal_object.ant_array == ant)[0]) else: raise ValueError('Antenna number {a} is not present in the ' ' array'.format(a=ant)) ant_inds = list(sorted(set(list(ant_inds)))) cal_object.Nants_data = len(ant_inds) cal_object.ant_array = cal_object.ant_array[ant_inds] cal_object.flag_array = cal_object.flag_array[ant_inds, :, :, :, :] cal_object.quality_array = cal_object.quality_array[ant_inds, :, :, :, :] if cal_object.cal_type == 'delay': cal_object.delay_array = cal_object.delay_array[ant_inds, :, :, :, :] else: cal_object.gain_array = cal_object.gain_array[ant_inds, :, :, :, :] if cal_object.input_flag_array is not None: cal_object.input_flag_array = cal_object.input_flag_array[ant_inds, :, :, :, :] if cal_object.total_quality_array is not None: warnings.warn('Cannot preserve total_quality_array when changing ' 'number of antennas; discarding') cal_object.total_quality_array = None if times is not None: times = uvutils._get_iterable(times) if n_selects > 0: history_update_string += ', times' else: history_update_string += 'times' n_selects += 1 time_inds = np.zeros(0, dtype=np.int) for jd in times: if jd in cal_object.time_array: time_inds = np.append(time_inds, np.where(cal_object.time_array == jd)[0]) else: raise ValueError('Time {t} is not present in the time_array'.format(t=jd)) time_inds = list(sorted(set(list(time_inds)))) cal_object.Ntimes = len(time_inds) cal_object.time_array = cal_object.time_array[time_inds] if cal_object.Ntimes > 1: time_separation =
np.diff(cal_object.time_array)
numpy.diff
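# --- Editor's sketch, not part of the original dataset row. ---
# The select() method above ends by inspecting the spacing of the kept
# times with np.diff. A regular-cadence check of that kind, in isolation;
# the tolerance mirrors the ~1 ms (in days) tolerance the class attaches
# to its time_array parameter.
import numpy as np

time_array = np.array([2458100.0, 2458100.1, 2458100.2, 2458100.4])  # JD, illustrative
time_separation = np.diff(time_array)
tol = 1e-3 / (60.0 * 60.0 * 24.0)
evenly_spaced = np.allclose(time_separation, time_separation[0], atol=tol)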
from epidag.fitting import BayesResult from epidag.fitting.alg.fitter import Fitter import numpy as np import numpy.random as rd from abc import ABCMeta, abstractmethod __author__ = 'TimeWz667' __all__ = ['ABCSMC'] class AbsStepper(metaclass=ABCMeta): def __init__(self, name, lo, up): self.Name = name self.Lower, self.Upper = lo, up @abstractmethod def propose(self, v, scale): pass class BinaryStepper(AbsStepper): def propose(self, v, scale): return v class DoubleStepper(AbsStepper): def propose(self, v, scale): return v + rd.normal() * scale class IntegerStepper(AbsStepper): def propose(self, v, scale): return np.round(rd.normal(v, scale)) class Steppers: def __init__(self, model): self.Nodes = dict() temp = model.sample_prior() for p in model.ParameterNodes: pp = model.BN[p].get_distribution(temp) if pp.Type == 'Binary': step = BinaryStepper(p, 0, 1) elif pp.Type == 'Integer': step = IntegerStepper(p, pp.Lower, pp.Upper) else: step = DoubleStepper(p, pp.Lower, pp.Upper) self.Nodes[p] = step def mutate(self, model, gene, scales): new = gene.clone() mutated = dict() for key, node in self.Nodes.items(): while True: proposed = node.propose(new[key], scales[key]) if node.Lower < proposed < node.Upper: mutated[key] = proposed break new.impulse(mutated) new.LogPrior = model.evaluate_prior(new) return new def wt_sd(vs, wts): vs = np.array(vs) wts = wts / wts.sum() mu = (wts * vs) / len(wts) return np.sqrt(np.sum(wts * (vs - mu) * (vs - mu))) class ABCSMC(Fitter): def __init__(self, name_logger="ABCSMC", alpha=0.9, p_thres=0.7): Fitter.__init__(self, name_logger, alpha=alpha, p_thres=p_thres) def fit(self, model, **kwargs): n_post = kwargs['n_post'] max_stay = kwargs['max_stay'] if 'max_stay' in kwargs else 5 max_round = kwargs['max_round'] if 'max_round' in kwargs else 20 trj = list() n_round = n_stay = 0 n_eval = n_post steppers = Steppers(model) self.info('Initialising') post, d0, wts, eps0 = self.__initialise(n_post, model) rec = { 'Round': n_round, 'Eval': n_eval, 'Eps':eps0, 'ESS': 1 / np.sum(wts * wts), 'ACC': 1 } self.info('Round {}, ESS {:0.0f}, Eps {:0.4g}, Acceptance {:.1f}%'.format( rec['Round'], rec['ESS'], rec['Eps'], rec['ACC'] * 100 )) trj.append(rec) # Iteration while True: # Update eps n_round += 1 eps1 = self.find_eps(d0, eps0) if eps1 > eps0: n_stay += 1 eps1 = eps0 else: n_stay = 0 # Step 1 Updating weight act_np0, a, wts = self.__update_weights(d0, wts, eps0, eps1) # Step 2 Resampling post, d1, wts = self.__resample(post, d0, wts) # Step 3 MH stepping post, d1, n_eval, acc = self.__step_mh(post, wts, d0, eps1, n_eval, steppers, model) rec = { 'Round': n_round, 'Eval': n_eval, 'Eps': eps1, 'ESS': 1 / np.sum(wts * wts), 'ACC': acc } trj.append(rec) self.info('Round {}, ESS {:0.4g}, Eps {:0.4g}, Acceptance {:.1f}%'.format( rec['Round'], rec['ESS'], rec['Eps'], rec['ACC'] * 100 )) if n_round >= max_round or (n_stay >= max_stay and n_round > 3): break eps0, d0 = eps1, d1 self.info('Completed') res = BayesResult(nodes=post, model=model, alg=self) res.Benchmarks.update(rec) res.Benchmarks['Niter'] = n_post return res def is_updatable(self): return True def update(self, res, **kwargs): pass def find_eps(self, ds, eps): e0 = sum(d for d in ds if d < eps) / len(ds) et = self.Parameters['alpha'] * e0 ds = list(ds) ds.sort(key = lambda x : -x) for eps1 in ds: e1 = sum(d for d in ds if d < eps1) / len(ds) if e1 <= et: return eps1 else: return eps def __initialise(self, n_post, model): post = list() while len(post) < n_post: p = model.sample_prior() p.LogPrior = model.evaluate_prior(p) di 
= model.evaluate_distance(p) if np.isfinite(di): p.LogLikelihood = - di post.append(p) d0 = [-p.LogLikelihood for p in post] wts = np.ones(n_post) / n_post eps0 = float('inf') return post, d0, wts, eps0 def __update_weights(self, ds, wts, eps0, eps1): ds = np.array(ds) act_np0 = ds < eps0 act_np1 = ds < eps1 a = act_np0 > 0 wts[a] *= act_np1[a] / act_np0[a] wts[1 - act_np0] = 0 wts /= wts.sum() return act_np0, a, wts def __resample(self, post, ds, wts): n_post = len(post) ess_thres = len(post) * self.Parameters['p_thres'] ds = np.array(ds) if ess_thres * sum(wts * wts) > 1: assert sum(wts > 0) > 2 alive = wts > 0 ind = [k for k, v in enumerate(alive) if v] re_ind = rd.choice(ind, size=n_post, replace=True, p=wts[wts > 0]) post = [post[i].clone() for i in re_ind] d1 = ds[re_ind] wts = np.ones(n_post) / n_post else: d1 = ds return post, d1, wts def __step_mh(self, post, wts, ds, eps1, n_eval, steppers, model): n_post = len(post) tau = {p: wt_sd([d[p] for d in post], wts) for p in model.ParameterNodes} post_p = list(post) dp = np.zeros(n_post) for i in range(n_post): di = float('inf') pars = post[i] while
np.isinf(di)
numpy.isinf
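# --- Editor's sketch, not part of the original dataset row. ---
# Two quantities the ABC-SMC loop above relies on, in isolation: normalised
# importance weights and the effective sample size ESS = 1 / sum(w^2) that
# drives the resampling decision (resample when ESS falls below
# p_thres * n, with p_thres = 0.7 as in the constructor default).
import numpy as np

raw_wts = np.array([0.5, 2.0, 1.0, 0.0, 3.5])
wts = raw_wts / raw_wts.sum()
ess = 1 / np.sum(wts * wts)       # n for uniform weights, 1 if degenerate
resample = ess < 0.7 * len(wts)   # the p_thres criterion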
import numpy as np
import pytest
from numpy.testing import (
    assert_, assert_allclose, assert_equal, assert_warns, assert_raises)

from statsmodels.tsa.arima.datasets.brockwell_davis_2002 import lake, oshorts
from statsmodels.tsa.arima.estimators.gls import gls


@pytest.mark.low_precision('Test against Example 6.6.1 in Brockwell and Davis'
                           ' (2016)')
def test_brockwell_davis_example_661():
    endog = oshorts.copy()
    exog = np.ones_like(endog)

    # Here we restrict the iterations to 1 and test against the values in the
    # text (set tolerance=1 to suppress the warning that it didn't converge)
    res, _ = gls(endog, exog, order=(0, 0, 1), max_iter=1, tolerance=1)
    assert_allclose(res.exog_params, -4.745, atol=1e-3)
    assert_allclose(res.ma_params, -0.818, atol=1e-3)
    assert_allclose(res.sigma2, 2041, atol=1)

    # Here we do not restrict the iterations and test against the values in
    # the last row of Table 6.2 (note: this table does not report sigma2)
    res, _ = gls(endog, exog, order=(0, 0, 1))
    assert_allclose(res.exog_params, -4.780, atol=1e-3)
    assert_allclose(res.ma_params, -0.848, atol=1e-3)


@pytest.mark.low_precision('Test against Example 6.6.2 in Brockwell and Davis'
                           ' (2016)')
def test_brockwell_davis_example_662():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    res, _ = gls(endog, exog, order=(2, 0, 0))

    # Parameter values taken from Table 6.3 row 2, except for sigma2 and the
    # last digit of the exog_params[0], which were given in the text
    assert_allclose(res.exog_params, [10.091, -.0216], atol=1e-3)
    assert_allclose(res.ar_params, [1.005, -.291], atol=1e-3)
    assert_allclose(res.sigma2, .4571, atol=1e-3)


def test_integrated():
    # Get the lake data
    endog1 = lake.copy()
    exog1 = np.c_[np.ones_like(endog1), np.arange(1, len(endog1) + 1) * 1.0]
    endog2 = np.r_[0, np.cumsum(endog1)]
    exog2 = np.c_[[0, 0], np.cumsum(exog1, axis=0).T].T

    # Estimate without integration
    p1, _ = gls(endog1, exog1, order=(1, 0, 0))
    # Estimate with integration
    with assert_warns(UserWarning):
        p2, _ = gls(endog2, exog2, order=(1, 1, 0))

    assert_allclose(p1.params, p2.params)


def test_integrated_invalid():
    # Test for invalid versions of integrated model
    # - include_constant=True is invalid if integration is present
    endog = lake.copy()
    exog = np.arange(1, len(endog) + 1) * 1.0
    assert_raises(ValueError, gls, endog, exog, order=(1, 1, 0),
                  include_constant=True)


def test_results():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test for results output
    p, res = gls(endog, exog, order=(1, 0, 0))
    assert_('params' in res)
    assert_('converged' in res)
    assert_('differences' in res)
    assert_('iterations' in res)
    assert_('arma_estimator' in res)
assert_('arma_results' in res)
numpy.testing.assert_
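# --- Editor's sketch, not part of the original dataset row. ---
# test_integrated above relies on the cumsum/diff round trip: integrating a
# series with np.r_[0, np.cumsum(...)] and then modelling with d=1 should
# recover the original parameters. The data manipulation itself, in
# isolation, with illustrative values:
import numpy as np

endog1 = np.array([1.0, 2.0, 1.5, 3.0])
endog2 = np.r_[0, np.cumsum(endog1)]   # integrated series, as in the test
recovered = np.diff(endog2)            # differencing undoes the integration
np.testing.assert_allclose(recovered, endog1)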
''' ========================================================================= Author: <NAME> Company: Filename: Last modifed: 06.04.2017 by <NAME> Description: Functional test ========================================================================= ========================================================================= ''' import numpy as np import ext3DLBPpy print("=============================================") test = "RD_LBP_P42g_R2" array = np.array([ [[24,71,216,122,134],[201,78,152,165,81],[86,204,66,35,60],[34,119,51,154,24],[99,186,118,117,11]], [[173,236,8,108,76],[102,188,23,116,75],[128,194,214,183,170],[28,60,176,35,2],[128,169,226,73,129]], [[10,188,199,151,145],[245,44,14,47,138],[109,113,133,216,164],[81,155,37,47,250],[170,32,152,33,5]], [[112,213,120,243,183],[23,234,98,219,188],[216,73,184,198,27],[240,145,5,3,11],[241,46,251,210,97]], [[138,80,145,230,158],[187,239,68,19,245],[147,165,106,237,218],[90,204,70,250,127],[167,169,162,129,58]] ], dtype=np.int32) mur = 194 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 0 failed!" print("RD_LBP_P42g_R2: test 0 passed!") array = np.array([ [[116,235,109,207,121],[244,97,251,165,68],[110,198,1,216,82],[169,10,46,9,58],[127,21,218,204,248]], [[254,227,152,219,117],[4,247,223,80,238],[170,113,34,234,24],[224,253,93,235,8],[2,60,145,79,43]], [[121,7,128,85,72],[212,20,236,228,38],[178,157,125,131,5],[78,199,43,243,120],[180,8,150,241,39]], [[62,242,230,56,38],[194,198,223,34,201],[98,99,238,185,26],[22,105,156,76,135],[236,142,109,7,103]], [[156,53,152,247,143],[1,37,174,237,254],[120,180,196,48,16],[65,245,162,90,13],[145,53,58,88,77]] ], dtype=np.int32) mur = 163 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 18; assert LBPcode==truth, "RD_LBP_P42g_R2: test 1 failed!" print("RD_LBP_P42g_R2: test 1 passed!") array = np.array([ [[129,53,10,247,219],[242,154,55,53,63],[69,71,94,238,22],[122,215,3,109,38],[121,11,193,144,70]], [[248,16,240,137,124],[70,208,250,151,90],[64,172,48,98,35],[29,191,211,67,236],[130,222,221,218,162]], [[12,225,156,0,51],[9,85,213,116,195],[127,155,19,144,245],[151,209,110,151,79],[115,54,2,114,94]], [[14,178,253,195,250],[29,176,228,103,234],[183,54,116,93,40],[32,234,124,82,84],[206,14,32,221,64]], [[166,181,244,88,84],[221,220,41,173,181],[58,235,96,8,9],[207,193,8,234,181],[151,25,90,149,237]] ], dtype=np.int32) mur = 60 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 2 failed!" print("RD_LBP_P42g_R2: test 2 passed!") array = np.array([ [[32,34,95,135,137],[237,64,177,152,164],[3,243,74,33,221],[99,114,191,203,222],[179,1,203,6,134]], [[156,154,13,191,187],[22,227,62,230,170],[177,27,153,123,2],[59,80,172,102,89],[158,5,66,126,131]], [[237,57,11,195,61],[221,77,7,117,175],[41,219,141,74,127],[152,124,173,215,112],[11,76,33,243,252]], [[98,108,90,146,27],[64,90,90,91,26],[148,19,6,85,247],[229,199,122,17,51],[62,75,216,130,112]], [[126,181,72,58,19],[133,153,0,195,200],[3,11,65,40,152],[78,28,149,161,205],[254,58,112,198,21]] ], dtype=np.int32) mur = 185 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 3 failed!" 
print("RD_LBP_P42g_R2: test 3 passed!") array = np.array([ [[11,225,62,178,161],[174,222,139,110,35],[208,58,217,28,72],[186,125,40,113,88],[117,205,216,62,92]], [[13,112,153,186,46],[106,33,169,195,206],[15,96,58,30,167],[104,237,136,40,14],[17,111,155,147,248]], [[252,79,13,5,131],[119,30,229,214,20],[242,148,222,6,119],[134,117,235,112,179],[94,208,253,61,44]], [[162,226,79,130,174],[253,42,217,69,24],[54,142,236,4,200],[194,235,148,114,101],[195,143,160,107,169]], [[240,26,89,150,104],[94,174,81,52,68],[39,134,147,163,249],[138,143,43,53,254],[115,252,92,211,176]] ], dtype=np.int32) mur = 240 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 15; assert LBPcode==truth, "RD_LBP_P42g_R2: test 4 failed!" print("RD_LBP_P42g_R2: test 4 passed!") array = np.array([ [[207,155,53,93,113],[130,102,10,209,250],[47,170,202,43,165],[86,4,90,13,27],[117,150,101,10,152]], [[172,87,15,85,212],[102,202,240,227,41],[116,203,35,217,83],[161,171,50,99,6],[68,78,204,201,92]], [[148,177,81,60,55],[159,68,108,39,199],[206,122,211,125,240],[57,54,85,202,190],[75,117,175,89,47]], [[97,138,62,81,57],[151,36,157,20,203],[230,232,47,231,203],[240,219,58,157,2],[114,92,62,96,179]], [[253,46,122,147,201],[64,39,234,52,250],[74,68,79,155,6],[139,89,180,144,99],[235,135,176,134,113]] ], dtype=np.int32) mur = 3 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 23; assert LBPcode==truth, "RD_LBP_P42g_R2: test 5 failed!" print("RD_LBP_P42g_R2: test 5 passed!") array = np.array([ [[154,189,16,197,163],[21,224,165,25,236],[169,97,140,90,60],[37,78,46,76,139],[115,43,47,121,153]], [[235,234,138,150,15],[136,58,107,115,3],[209,45,23,80,243],[61,21,45,175,139],[117,116,51,213,44]], [[151,52,209,131,114],[118,226,246,54,208],[130,194,138,202,128],[62,169,3,230,236],[62,61,11,170,96]], [[242,102,235,195,221],[132,28,215,146,119],[170,5,87,197,99],[42,77,99,121,58],[41,1,16,171,208]], [[120,91,6,64,223],[225,175,134,7,81],[243,241,50,124,239],[57,248,199,17,243],[203,44,148,157,181]] ], dtype=np.int32) mur = 156 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 19; assert LBPcode==truth, "RD_LBP_P42g_R2: test 6 failed!" print("RD_LBP_P42g_R2: test 6 passed!") array = np.array([ [[103,197,29,1,81],[64,71,182,211,74],[236,249,69,63,167],[18,50,230,253,224],[158,182,21,202,132]], [[179,153,74,239,19],[40,71,130,182,119],[219,102,134,222,60],[189,204,179,79,143],[154,31,137,16,221]], [[122,59,71,213,84],[119,15,139,188,177],[134,54,27,175,113],[127,252,193,103,106],[90,218,205,183,224]], [[112,109,170,206,152],[36,13,146,176,30],[23,134,25,114,133],[159,108,20,80,71],[40,83,198,31,204]], [[187,62,228,237,34],[90,220,92,134,6],[112,150,170,109,16],[0,121,165,113,0],[200,24,172,137,173]] ], dtype=np.int32) mur = 203 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 26; assert LBPcode==truth, "RD_LBP_P42g_R2: test 7 failed!" 
print("RD_LBP_P42g_R2: test 7 passed!") array = np.array([ [[59,143,67,220,242],[143,215,25,183,200],[36,195,223,169,247],[131,99,96,35,206],[77,104,252,246,90]], [[154,28,172,82,78],[118,0,151,219,192],[134,221,141,44,166],[20,34,194,23,13],[204,180,64,37,231]], [[118,62,91,2,77],[46,24,174,91,253],[248,122,106,166,91],[159,47,31,79,9],[181,185,64,20,223]], [[110,254,199,131,226],[121,25,22,120,157],[253,4,30,143,221],[160,14,200,41,144],[201,213,118,251,53]], [[241,224,189,87,226],[162,31,204,18,231],[49,244,15,240,182],[159,168,27,150,186],[186,7,155,249,183]] ], dtype=np.int32) mur = 113 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 23; assert LBPcode==truth, "RD_LBP_P42g_R2: test 8 failed!" print("RD_LBP_P42g_R2: test 8 passed!") array = np.array([ [[12,118,172,215,61],[200,39,97,44,197],[148,80,208,33,129],[249,71,117,117,243],[7,238,171,119,90]], [[77,45,134,240,174],[124,237,225,163,52],[118,175,14,104,195],[230,222,2,144,126],[21,130,223,240,222]], [[131,40,88,119,212],[123,2,193,143,218],[61,140,168,240,72],[191,141,122,212,136],[196,180,170,219,142]], [[197,152,87,129,60],[169,232,159,108,10],[130,9,140,62,59],[193,218,32,160,84],[234,49,146,74,47]], [[139,95,133,20,104],[197,185,89,229,233],[178,226,125,36,26],[36,84,4,224,143],[116,205,166,192,121]] ], dtype=np.int32) mur = 46 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 22; assert LBPcode==truth, "RD_LBP_P42g_R2: test 9 failed!" print("RD_LBP_P42g_R2: test 9 passed!") array = np.array([ [[28,223,132,44,87],[39,44,206,12,112],[54,3,48,63,12],[22,177,3,120,154],[111,235,88,154,56]], [[140,238,156,189,166],[17,174,123,65,151],[238,166,221,219,150],[254,172,3,237,134],[135,83,162,151,244]], [[24,209,14,153,220],[75,248,94,68,47],[93,80,40,61,15],[50,1,60,180,118],[224,84,218,105,107]], [[167,224,238,62,67],[51,126,117,94,124],[42,121,128,48,41],[32,90,242,8,129],[79,118,194,222,148]], [[1,70,211,174,239],[73,243,254,58,37],[99,195,147,98,80],[89,159,224,76,35],[121,75,44,203,54]] ], dtype=np.int32) mur = 48 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 27; assert LBPcode==truth, "RD_LBP_P42g_R2: test 10 failed!" print("RD_LBP_P42g_R2: test 10 passed!") array = np.array([ [[243,223,207,74,157],[16,141,221,47,154],[170,68,76,152,146],[220,242,43,155,123],[245,32,67,60,39]], [[19,204,76,189,33],[126,142,68,188,232],[90,235,68,40,247],[210,218,253,106,60],[160,135,46,129,145]], [[185,106,71,204,205],[83,173,240,236,206],[195,188,8,13,131],[219,189,214,145,187],[215,103,247,6,162]], [[94,104,71,98,160],[96,248,151,184,65],[135,250,165,34,86],[115,17,66,241,226],[67,149,5,241,166]], [[69,205,242,95,28],[161,21,201,204,105],[115,205,152,128,127],[137,125,120,215,13],[135,248,8,159,146]] ], dtype=np.int32) mur = 15 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 11 failed!" 
print("RD_LBP_P42g_R2: test 11 passed!") array = np.array([ [[64,48,214,158,140],[109,252,12,77,62],[241,196,135,203,193],[224,118,14,225,69],[5,165,4,241,238]], [[248,247,242,199,215],[230,67,244,170,120],[64,100,86,118,213],[192,26,159,124,162],[73,103,250,34,202]], [[177,144,191,93,208],[38,58,50,192,122],[23,64,45,193,159],[125,254,96,33,161],[251,217,204,42,135]], [[3,165,254,72,204],[142,14,9,118,203],[196,141,13,104,209],[223,54,51,147,102],[147,97,232,168,165]], [[106,184,22,87,115],[237,135,137,41,230],[245,177,135,97,43],[123,230,15,89,107],[234,216,61,209,117]] ], dtype=np.int32) mur = 155 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 12 failed!" print("RD_LBP_P42g_R2: test 12 passed!") array = np.array([ [[207,128,220,216,145],[175,8,37,60,121],[250,9,44,129,185],[146,121,57,110,72],[175,142,191,93,140]], [[73,245,9,28,224],[213,180,41,47,63],[142,182,75,105,245],[82,25,11,170,165],[146,97,165,165,58]], [[50,54,164,143,159],[99,106,97,76,215],[192,13,136,6,105],[218,146,234,207,14],[142,83,197,138,65]], [[41,175,1,174,160],[1,113,243,212,128],[200,203,151,100,150],[246,91,111,31,197],[190,57,63,61,205]], [[93,184,71,106,220],[223,95,191,163,11],[112,76,102,186,170],[122,137,35,73,105],[47,139,120,159,24]] ], dtype=np.int32) mur = 57 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 13 failed!" print("RD_LBP_P42g_R2: test 13 passed!") array = np.array([ [[161,82,239,42,19],[226,229,204,71,228],[173,47,104,201,231],[48,199,91,51,136],[159,166,198,226,143]], [[198,15,101,116,107],[53,186,103,154,40],[80,193,226,1,5],[27,182,47,110,80],[2,10,172,104,82]], [[48,194,85,118,132],[126,158,208,81,111],[68,104,67,146,16],[205,58,105,87,242],[7,245,242,149,185]], [[66,48,0,202,29],[132,110,205,247,160],[148,187,93,20,168],[213,109,108,145,138],[245,101,74,179,236]], [[42,28,43,83,2],[137,92,76,85,211],[144,161,254,11,189],[175,227,154,38,241],[50,12,54,53,114]] ], dtype=np.int32) mur = 133 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 14 failed!" print("RD_LBP_P42g_R2: test 14 passed!") array = np.array([ [[208,55,97,33,77],[194,39,39,121,194],[88,234,225,117,156],[85,174,191,253,182],[201,28,23,140,248]], [[109,186,209,81,31],[191,125,133,228,148],[28,226,6,248,86],[191,22,194,131,103],[48,95,207,25,73]], [[106,74,235,246,197],[146,152,67,159,55],[209,231,5,123,143],[135,94,185,10,37],[218,195,109,81,134]], [[102,83,44,241,110],[127,6,85,57,215],[96,24,117,1,94],[108,245,51,153,140],[16,107,115,249,254]], [[216,20,32,231,52],[17,247,95,175,182],[218,73,57,194,120],[251,217,122,214,94],[20,1,16,170,30]] ], dtype=np.int32) mur = 98 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 25; assert LBPcode==truth, "RD_LBP_P42g_R2: test 15 failed!" 
print("RD_LBP_P42g_R2: test 15 passed!") array = np.array([ [[198,9,91,100,228],[155,24,56,156,82],[8,148,19,73,52],[230,132,149,224,106],[155,0,218,59,146]], [[107,132,35,147,195],[8,169,248,191,29],[106,242,124,139,90],[8,20,228,121,82],[219,234,151,44,1]], [[120,140,20,60,226],[152,242,70,179,39],[210,208,17,173,47],[202,179,186,205,110],[174,73,54,178,251]], [[138,76,178,119,99],[75,28,140,85,179],[41,228,119,166,15],[246,186,150,242,223],[145,181,246,189,240]], [[219,47,31,119,210],[176,214,14,14,35],[137,52,242,9,96],[173,119,60,3,202],[235,120,141,219,106]] ], dtype=np.int32) mur = 150 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 16 failed!" print("RD_LBP_P42g_R2: test 16 passed!") array = np.array([ [[30,153,179,189,9],[54,177,30,105,171],[209,185,96,120,249],[146,145,46,245,92],[11,32,21,233,121]], [[45,169,9,229,210],[190,200,54,68,89],[7,37,249,127,11],[110,86,142,22,19],[38,165,129,72,242]], [[19,106,84,1,13],[135,26,243,29,50],[148,188,88,156,188],[71,136,53,170,47],[131,98,253,119,90]], [[107,21,11,181,173],[111,185,59,142,140],[136,157,15,194,158],[99,251,22,64,210],[231,57,210,59,96]], [[227,124,122,59,182],[103,185,244,32,29],[104,43,125,53,152],[169,50,239,1,176],[9,97,64,35,69]] ], dtype=np.int32) mur = 166 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 15; assert LBPcode==truth, "RD_LBP_P42g_R2: test 17 failed!" print("RD_LBP_P42g_R2: test 17 passed!") array = np.array([ [[208,124,154,122,18],[27,143,228,48,243],[208,180,187,45,98],[17,7,190,28,82],[214,192,189,107,195]], [[14,118,80,125,164],[179,19,161,139,182],[236,181,10,32,13],[218,144,220,46,206],[118,68,129,63,13]], [[2,15,224,63,121],[197,243,129,155,143],[203,78,99,78,51],[75,187,215,57,131],[245,26,71,179,25]], [[223,81,15,126,217],[32,124,231,226,209],[99,44,177,34,61],[178,152,120,63,224],[3,145,91,79,234]], [[44,70,236,119,99],[22,24,37,165,169],[126,170,98,52,175],[43,28,158,118,38],[207,10,125,231,78]] ], dtype=np.int32) mur = 225 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 18 failed!" print("RD_LBP_P42g_R2: test 18 passed!") array = np.array([ [[71,114,193,115,151],[18,23,82,120,145],[37,59,137,217,176],[237,196,153,200,166],[243,195,171,4,218]], [[36,168,26,105,23],[200,251,14,206,215],[84,238,49,208,234],[198,136,21,106,96],[85,36,52,196,235]], [[204,42,92,83,22],[230,29,136,154,241],[230,117,28,192,25],[44,43,25,254,73],[248,47,204,25,197]], [[222,223,247,163,199],[20,101,65,2,189],[0,42,220,222,150],[128,217,222,116,195],[204,121,114,227,232]], [[222,83,19,227,45],[82,254,91,201,98],[42,91,110,163,215],[173,51,231,12,132],[73,254,186,94,71]] ], dtype=np.int32) mur = 82 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 19 failed!" 
print("RD_LBP_P42g_R2: test 19 passed!") array = np.array([ [[203,238,183,117,64],[104,5,197,181,232],[112,133,170,98,27],[217,186,227,140,121],[137,105,172,172,89]], [[16,57,166,220,124],[203,62,157,169,128],[168,105,105,71,170],[128,120,146,109,201],[198,108,205,137,110]], [[253,133,53,190,22],[77,116,203,172,223],[21,97,26,133,152],[219,75,101,238,83],[41,81,209,15,23]], [[92,192,201,99,143],[151,223,129,173,183],[205,248,157,9,167],[87,95,237,37,16],[192,77,97,36,216]], [[111,24,142,115,157],[253,94,53,216,192],[115,138,247,102,164],[159,183,173,41,44],[34,245,78,5,130]] ], dtype=np.int32) mur = 18 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 27; assert LBPcode==truth, "RD_LBP_P42g_R2: test 20 failed!" print("RD_LBP_P42g_R2: test 20 passed!") array = np.array([ [[60,89,135,88,51],[239,250,21,212,223],[175,69,21,150,206],[160,207,239,184,76],[96,38,176,98,26]], [[219,222,7,157,16],[72,204,79,209,206],[234,227,161,89,31],[164,200,90,17,2],[99,199,52,30,79]], [[88,166,249,130,18],[0,101,32,144,236],[26,105,222,38,39],[36,193,56,49,117],[120,130,147,145,164]], [[61,53,207,101,17],[35,110,91,247,216],[172,92,38,225,169],[169,157,187,78,98],[95,189,212,248,217]], [[27,5,13,60,218],[59,183,130,148,143],[77,198,50,30,32],[65,137,79,145,69],[202,178,44,206,216]] ], dtype=np.int32) mur = 211 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 21 failed!" print("RD_LBP_P42g_R2: test 21 passed!") array = np.array([ [[58,122,105,172,0],[129,149,26,149,62],[62,234,222,234,205],[206,151,100,118,31],[47,248,148,174,189]], [[96,142,87,189,106],[141,190,120,162,110],[105,199,17,33,127],[7,186,143,81,12],[194,10,114,228,20]], [[31,228,111,254,165],[89,140,176,21,251],[20,3,59,99,216],[69,17,207,91,117],[136,237,9,105,211]], [[1,195,94,48,241],[85,146,154,184,128],[67,140,107,239,235],[1,45,252,99,229],[155,131,46,45,234]], [[134,171,219,133,95],[141,102,169,192,65],[134,249,185,99,3],[193,181,188,228,92],[234,212,165,40,209]] ], dtype=np.int32) mur = 223 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 22 failed!" print("RD_LBP_P42g_R2: test 22 passed!") array = np.array([ [[93,254,14,165,183],[139,192,178,16,64],[61,155,209,37,178],[107,80,52,204,44],[167,133,75,88,6]], [[13,108,219,56,27],[198,187,237,96,77],[117,10,27,25,249],[215,81,10,81,199],[231,192,69,157,238]], [[236,137,190,139,60],[74,225,9,139,241],[43,81,93,17,83],[29,140,142,118,116],[55,140,130,105,217]], [[250,152,46,186,57],[73,91,240,165,227],[104,32,44,122,236],[212,100,193,166,178],[197,202,132,7,61]], [[57,247,224,172,171],[112,45,224,137,13],[226,56,181,53,223],[223,240,140,71,176],[9,229,9,165,78]] ], dtype=np.int32) mur = 224 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 34; assert LBPcode==truth, "RD_LBP_P42g_R2: test 23 failed!" 
print("RD_LBP_P42g_R2: test 23 passed!") array = np.array([ [[35,183,246,137,193],[181,17,50,242,148],[160,17,210,41,62],[58,155,213,53,202],[114,111,230,156,69]], [[60,205,137,63,198],[198,92,157,154,173],[65,161,156,156,38],[140,206,90,79,56],[165,97,122,119,35]], [[99,112,119,7,199],[201,96,249,122,57],[243,178,213,242,58],[96,110,238,176,45],[107,189,145,89,226]], [[167,21,1,91,177],[239,19,158,6,176],[17,102,160,120,247],[46,151,253,118,227],[13,216,51,73,83]], [[97,89,167,143,11],[168,42,88,178,232],[179,168,191,63,175],[200,59,183,240,29],[141,86,116,63,208]] ], dtype=np.int32) mur = 34 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 24 failed!" print("RD_LBP_P42g_R2: test 24 passed!") array = np.array([ [[122,11,171,102,246],[104,69,138,16,165],[161,195,170,84,75],[16,91,213,208,211],[11,202,124,147,79]], [[212,184,92,77,144],[231,22,37,114,164],[59,219,211,76,188],[62,127,144,120,153],[81,13,226,105,176]], [[196,171,16,85,188],[190,194,178,202,252],[79,25,196,222,77],[162,142,250,224,159],[219,213,74,224,94]], [[240,188,96,144,40],[28,163,44,202,46],[33,10,190,136,113],[32,171,36,28,42],[166,92,112,66,131]], [[116,252,72,23,221],[215,48,114,229,191],[239,171,157,218,167],[61,15,70,199,71],[9,22,46,76,158]] ], dtype=np.int32) mur = 51 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 25 failed!" print("RD_LBP_P42g_R2: test 25 passed!") array = np.array([ [[165,194,204,67,65],[168,85,21,31,10],[123,225,174,36,211],[152,16,111,32,113],[95,33,61,14,171]], [[80,91,7,117,45],[172,110,126,198,29],[56,194,231,148,175],[46,199,123,237,203],[224,207,11,233,102]], [[106,0,118,234,208],[3,198,79,165,131],[44,198,129,38,95],[15,119,63,66,20],[3,232,55,243,162]], [[24,6,97,22,247],[36,94,34,101,99],[7,193,35,200,212],[181,3,13,48,14],[199,183,226,243,60]], [[1,216,143,40,196],[57,182,228,71,242],[138,140,95,159,95],[111,82,125,139,159],[144,190,101,52,106]] ], dtype=np.int32) mur = 65 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 26 failed!" print("RD_LBP_P42g_R2: test 26 passed!") array = np.array([ [[79,169,23,33,222],[57,111,94,78,191],[61,250,81,159,245],[1,220,123,109,123],[117,194,43,71,34]], [[237,0,196,217,110],[130,193,196,136,134],[152,221,74,21,7],[204,58,88,78,210],[190,12,180,226,245]], [[61,119,215,114,51],[118,68,107,26,165],[83,196,29,36,233],[165,126,149,225,151],[185,127,158,131,147]], [[218,118,155,197,148],[251,63,114,1,45],[237,120,219,181,226],[46,43,195,38,48],[232,1,62,78,202]], [[229,60,166,185,179],[50,117,33,138,153],[33,122,69,189,239],[22,160,171,97,151],[111,162,233,100,77]] ], dtype=np.int32) mur = 107 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 22; assert LBPcode==truth, "RD_LBP_P42g_R2: test 27 failed!" 
print("RD_LBP_P42g_R2: test 27 passed!") array = np.array([ [[18,249,201,130,135],[239,55,70,44,148],[140,42,173,50,161],[167,180,191,43,2],[223,59,193,40,148]], [[249,114,54,55,128],[95,90,217,242,70],[182,192,26,30,61],[6,66,96,30,218],[114,228,64,75,44]], [[187,10,210,91,134],[50,10,6,175,47],[24,172,253,5,234],[10,161,186,114,69],[29,199,124,35,74]], [[106,68,81,188,180],[253,219,87,126,17],[253,149,28,9,86],[200,59,40,4,188],[58,107,216,73,180]], [[125,105,141,253,183],[52,45,26,51,54],[224,249,244,97,209],[83,180,12,54,137],[149,174,116,152,151]] ], dtype=np.int32) mur = 232 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 28 failed!" print("RD_LBP_P42g_R2: test 28 passed!") array = np.array([ [[21,122,36,241,181],[118,76,16,122,254],[252,48,170,95,137],[149,230,89,38,12],[148,189,24,161,59]], [[3,52,123,56,118],[229,245,100,130,187],[36,39,157,8,201],[205,138,212,112,215],[50,142,34,112,105]], [[154,46,181,42,192],[127,71,254,125,187],[161,129,65,217,60],[80,184,253,217,134],[87,254,226,145,184]], [[38,127,154,131,165],[206,220,8,196,229],[47,108,41,0,156],[81,189,178,97,20],[188,61,47,85,124]], [[74,75,5,196,199],[19,142,182,98,108],[159,211,155,215,0],[59,20,56,67,207],[153,127,71,124,70]] ], dtype=np.int32) mur = 160 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 20; assert LBPcode==truth, "RD_LBP_P42g_R2: test 29 failed!" print("RD_LBP_P42g_R2: test 29 passed!") array = np.array([ [[30,53,132,5,117],[244,194,218,88,78],[126,67,120,35,51],[243,201,50,4,128],[4,250,199,124,11]], [[15,207,110,53,123],[251,100,44,131,31],[104,131,174,87,249],[177,107,175,25,18],[249,186,184,31,194]], [[200,34,10,59,188],[1,241,235,149,72],[192,161,155,122,92],[89,154,94,112,84],[14,42,154,93,74]], [[219,58,50,114,48],[191,81,169,242,175],[63,0,188,5,153],[193,207,236,203,232],[57,36,206,251,139]], [[218,187,51,219,247],[218,193,236,30,102],[160,54,168,166,156],[112,167,237,153,156],[80,72,164,19,144]] ], dtype=np.int32) mur = 251 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 30 failed!" print("RD_LBP_P42g_R2: test 30 passed!") array = np.array([ [[40,97,199,116,240],[150,88,79,136,217],[201,151,80,173,64],[196,49,155,123,196],[106,180,149,165,227]], [[173,220,161,171,231],[159,52,127,17,166],[86,74,161,108,41],[221,229,177,221,95],[228,238,194,175,88]], [[93,127,217,41,9],[58,16,0,41,192],[194,142,61,181,161],[200,58,209,134,2],[106,214,158,61,239]], [[101,37,234,160,47],[201,103,22,178,65],[156,135,193,134,165],[26,47,118,45,39],[145,6,16,66,183]], [[222,243,125,68,198],[147,57,117,249,225],[32,40,187,172,50],[12,221,122,106,215],[143,165,228,22,229]] ], dtype=np.int32) mur = 3 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 22; assert LBPcode==truth, "RD_LBP_P42g_R2: test 31 failed!" 
print("RD_LBP_P42g_R2: test 31 passed!") array = np.array([ [[166,231,148,162,204],[162,54,71,103,84],[161,8,141,148,2],[28,215,41,99,178],[253,69,153,121,45]], [[37,162,215,7,67],[32,159,138,228,64],[58,145,9,94,189],[168,56,48,235,202],[173,81,79,109,244]], [[40,208,194,63,230],[225,209,184,152,56],[192,144,246,77,76],[209,165,69,36,49],[87,117,110,212,138]], [[155,99,110,209,84],[201,237,111,205,99],[90,208,197,148,221],[241,250,98,206,247],[78,91,243,131,37]], [[90,97,181,210,78],[248,45,254,171,179],[177,31,143,191,122],[49,3,174,55,207],[192,119,67,230,168]] ], dtype=np.int32) mur = 95 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 32 failed!" print("RD_LBP_P42g_R2: test 32 passed!") array = np.array([ [[246,83,18,248,200],[75,157,193,39,130],[16,169,90,186,127],[191,77,193,115,90],[35,167,24,92,183]], [[178,209,219,71,224],[39,98,41,172,223],[73,75,244,226,5],[176,161,129,12,182],[178,184,220,45,206]], [[68,203,184,215,91],[185,64,20,105,16],[11,166,188,6,173],[217,99,87,216,207],[165,130,211,183,27]], [[60,49,195,12,36],[13,109,76,75,14],[33,181,94,34,142],[23,221,177,149,78],[8,23,228,83,215]], [[76,42,51,222,184],[31,110,185,37,41],[4,180,60,101,54],[41,47,130,30,84],[174,131,65,42,248]] ], dtype=np.int32) mur = 129 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 23; assert LBPcode==truth, "RD_LBP_P42g_R2: test 33 failed!" print("RD_LBP_P42g_R2: test 33 passed!") array = np.array([ [[148,108,111,26,164],[181,100,184,99,232],[194,127,35,47,34],[63,184,239,230,116],[89,77,169,30,75]], [[114,71,183,70,184],[67,35,83,103,94],[111,166,124,240,222],[138,244,103,198,63],[119,161,11,0,82]], [[102,29,120,54,195],[184,219,192,22,138],[6,90,119,125,175],[116,196,233,136,164],[148,10,241,148,253]], [[237,10,5,231,190],[52,38,202,88,1],[39,65,115,86,233],[55,177,152,32,211],[165,16,41,63,59]], [[55,194,159,133,163],[84,141,3,203,7],[198,230,80,100,125],[125,132,247,195,74],[50,190,229,31,229]] ], dtype=np.int32) mur = 177 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 16; assert LBPcode==truth, "RD_LBP_P42g_R2: test 34 failed!" print("RD_LBP_P42g_R2: test 34 passed!") array = np.array([ [[68,99,55,202,69],[253,22,58,110,40],[6,40,190,110,29],[152,195,230,215,211],[149,56,120,91,210]], [[165,72,17,184,149],[196,132,133,139,173],[0,61,241,93,166],[56,238,239,190,72],[138,121,148,53,151]], [[171,154,148,83,244],[49,120,14,192,211],[244,53,254,116,249],[97,238,128,105,135],[123,177,231,28,81]], [[39,123,138,229,242],[241,62,229,178,241],[84,216,197,175,194],[27,67,126,211,182],[201,143,17,241,168]], [[136,198,139,246,210],[243,130,89,212,169],[26,181,198,31,45],[90,66,69,197,115],[86,214,213,186,214]] ], dtype=np.int32) mur = 128 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 18; assert LBPcode==truth, "RD_LBP_P42g_R2: test 35 failed!" 
print("RD_LBP_P42g_R2: test 35 passed!") array = np.array([ [[224,59,227,23,14],[34,30,184,181,131],[181,254,219,247,43],[157,120,193,120,192],[198,34,103,128,41]], [[186,192,8,12,46],[244,178,86,194,65],[166,215,109,42,207],[122,218,215,161,237],[131,54,226,17,78]], [[44,171,157,248,94],[15,215,5,190,247],[209,79,76,176,105],[206,40,125,170,171],[6,137,43,101,139]], [[147,127,238,120,156],[250,239,72,140,1],[10,33,1,51,200],[176,187,24,108,218],[243,111,3,102,233]], [[142,132,65,3,201],[129,206,52,226,144],[37,97,141,190,184],[52,217,92,18,73],[58,194,6,64,251]] ], dtype=np.int32) mur = 149 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 36 failed!" print("RD_LBP_P42g_R2: test 36 passed!") array = np.array([ [[33,103,198,159,216],[46,70,94,32,215],[149,5,252,142,226],[175,238,82,60,78],[187,67,249,133,72]], [[158,239,17,132,181],[198,86,110,191,22],[123,175,42,136,222],[89,72,201,2,44],[190,136,20,150,115]], [[161,8,124,111,211],[244,103,186,59,190],[105,148,220,58,179],[175,54,244,239,196],[180,134,3,164,230]], [[61,176,244,208,144],[235,22,21,185,89],[168,209,223,178,224],[225,154,32,134,247],[249,20,229,222,48]], [[241,20,140,15,79],[97,32,218,127,64],[237,65,118,112,92],[192,164,50,101,212],[2,208,139,92,89]] ], dtype=np.int32) mur = 168 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 16; assert LBPcode==truth, "RD_LBP_P42g_R2: test 37 failed!" print("RD_LBP_P42g_R2: test 37 passed!") array = np.array([ [[182,44,56,19,129],[57,92,154,102,144],[143,152,193,150,149],[241,204,237,77,218],[63,11,29,80,66]], [[163,109,53,145,132],[217,165,39,113,230],[32,253,41,239,109],[219,44,139,170,3],[112,9,47,201,47]], [[173,203,112,134,94],[17,178,105,114,29],[136,176,145,123,94],[241,56,248,249,203],[228,233,47,22,61]], [[49,202,7,66,81],[216,192,187,227,88],[100,141,91,57,173],[219,177,94,76,235],[249,116,52,218,135]], [[15,208,77,24,62],[253,242,123,36,249],[210,100,207,109,215],[223,1,23,92,213],[179,108,137,232,19]] ], dtype=np.int32) mur = 171 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 18; assert LBPcode==truth, "RD_LBP_P42g_R2: test 38 failed!" print("RD_LBP_P42g_R2: test 38 passed!") array = np.array([ [[77,196,246,102,179],[195,246,90,138,151],[47,47,248,42,50],[50,210,12,248,144],[123,173,139,128,168]], [[84,216,159,202,143],[246,246,51,230,83],[137,116,24,163,195],[114,51,108,26,36],[211,215,53,50,185]], [[96,135,75,56,183],[142,177,105,163,183],[12,148,122,168,243],[71,252,84,4,177],[251,254,74,134,179]], [[105,203,144,248,237],[157,146,46,253,171],[54,147,115,120,151],[206,233,52,156,119],[210,87,158,239,247]], [[102,239,73,132,219],[197,242,182,29,156],[112,146,45,103,210],[228,169,90,22,188],[5,112,100,113,157]] ], dtype=np.int32) mur = 252 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 39 failed!" 
print("RD_LBP_P42g_R2: test 39 passed!") array = np.array([ [[10,82,131,132,115],[206,201,236,160,182],[107,242,99,111,38],[122,141,47,182,134],[109,180,16,128,212]], [[142,238,207,144,30],[158,230,69,136,114],[105,31,173,215,117],[116,118,153,153,215],[221,225,135,51,116]], [[39,202,194,210,183],[4,97,138,92,44],[150,254,22,145,122],[115,225,24,126,7],[164,89,20,13,159]], [[202,135,225,158,166],[132,49,74,76,17],[221,180,10,106,98],[7,9,92,24,2],[235,104,68,79,20]], [[180,111,0,52,141],[34,37,205,17,155],[89,28,5,174,84],[220,222,173,230,22],[13,160,101,106,67]] ], dtype=np.int32) mur = 181 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 40 failed!" print("RD_LBP_P42g_R2: test 40 passed!") array = np.array([ [[119,249,59,146,67],[104,246,221,150,132],[123,4,64,150,22],[170,227,73,180,103],[88,15,76,71,238]], [[219,189,135,29,20],[232,91,72,222,104],[170,217,115,156,8],[81,47,66,164,90],[137,115,215,190,244]], [[250,41,200,118,8],[181,178,223,206,16],[184,252,6,163,36],[225,176,107,22,35],[94,53,120,183,11]], [[155,215,160,53,154],[120,149,101,98,47],[121,54,183,186,165],[155,213,128,151,16],[187,205,253,196,235]], [[241,196,118,244,110],[128,153,106,52,76],[63,77,96,33,34],[126,76,47,38,214],[28,27,101,72,138]] ], dtype=np.int32) mur = 45 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 41 failed!" print("RD_LBP_P42g_R2: test 41 passed!") array = np.array([ [[219,143,75,123,45],[65,233,55,100,79],[117,69,115,210,53],[182,115,242,39,242],[179,125,101,26,27]], [[7,209,160,136,2],[180,53,154,33,54],[70,19,97,65,247],[137,3,184,18,162],[142,241,252,253,225]], [[89,224,74,192,226],[136,73,61,202,162],[138,20,76,24,87],[154,118,44,1,184],[135,66,81,182,149]], [[236,183,132,241,1],[219,222,103,212,75],[163,37,245,250,11],[233,200,213,200,29],[245,24,172,196,43]], [[17,179,136,147,210],[3,159,70,62,122],[19,59,224,151,114],[211,25,81,81,71],[13,199,6,106,251]] ], dtype=np.int32) mur = 43 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 32; assert LBPcode==truth, "RD_LBP_P42g_R2: test 42 failed!" print("RD_LBP_P42g_R2: test 42 passed!") array = np.array([ [[5,190,5,113,165],[146,157,77,136,60],[110,243,3,148,148],[54,23,201,248,212],[129,169,55,41,107]], [[68,180,95,110,40],[89,217,145,178,51],[251,178,74,1,103],[38,112,26,254,122],[200,236,91,42,7]], [[133,118,46,81,159],[135,119,190,181,192],[102,52,209,187,87],[240,230,101,170,90],[50,184,146,186,197]], [[127,240,249,205,157],[201,168,44,180,216],[223,179,223,126,68],[243,148,112,226,170],[214,233,95,218,179]], [[77,201,179,222,220],[194,113,44,254,112],[190,106,58,1,219],[172,69,108,164,8],[30,162,181,168,154]] ], dtype=np.int32) mur = 106 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 43 failed!" 
print("RD_LBP_P42g_R2: test 43 passed!") array = np.array([ [[225,58,111,126,155],[157,142,183,214,76],[122,46,30,172,220],[52,182,89,87,55],[244,26,200,87,129]], [[167,17,50,165,243],[214,108,131,79,56],[235,13,221,62,126],[145,225,23,169,94],[168,125,25,250,232]], [[240,226,63,147,13],[201,108,124,10,87],[48,238,250,85,148],[83,59,2,126,191],[248,127,39,105,190]], [[102,232,7,187,69],[71,164,147,31,183],[60,186,180,133,183],[231,199,224,129,214],[63,52,75,173,206]], [[116,230,132,191,37],[215,20,245,220,141],[120,135,10,48,220],[161,196,119,168,14],[17,243,66,105,223]] ], dtype=np.int32) mur = 91 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 22; assert LBPcode==truth, "RD_LBP_P42g_R2: test 44 failed!" print("RD_LBP_P42g_R2: test 44 passed!") array = np.array([ [[206,31,146,226,113],[201,104,221,152,34],[209,152,47,230,163],[1,36,53,129,41],[15,167,22,117,200]], [[134,47,213,102,63],[19,214,252,49,119],[99,249,169,68,176],[91,57,233,6,134],[143,188,177,172,161]], [[56,58,244,42,229],[173,141,215,21,167],[48,237,39,142,169],[213,216,31,37,71],[100,185,218,109,26]], [[145,220,11,2,41],[105,229,116,153,113],[13,121,88,54,82],[237,74,144,171,141],[31,49,253,187,30]], [[31,194,82,171,3],[18,223,8,139,9],[216,103,132,105,22],[80,218,156,228,182],[210,202,217,31,164]] ], dtype=np.int32) mur = 168 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 24; assert LBPcode==truth, "RD_LBP_P42g_R2: test 45 failed!" print("RD_LBP_P42g_R2: test 45 passed!") array = np.array([ [[193,242,5,41,242],[85,112,81,110,96],[148,182,33,73,206],[88,181,173,22,2],[78,238,170,143,248]], [[59,58,80,208,242],[31,33,200,169,62],[252,55,133,216,193],[80,161,18,158,69],[173,27,105,55,110]], [[130,214,134,119,79],[49,83,143,21,128],[165,148,115,243,87],[150,212,165,16,23],[228,148,6,240,13]], [[31,140,228,163,153],[100,191,239,147,165],[32,221,160,92,83],[35,117,222,134,128],[65,47,54,81,41]], [[61,48,185,213,41],[29,197,94,165,104],[151,7,131,239,60],[211,113,236,69,68],[228,48,251,212,64]] ], dtype=np.int32) mur = 64 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 46 failed!" print("RD_LBP_P42g_R2: test 46 passed!") array = np.array([ [[102,66,170,247,124],[121,188,124,150,1],[142,177,214,37,187],[222,94,155,61,70],[152,240,118,15,237]], [[233,230,28,182,20],[119,89,98,29,65],[215,95,8,68,127],[1,160,156,252,14],[98,238,167,127,56]], [[199,121,4,64,20],[130,103,63,123,147],[96,165,32,188,101],[196,157,216,186,21],[34,249,179,163,221]], [[2,202,5,155,184],[33,108,50,230,12],[117,172,224,8,97],[209,166,159,90,137],[27,230,105,253,22]], [[186,249,34,185,126],[43,100,231,233,228],[196,228,118,147,9],[243,167,127,247,83],[123,225,153,208,155]] ], dtype=np.int32) mur = 143 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 25; assert LBPcode==truth, "RD_LBP_P42g_R2: test 47 failed!" 
print("RD_LBP_P42g_R2: test 47 passed!") array = np.array([ [[90,187,50,83,174],[102,120,103,125,117],[19,138,142,100,207],[166,152,162,161,79],[82,229,187,137,131]], [[153,230,62,237,38],[13,35,221,134,20],[159,133,26,26,251],[112,19,200,66,117],[85,91,167,85,19]], [[204,89,161,219,7],[58,229,219,153,87],[244,11,10,226,28],[164,48,91,124,140],[91,75,159,48,122]], [[132,189,66,23,182],[90,156,80,66,104],[28,76,242,68,184],[144,75,151,156,135],[146,183,98,208,152]], [[228,116,188,200,187],[180,249,93,166,96],[36,136,100,179,242],[83,95,83,96,183],[13,202,213,2,113]] ], dtype=np.int32) mur = 118 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 23; assert LBPcode==truth, "RD_LBP_P42g_R2: test 48 failed!" print("RD_LBP_P42g_R2: test 48 passed!") array = np.array([ [[132,35,25,129,141],[28,84,234,230,133],[188,105,239,180,68],[198,154,77,76,41],[192,210,163,218,180]], [[6,150,127,198,155],[85,119,47,173,89],[26,4,103,107,252],[176,225,30,176,86],[30,225,4,245,20]], [[189,244,190,132,176],[108,157,42,157,25],[231,112,2,120,97],[164,216,208,17,95],[245,231,91,47,201]], [[41,166,134,243,117],[233,199,149,152,48],[149,132,249,122,215],[190,12,175,156,249],[48,220,123,46,229]], [[184,120,63,116,22],[186,223,62,195,248],[158,40,24,94,106],[186,212,134,27,167],[8,9,97,55,93]] ], dtype=np.int32) mur = 24 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 26; assert LBPcode==truth, "RD_LBP_P42g_R2: test 49 failed!" print("RD_LBP_P42g_R2: test 49 passed!") array = np.array([ [[218,177,126,33,177],[30,177,104,195,206],[237,247,85,118,29],[170,49,76,220,111],[146,78,45,136,213]], [[11,193,194,81,181],[159,173,159,159,174],[48,181,144,134,16],[83,244,219,193,246],[4,85,47,28,71]], [[137,141,251,178,27],[140,103,59,94,154],[150,246,139,176,195],[189,117,89,222,240],[54,0,83,81,107]], [[174,36,86,98,196],[243,126,206,222,189],[147,251,203,130,156],[126,164,211,62,160],[124,72,126,239,72]], [[208,28,128,229,72],[50,230,132,22,192],[115,76,63,188,167],[181,87,239,150,211],[162,38,198,36,213]] ], dtype=np.int32) mur = 237 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 16; assert LBPcode==truth, "RD_LBP_P42g_R2: test 50 failed!" print("RD_LBP_P42g_R2: test 50 passed!") array = np.array([ [[245,179,200,150,65],[227,23,227,235,220],[189,153,80,220,7],[123,79,77,100,199],[83,187,132,130,28]], [[109,51,179,125,103],[103,116,217,122,80],[248,252,175,99,6],[144,42,203,236,17],[162,83,89,63,195]], [[40,34,72,113,147],[69,157,188,17,247],[50,233,199,235,18],[59,200,88,60,137],[211,136,218,184,98]], [[233,49,0,119,8],[22,182,18,243,69],[197,50,125,172,10],[220,185,86,121,120],[47,98,252,176,174]], [[100,108,80,30,88],[119,152,53,99,28],[196,28,44,88,184],[165,153,189,14,142],[210,170,130,151,215]] ], dtype=np.int32) mur = 167 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 51 failed!" 
print("RD_LBP_P42g_R2: test 51 passed!") array = np.array([ [[146,110,217,216,84],[93,197,19,129,116],[239,241,232,28,31],[214,174,110,143,63],[14,129,168,228,250]], [[169,156,166,7,105],[133,80,99,252,10],[73,175,122,145,173],[34,25,65,159,252],[53,104,7,212,80]], [[109,88,144,211,212],[130,68,81,252,57],[179,199,12,92,101],[32,82,44,19,163],[111,163,198,182,131]], [[84,235,219,124,224],[116,230,94,206,74],[124,15,254,87,35],[56,244,70,106,218],[68,182,162,68,250]], [[251,107,146,73,29],[241,95,238,27,208],[40,197,68,10,4],[23,127,143,245,149],[25,244,28,32,19]] ], dtype=np.int32) mur = 199 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 27; assert LBPcode==truth, "RD_LBP_P42g_R2: test 52 failed!" print("RD_LBP_P42g_R2: test 52 passed!") array = np.array([ [[252,231,135,73,252],[240,183,168,55,122],[149,91,39,171,66],[99,14,195,172,117],[201,86,6,153,140]], [[60,32,190,181,100],[150,125,79,175,17],[129,179,125,240,202],[138,45,38,114,6],[212,78,165,6,22]], [[187,22,219,77,56],[200,156,19,61,178],[35,21,102,117,234],[131,120,36,112,173],[125,70,233,74,4]], [[6,80,158,8,50],[47,238,65,62,207],[21,2,177,172,36],[67,100,207,207,111],[120,123,188,16,160]], [[101,11,1,62,101],[206,177,35,138,95],[37,219,132,156,57],[12,47,229,111,30],[129,140,112,82,149]] ], dtype=np.int32) mur = 209 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 53 failed!" print("RD_LBP_P42g_R2: test 53 passed!") array = np.array([ [[193,20,209,83,61],[3,34,102,32,81],[102,244,12,9,156],[41,172,153,252,18],[191,226,15,91,73]], [[23,230,22,96,15],[89,158,126,181,177],[48,138,228,233,90],[250,84,156,197,66],[74,7,232,155,102]], [[21,240,98,151,53],[214,193,96,163,99],[242,168,86,160,18],[177,24,191,244,104],[153,164,24,152,18]], [[51,47,180,216,183],[132,79,3,39,160],[184,252,118,41,236],[89,170,196,77,184],[45,192,178,196,227]], [[248,32,28,177,63],[136,229,178,131,51],[102,79,202,36,191],[66,121,50,54,230],[50,181,86,87,180]] ], dtype=np.int32) mur = 148 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 17; assert LBPcode==truth, "RD_LBP_P42g_R2: test 54 failed!" print("RD_LBP_P42g_R2: test 54 passed!") array = np.array([ [[24,95,252,157,201],[202,245,65,230,31],[134,149,113,136,154],[107,8,67,240,109],[90,190,189,59,191]], [[185,40,172,229,250],[195,159,53,123,24],[201,125,109,63,3],[126,64,230,168,88],[112,188,140,11,189]], [[165,36,42,150,92],[53,124,32,126,181],[56,219,155,5,182],[81,21,105,189,67],[223,27,101,248,237]], [[17,37,77,186,145],[25,135,203,82,184],[153,203,200,209,245],[63,192,38,150,145],[246,45,217,101,36]], [[175,36,89,59,101],[123,49,96,174,36],[82,204,106,244,177],[2,149,53,194,158],[147,83,163,43,252]] ], dtype=np.int32) mur = 106 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 55 failed!" 
print("RD_LBP_P42g_R2: test 55 passed!") array = np.array([ [[170,231,10,107,140],[113,26,131,63,162],[147,13,78,165,135],[71,209,225,108,103],[101,131,201,131,125]], [[251,96,40,208,217],[169,133,183,26,19],[67,240,57,43,126],[22,137,206,68,18],[228,81,114,161,101]], [[88,182,220,22,206],[33,135,24,15,218],[107,68,90,75,155],[116,243,191,124,178],[31,22,160,139,246]], [[206,103,110,45,128],[114,48,86,89,48],[129,43,7,244,196],[7,128,134,112,114],[124,33,61,166,0]], [[126,118,48,58,191],[30,31,15,251,103],[84,130,252,16,233],[25,123,92,7,232],[4,117,45,206,148]] ], dtype=np.int32) mur = 105 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 27; assert LBPcode==truth, "RD_LBP_P42g_R2: test 56 failed!" print("RD_LBP_P42g_R2: test 56 passed!") array = np.array([ [[32,223,54,151,154],[171,179,230,33,135],[230,31,216,66,208],[46,214,8,172,224],[11,147,34,143,154]], [[103,54,161,117,37],[204,73,61,29,97],[122,51,85,28,144],[148,12,182,68,11],[7,150,229,100,46]], [[167,70,243,100,189],[1,17,66,100,238],[93,167,244,88,221],[3,162,163,13,218],[68,98,228,86,242]], [[200,104,144,213,106],[78,222,32,141,101],[122,233,124,141,179],[35,251,26,69,171],[198,177,89,9,77]], [[169,94,94,83,91],[149,251,66,43,234],[130,210,150,11,99],[12,18,146,48,186],[168,202,73,91,2]] ], dtype=np.int32) mur = 136 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 57 failed!" print("RD_LBP_P42g_R2: test 57 passed!") array = np.array([ [[251,62,202,99,199],[129,117,206,7,56],[178,112,110,101,217],[188,222,185,18,159],[172,27,198,60,119]], [[117,191,250,37,86],[235,81,50,53,189],[181,218,145,50,39],[119,131,51,14,248],[93,108,232,12,57]], [[200,39,25,131,135],[254,156,143,80,212],[78,215,183,9,38],[252,212,147,105,128],[97,176,237,164,155]], [[47,227,198,104,205],[104,5,233,182,54],[87,102,92,137,21],[113,114,31,47,90],[101,153,156,172,121]], [[75,78,95,46,47],[88,69,79,159,186],[30,120,239,139,170],[3,146,104,246,118],[180,76,124,193,155]] ], dtype=np.int32) mur = 1 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 58 failed!" print("RD_LBP_P42g_R2: test 58 passed!") array = np.array([ [[179,107,138,52,120],[65,15,76,133,92],[80,154,37,10,217],[186,124,194,186,205],[181,38,109,83,108]], [[250,209,106,120,203],[122,209,39,210,20],[39,180,148,155,140],[20,96,98,128,248],[6,74,152,0,48]], [[252,167,141,110,123],[165,5,67,238,228],[181,131,93,202,21],[158,102,97,240,36],[183,198,104,77,94]], [[219,154,24,212,1],[223,151,16,25,14],[243,71,84,78,10],[251,188,206,49,235],[36,231,129,14,132]], [[194,64,151,24,102],[54,99,144,230,143],[226,71,216,97,168],[63,112,21,213,102],[159,146,41,23,225]] ], dtype=np.int32) mur = 55 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 21; assert LBPcode==truth, "RD_LBP_P42g_R2: test 59 failed!" 
print("RD_LBP_P42g_R2: test 59 passed!") array = np.array([ [[238,206,16,231,25],[229,67,218,43,94],[90,176,250,172,156],[106,149,178,185,210],[164,185,228,45,134]], [[120,11,217,55,228],[144,202,193,122,105],[218,145,159,191,40],[122,50,60,214,249],[222,135,4,14,250]], [[153,194,239,111,162],[234,129,237,220,213],[108,106,153,182,133],[181,152,76,145,247],[151,157,246,66,189]], [[135,112,237,59,47],[251,27,180,34,48],[34,243,43,28,182],[3,38,24,230,181],[40,254,117,219,131]], [[54,38,112,85,63],[132,242,44,59,45],[129,94,215,73,151],[74,22,87,175,38],[141,73,254,25,180]] ], dtype=np.int32) mur = 177 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 60 failed!" print("RD_LBP_P42g_R2: test 60 passed!") array = np.array([ [[54,128,249,54,29],[213,124,78,215,38],[127,227,143,218,177],[123,75,217,87,42],[197,86,2,129,123]], [[179,130,49,70,58],[233,159,53,120,22],[236,31,226,0,213],[5,66,178,42,135],[106,16,10,70,225]], [[2,229,39,58,115],[141,211,235,71,26],[50,245,14,3,201],[223,21,154,123,8],[185,45,163,78,89]], [[209,155,168,132,93],[171,159,132,226,133],[87,81,141,181,198],[23,58,141,185,236],[214,123,169,21,91]], [[106,185,230,127,86],[179,252,94,224,193],[64,242,219,125,120],[52,43,111,187,143],[45,34,195,205,204]] ], dtype=np.int32) mur = 59 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 61 failed!" print("RD_LBP_P42g_R2: test 61 passed!") array = np.array([ [[109,197,86,51,115],[103,139,230,104,142],[244,253,150,81,112],[108,222,226,50,238],[183,182,105,68,63]], [[226,40,82,58,45],[115,197,203,32,50],[17,208,53,99,88],[10,216,144,110,89],[56,163,97,96,222]], [[103,183,246,163,166],[146,102,77,232,139],[217,171,63,148,165],[157,187,186,45,162],[191,112,253,253,129]], [[188,71,110,136,190],[151,187,43,50,189],[252,90,108,214,29],[177,140,251,170,95],[58,61,225,156,152]], [[243,71,115,138,55],[152,137,52,229,95],[76,117,204,27,248],[220,234,47,24,197],[140,18,137,104,116]] ], dtype=np.int32) mur = 219 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 62 failed!" print("RD_LBP_P42g_R2: test 62 passed!") array = np.array([ [[147,96,94,227,222],[8,21,182,104,164],[120,54,170,243,93],[40,26,62,253,67],[105,91,148,147,48]], [[102,25,186,86,192],[181,77,178,30,10],[110,181,4,222,246],[217,225,47,185,61],[25,235,100,9,18]], [[44,97,220,4,24],[246,79,207,252,92],[58,122,168,65,131],[51,100,189,96,75],[140,29,57,196,107]], [[244,227,160,231,183],[237,155,216,39,2],[188,12,154,124,5],[131,135,17,186,14],[243,67,104,158,65]], [[36,6,238,118,172],[89,89,3,88,44],[181,139,123,113,135],[29,230,33,204,249],[244,247,102,190,42]] ], dtype=np.int32) mur = 83 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 19; assert LBPcode==truth, "RD_LBP_P42g_R2: test 63 failed!" 
print("RD_LBP_P42g_R2: test 63 passed!") array = np.array([ [[236,242,155,26,204],[125,185,114,88,57],[68,150,38,24,119],[213,135,91,198,42],[75,71,133,111,29]], [[212,166,80,129,131],[95,187,213,223,102],[213,65,4,162,220],[41,82,136,140,76],[164,176,34,62,197]], [[151,154,161,199,136],[160,22,123,127,50],[26,107,192,59,227],[190,73,249,35,252],[163,109,133,235,162]], [[163,76,28,195,229],[60,19,247,124,158],[104,103,216,242,39],[5,150,59,18,157],[189,197,63,123,92]], [[136,103,206,157,142],[188,172,24,11,218],[26,86,179,82,124],[214,198,207,75,136],[124,1,180,225,29]] ], dtype=np.int32) mur = 129 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 19; assert LBPcode==truth, "RD_LBP_P42g_R2: test 64 failed!" print("RD_LBP_P42g_R2: test 64 passed!") array = np.array([ [[14,188,121,139,10],[190,50,234,93,170],[97,70,250,6,59],[179,69,43,174,244],[254,226,68,65,73]], [[143,176,172,139,127],[100,207,160,189,222],[188,8,27,240,180],[184,4,23,138,29],[252,84,180,113,91]], [[23,235,170,122,35],[112,73,133,163,215],[219,210,179,51,76],[248,248,96,193,116],[184,70,115,45,227]], [[229,186,234,166,177],[253,223,167,228,252],[79,170,187,73,75],[195,201,238,116,217],[158,206,28,7,20]], [[160,41,115,172,139],[35,22,38,248,90],[27,1,207,96,80],[123,137,162,164,123],[35,23,48,31,63]] ], dtype=np.int32) mur = 252 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 23; assert LBPcode==truth, "RD_LBP_P42g_R2: test 65 failed!" print("RD_LBP_P42g_R2: test 65 passed!") array = np.array([ [[214,199,241,200,146],[15,215,153,7,18],[202,22,142,6,139],[0,149,214,68,251],[200,6,19,211,106]], [[155,181,251,69,212],[45,155,198,227,230],[244,250,13,219,239],[97,44,48,238,186],[192,115,164,104,13]], [[244,150,170,168,158],[103,126,167,227,159],[247,85,23,169,155],[90,236,102,1,71],[35,93,85,134,93]], [[175,16,34,185,41],[177,89,137,186,120],[154,163,216,136,187],[238,134,173,214,17],[131,79,31,139,248]], [[22,202,139,184,109],[221,37,145,163,72],[134,208,240,213,213],[208,128,246,232,57],[178,90,42,98,146]] ], dtype=np.int32) mur = 165 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 66 failed!" print("RD_LBP_P42g_R2: test 66 passed!") array = np.array([ [[214,201,179,145,184],[94,248,131,53,150],[26,238,204,166,90],[65,211,229,145,137],[150,253,235,197,76]], [[113,81,26,13,151],[15,59,149,157,0],[200,129,155,176,32],[140,116,140,231,212],[27,126,128,199,182]], [[55,155,36,252,90],[73,253,167,116,3],[102,208,216,24,160],[213,65,26,137,233],[20,150,171,215,52]], [[121,73,210,200,166],[207,44,214,143,219],[65,154,80,210,217],[100,244,97,150,197],[32,57,26,65,64]], [[98,47,169,22,231],[215,127,210,140,65],[138,197,190,128,74],[69,129,154,203,231],[228,164,72,14,237]] ], dtype=np.int32) mur = 36 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 25; assert LBPcode==truth, "RD_LBP_P42g_R2: test 67 failed!" 
print("RD_LBP_P42g_R2: test 67 passed!") array = np.array([ [[160,125,114,121,12],[145,181,84,20,44],[135,50,122,2,106],[247,243,232,101,76],[151,69,32,58,50]], [[96,20,200,29,63],[223,138,192,35,174],[121,76,105,152,40],[174,192,111,217,203],[20,119,31,183,65]], [[78,195,83,26,16],[134,19,240,28,57],[79,156,144,104,27],[58,49,184,148,172],[228,136,87,115,242]], [[231,34,59,44,16],[46,224,180,122,119],[199,104,176,10,34],[67,111,142,28,185],[44,191,36,234,78]], [[108,165,102,239,19],[106,218,57,107,62],[245,162,171,201,229],[4,47,57,182,43],[247,158,6,61,76]] ], dtype=np.int32) mur = 65 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 14; assert LBPcode==truth, "RD_LBP_P42g_R2: test 68 failed!" print("RD_LBP_P42g_R2: test 68 passed!") array = np.array([ [[3,208,180,2,108],[206,31,208,26,184],[73,132,22,24,42],[128,204,127,153,86],[66,168,149,7,172]], [[202,20,148,202,216],[132,217,161,244,219],[38,40,21,205,117],[43,12,26,121,136],[92,30,74,88,152]], [[160,33,102,250,62],[69,44,132,238,34],[59,17,9,207,146],[127,118,198,168,231],[91,185,237,43,70]], [[213,252,242,190,121],[128,43,139,51,102],[42,233,85,142,238],[59,102,154,76,128],[20,21,35,48,205]], [[199,147,29,249,29],[197,168,57,252,183],[102,215,132,28,71],[181,3,200,23,126],[39,195,31,75,40]] ], dtype=np.int32) mur = 141 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 69 failed!" print("RD_LBP_P42g_R2: test 69 passed!") array = np.array([ [[210,235,135,226,197],[234,209,226,177,191],[94,180,221,91,186],[244,95,11,92,2],[120,52,198,82,217]], [[163,218,212,248,66],[137,246,24,199,152],[239,13,229,141,85],[23,68,64,4,76],[202,23,165,154,149]], [[51,207,5,217,61],[79,126,200,114,89],[244,67,56,8,131],[253,71,0,133,99],[227,243,57,50,27]], [[54,175,114,215,240],[85,143,231,44,182],[51,224,59,50,169],[110,243,110,141,123],[148,99,230,10,190]], [[60,50,14,142,207],[179,184,161,140,54],[28,183,228,9,168],[48,106,218,212,226],[172,225,144,126,218]] ], dtype=np.int32) mur = 209 V = 3 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array) truth = 43; assert LBPcode==truth, "RD_LBP_P42g_R2: test 70 failed!" print("RD_LBP_P42g_R2: test 70 passed!") array =
np.array([ [[66,254,113,216,105],[83,58,137,155,176],[91,8,29,67,231],[8,56,179,226,32],[173,1,93,70,209]], [[137,125,170,31,137],[178,116,113,189,66],[90,135,120,91,6],[177,232,238,43,192],[100,115,83,113,133]], [[239,236,125,125,206],[71,92,73,215,214],[29,155,223,198,132],[72,104,1,94,64],[156,120,93,51,153]], [[213,7,74,40,172],[237,210,155,64,89],[49,95,196,8,107],[128,77,253,16,124],[106,63,57,56,36]], [[38,182,236,70,90],[129,63,142,242,97],[177,81,71,2,222],[113,5,144,48,248],[168,114,240,35,192]] ], dtype=np.int32) mur = 140 V = 4 lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V) LBPcode = lbp.convert(array)
numpy.array
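# The functional tests above all repeat one pattern: build a 5x5x5 int32
# volume, construct the descriptor from (mur, V), compute the LBP code, and
# compare with a known value. A data-driven sketch of the same check,
# assuming only the ext3DLBPpy.RD_LBP_P42g_R2 interface exercised above
# (check_case is a hypothetical helper, not part of the library):
#
#   import numpy as np
#   import ext3DLBPpy
#
#   def check_case(volume, mur, V, expected, label):
#       lbp = ext3DLBPpy.RD_LBP_P42g_R2(mur, V)
#       code = lbp.convert(np.asarray(volume, dtype=np.int32))
#       assert code == expected, "RD_LBP_P42g_R2: %s failed!" % label
#       print("RD_LBP_P42g_R2: %s passed!" % label)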
import numpy as np
import numpy.linalg as la

import scipy.interpolate as inter
import scipy.optimize as opt
from numpy.polynomial.legendre import leggauss

import numpy.random as ra

from neml.nlsolvers import MaximumIterations, MaximumSubdivisions, newton, scalar_newton


class Driver(object):
    """
    Superclass of all drivers, basically just sets up history and reports
    results.
    """
    def __init__(self, model, verbose=False, rtol=1.0e-6, atol=1.0e-10,
                 miter=25, T_init=0.0, no_thermal_strain=False):
        """
        Parameters:
          model:    material model to play with
          verbose:  verbose output
          rtol:     relative tolerance, where needed
          atol:     absolute tolerance, where needed
          miter:    maximum iterations, where needed
        """
        self.model = model
        self.verbose = verbose
        self.rtol = rtol
        self.atol = atol
        self.miter = miter
        self.nts = no_thermal_strain

        self.stress_int = [np.zeros((6,))]
        self.stored_int = [self.model.init_store()]
        self.T_int = [T_init]
        self.t_int = [0.0]
        self.u_int = [0.0]
        self.p_int = [0.0]

    @property
    def stress(self):
        return np.array(self.stress_int)

    @property
    def stored(self):
        return np.array(self.stored_int)

    @property
    def history(self):
        return self.stored[:, :self.model.nhist]

    @property
    def T(self):
        return np.array(self.T_int)

    @property
    def t(self):
        return np.array(self.t_int)

    @property
    def u(self):
        return np.array(self.u_int)

    @property
    def p(self):
        return np.array(self.p_int)


class Driver_sd(Driver):
    """
    Superclass of generic small strain drivers, contains generic step
    methods.
    """
    def __init__(self, *args, **kwargs):
        """
        Parameters:
          model:    material model to play with
          verbose:  verbose output
          rtol:     relative tolerance, where needed
          atol:     absolute tolerance, where needed
          miter:    maximum iterations, where needed
        """
        super(Driver_sd, self).__init__(*args, **kwargs)
        self.strain_int = [np.zeros((6,))]
        self.thermal_strain_int = [np.zeros((6,))]
        self.mechanical_strain_int = [np.zeros((6,))]

    def solve_try(self, RJ, x0, extra=[]):
        """
        Try several different nonlinear solvers in the hope that at least
        one will converge

        Parameters:
          RJ:     function that returns the residual equations and
                  associated Jacobian
          x0:     initial guess
          extra:  list of extra solver functions of the type below
        """
        def s1(x0i):
            try:
                x = newton(RJ, x0i, verbose=self.verbose, rtol=self.rtol,
                           atol=self.atol, miter=self.miter)
                return x, True
            except Exception:
                return np.zeros((12,)), False

        def s2(x0i):
            try:
                res = opt.root(RJ, x0i, jac=True, method='lm')
                return res.x, res.success
            except Exception:
                return np.zeros((12,)), False

        def s3(x0i):
            try:
                x = newton(RJ, x0i, verbose=self.verbose, rtol=self.rtol,
                           atol=self.atol, miter=self.miter,
                           linesearch='backtracking')
                return x, True
            except Exception:
                return np.zeros((12,)), False

        solvers = [s1, s3]
        guesses = [x0] + extra

        success = False
        for xi in guesses:
            for solv in solvers:
                x, success = solv(xi)
                if success:
                    break
            if success:
                break

        if not success:
            raise MaximumIterations()

        return x

    @property
    def strain(self):
        return np.array(self.strain_int)

    @property
    def thermal_strain(self):
        return np.array(self.thermal_strain_int)

    @property
    def mechanical_strain(self):
        return np.array(self.mechanical_strain_int)

    def update_thermal_strain(self, T_np1):
        """
        Move the thermal strains to the next step

        Parameters:
          T_np1:  next temperature
        """
        if self.nts:
            return np.zeros((6,))
        else:
            dT = T_np1 - self.T_int[-1]
            a_np1 = self.model.alpha(T_np1)
            a_n = self.model.alpha(self.T_int[-1])
            return self.thermal_strain_int[-1] + dT * (a_np1 + a_n) / 2 * np.array(
                [1.0, 1, 1, 0, 0, 0])
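    # Note on the update above: thermal expansion is integrated with a
    # trapezoidal rule over the temperature increment,
    #   eth_{n+1} = eth_n + dT * (alpha(T_{n+1}) + alpha(T_n)) / 2 * [1,1,1,0,0,0],
    # so only the three normal components pick up thermal strain and a
    # temperature-dependent alpha is averaged over the step.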
    def strain_step(self, e_np1, t_np1, T_np1):
        """
        Take a strain-controlled step

        Parameters:
          e_np1:  next strain
          t_np1:  next time
          T_np1:  next temperature
        """
        enext = self.update_thermal_strain(T_np1)
        s_np1, h_np1, A_np1, u_np1, p_np1 = self.model.update_sd(
            e_np1 - enext, self.mechanical_strain_int[-1],
            T_np1, self.T_int[-1], t_np1, self.t_int[-1],
            self.stress_int[-1], self.stored_int[-1],
            self.u_int[-1], self.p_int[-1])

        self.strain_int.append(np.copy(e_np1))
        self.mechanical_strain_int.append(e_np1 - enext)
        self.thermal_strain_int.append(enext)
        self.stress_int.append(np.copy(s_np1))
        self.stored_int.append(np.copy(h_np1))
        self.T_int.append(T_np1)
        self.t_int.append(t_np1)
        self.u_int.append(u_np1)
        self.p_int.append(p_np1)

    def stress_step(self, s_np1, t_np1, T_np1):
        """
        Take a stress-controlled step

        Parameters:
          s_np1:  next stress
          t_np1:  next time
          T_np1:  next temperature
        """
        enext = self.update_thermal_strain(T_np1)

        def RJ(e):
            s, h, A, u, p = self.model.update_sd(
                e - enext, self.mechanical_strain_int[-1],
                T_np1, self.T_int[-1], t_np1, self.t_int[-1],
                self.stress_int[-1], self.stored_int[-1],
                self.u_int[-1], self.p_int[-1])
            R = s - s_np1
            return R, A

        if len(self.strain_int) > 1:
            inc = self.strain_int[-1] - self.strain_int[-2]
            extra = [self.strain_int[-1] + inc]
        else:
            extra = []

        e_np1 = self.solve_try(RJ, self.strain_int[-1], extra=extra)

        self.strain_step(e_np1, t_np1, T_np1)

    def erate_step(self, sdir, erate, t_np1, T_np1,
                   einc_guess=None, ainc_guess=None):
        """
        Drive in a given stress direction at a prescribed strain rate, like
        an actual "stress controlled" experiment.

        Parameters:
          sdir:        stress direction
          erate:       strain rate (in the direction)
          t_np1:       next time
          T_np1:       next temperature
          einc_guess:  a guess at the strain increment
          ainc_guess:  a guess at the stress increment
        """
        sdir = sdir / la.norm(sdir)
        dt = t_np1 - self.t_int[-1]
        enext = self.update_thermal_strain(T_np1)

        def RJ(x):
            a = x[0]
            e_inc = x[1:]
            s, h, A, u, p = self.model.update_sd(
                self.strain_int[-1] + e_inc - enext,
                self.mechanical_strain_int[-1],
                T_np1, self.T_int[-1], t_np1, self.t_int[-1],
                self.stress_int[-1], self.stored_int[-1],
                self.u_int[-1], self.p_int[-1])

            R = np.zeros((7,))
            J = np.zeros((7, 7))
            R[:6] = s - (sdir * a + self.stress_int[-1])
            R[6] = np.dot(e_inc, sdir) / dt - erate

            J[:6, 0] = -sdir
            J[:6, 1:] = A
            J[6, 0] = 0.0
            J[6, 1:] = sdir / dt

            return R, J

        x0 = np.zeros((7,))

        if einc_guess is not None:
            x0[1:] = einc_guess
        else:
            x0[1:] = sdir / 10000.0

        if ainc_guess is not None:
            x0[0] = ainc_guess
        else:
            x0[0] = 1.0

        x = self.solve_try(RJ, x0)
        e_np1 = self.strain_int[-1] + x[1:]

        self.strain_step(e_np1, t_np1, T_np1)

        return x[1:], x[0]

    def erate_einc_step(self, sdir, erate, einc, T_np1, **kwargs):
        """
        Similar to erate_step but specify the strain increment instead of
        the time increment.

        Parameters:
          sdir:   stress direction
          erate:  strain rate, in stress direction
          einc:   strain increment, in stress direction
          T_np1:  temperature at next time step
        """
        dt = einc / erate
        return self.erate_step(sdir, erate, self.t_int[-1] + dt, T_np1,
                               **kwargs)
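    # erate_step above solves a 7x7 Newton system: the unknowns are the
    # stress increment magnitude a and the six strain increments e_inc;
    # rows 0-5 enforce s(e_n + e_inc) = s_n + a * sdir (the stress moves
    # along the fixed direction) and row 6 enforces
    # dot(e_inc, sdir) / dt = erate (the prescribed rate along sdir).
    # erate_einc_step simply converts a strain increment into the
    # equivalent time increment via dt = einc / erate.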
Parameters: sdir: stress direction srate: stress rate sinc: stress increment T_np1: temperature at next time step """ if np.allclose(sdir, 0.0): s_np1 = self.stress_int[-1] else: s_np1 = self.stress_int[-1] + sdir / la.norm(sdir) * sinc if np.isclose(srate, 0.0): dt = 0.0 else: dt = np.abs(np.dot(s_np1 - self.stress_int[-1], sdir) / srate) self.stress_step(s_np1, self.t_int[-1] + dt, T_np1) def strain_hold_step(self, i, t_np1, T_np1, q = 1.0, E = -1.0): """ A special, mixed step which holds the strain in index i constant while holding the stress in the other directions to their previous values Parameters: i: index to hold t_np1: next time T_np1: next temperature q: follow up factor E: Young's modulus to use -- must redo interface at some point """ if not np.isclose(q, 1.0) and np.isclose(E, -1.0): raise ValueError("You must supply the Youngs modulus") enext = self.update_thermal_strain(T_np1) oset = sorted(list(set(range(6)) - set([i]))) def RJ(e_np1): s, h, A, u, p = self.model.update_sd(e_np1 - enext, self.mechanical_strain_int[-1], T_np1, self.T_int[-1], t_np1, self.t_int[-1], self.stress_int[-1], self.stored_int[-1], self.u_int[-1], self.p_int[-1]) R = np.zeros((6,)) R[0] = (e_np1[i] - self.strain_int[-1][i] ) + (s[i] - self.stress_int[-1][i]) / E * (q - 1) R[1:] = s[oset] - self.stress_int[-1][oset] J = np.zeros((6,6)) J[0,0] = 1.0 J[0,:] += A[i,:] / E * (q - 1) J[1:,:] = A[oset,:][:] return R, J x0 = np.copy(self.strain_int[-1]) e_np1 = self.solve_try(RJ, x0) self.strain_step(e_np1, t_np1, T_np1) def uniaxial_test(model, erate, T = 300.0, emax = 0.05, nsteps = 250, sdir = np.array([1,0,0,0,0,0]), verbose = False, offset = 0.2/100.0, history = None, tdir = np.array([0,1,0,0,0,0]), rtol = 1e-6, atol = 1e-10, miter = 25): """ Make a uniaxial stress/strain curve Parameters: model: material model erate: strain rate Keyword Args: T: temperature, default 300.0 emax: maximum strain, default 5% nsteps: number of steps to use, default 250 sdir: stress direction, default tension in x verbose: whether to be verbose offset: used to calculate yield stress history: initial model history tdir: transverse direction for Poisson's ratio Returns: dict: results dictionary containing... 
**Results in dictionary:** ================= ============================================ Name Description ================= ============================================ strain strain in direction stress stress in direction energy_density strain energy density plastic_work plastic dissipation youngs young's modulus of initial curve yield yield stress implied by curve poissons poisson's ratio implied by non-axial strains ================= ============================================ """ e_inc = emax / nsteps driver = Driver_sd(model, verbose = verbose, T_init = T, rtol = rtol, atol = atol, miter = miter) if history is not None: driver.stored_int[0] = history strain = [0.0] stress = [0.0] for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) strain = np.array(strain) stress = np.array(stress) # Calculate the yield stress and Young's modulus E = np.abs(stress[1]) / np.abs(strain[1]) nu = -np.dot(driver.strain_int[1], tdir) / np.dot( driver.strain_int[1], sdir) sfn = inter.interp1d(np.abs(strain), np.abs(stress)) tfn = lambda e: E * (e - offset) try: sYe = opt.brentq(lambda e: sfn(e) - tfn(e), 0.0, np.max(strain)) sY = tfn(sYe) except Exception: sY = np.inf return {'strain': strain, 'stress': stress, 'energy_density': np.copy(driver.u), 'plastic_work': np.copy(driver.p), 'youngs': E, 'yield': sY, 'poissons': nu, 'history': driver.stored_int[-1]} def strain_cyclic(model, emax, R, erate, ncycles, T = 300.0, nsteps = 50, sdir = np.array([1,0,0,0,0,0]), hold_time = None, n_hold = 25, verbose = False, check_dmg = False, dtol = 0.75): """ Strain controlled cyclic test. Parameters: emax: maximum strain R: R = emin / emax erate: strain rate to go at ncycles: number of cycles T: temperature, default 300 Keyword Args: nsteps: number of steps per half cycle sdir: stress direction, defaults to x and tension first hold_time: if None don't hold, if scalar then hold symmetrically top/bot if an array specify different hold times for first direction (default tension) and second direction n_hold: number of steps to hold over verbose: whether to be verbose check_dmg: check to see if material damage exceeds dtol, stop the simulation when that happens dtol: damage to stop at Returns: dict: results dictionary containing... 
**Results in dictionary:** ============= ======================== Name Description ============= ======================== strain: strain in direction stress: stress in direction cycles: list of cycle numbers max: maximum stress per cycle min: minimum stress per cycle mean: mean stress per cycle ============= ======================== """ # Setup driver = Driver_sd(model, verbose = verbose, T_init = T) emin = emax * R if hold_time: if np.isscalar(hold_time): hold_time = [hold_time, hold_time] else: hold_time = [0,0] # Setup results strain = [0.0] stress = [0.0] time = [0.0] cycles = [] smax = [] smin = [] smean = [] ecycle = [] pcycle = [] # First half cycle if verbose: print("Initial half cycle") e_inc = emax / nsteps try: for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) except Exception as e: print("Failed to make first half cycle") raise e # Begin cycling for s in range(ncycles): if verbose: print("Cycle %i" % s) try: # Tension hold if hold_time[0] > 0.0: dt = hold_time[0] / n_hold for i in range(n_hold): einc, ainc = driver.erate_step(sdir, 0.0, time[-1] + dt, T, einc_guess = np.zeros((6,)), ainc_guess = -1) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) si = len(driver.strain_int) e_inc = np.abs(emin - emax) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = -einc, ainc_guess = -ainc) else: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Compression hold if hold_time[1] > 0.0: dt = hold_time[1] / n_hold for i in range(n_hold): einc, ainc = driver.erate_step(sdir, 0.0, time[-1] + dt, T, einc_guess = np.zeros((6,)), ainc_guess = -1) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) e_inc = np.abs(emax - emin) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = -einc, ainc_guess = -ainc) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Calculate if np.isnan(max(stress[si:])) or np.isnan(min(stress[si:])): break cycles.append(s) smax.append(max(stress[si:])) smin.append(min(stress[si:])) smean.append((smax[-1]+smin[-1])/2) ecycle.append(driver.u_int[-1]) pcycle.append(driver.p_int[-1]) except Exception as e: break # Setup and return return {"strain": np.array(strain), "stress": np.array(stress), 
"cycles": np.array(cycles, dtype = int), "max": np.array(smax), "min": np.array(smin), "mean": np.array(smean), "energy_density": np.array(ecycle), "plastic_work": np.array(pcycle), "history": driver.stored_int[-1], "time": np.array(time)} def strain_cyclic_extrapolated(model, emax, R, erate, ncycles, T = 300.0, nsteps = 50, sdir = np.array([1,0,0,0,0,0]), hold_time = None, n_hold = 25, verbose = False, check_dmg = False, dtol = 0.75, min_cycle=3, unit_extrapolate = 10, jump_delta_N=10, allowable_jump_stress=5.0): """ Strain controlled cyclic test extrapolation. Extra Keyword Args: min_cycle minimum cycles to start the extrapolation process unit_extrapolate number of cycles to perform single cycle extrapolation jump_delta_N number of cycles to jump allowable_jump_stress extrapolate when stress jump is within this limit Returns: dict: results dictionary containing... **Results in dictionary:** ============= ======================== Name Description ============= ======================== cycles: list of cycle numbers max: maximum stress per cycle min: minimum stress per cycle ============= ======================== """ # Setup driver = Driver_sd(model, verbose = verbose, T_init = T) emin = emax * R if hold_time: if np.isscalar(hold_time): hold_time = [hold_time, hold_time] else: hold_time = [0,0] # Setup results strain = [0.0] stress = [0.0] time = [0.0] cycles = [] smax = [] smin = [] smean = [] ecycle = [] pcycle = [] # First half cycle if verbose: print("Initial half cycle") e_inc = emax / nsteps try: for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) except Exception as e: print("Failed to make first half cycle") raise e s = 0 # steps in one cycle if (hold_time[0] > 0) and (hold_time[1] == 0): steps = 2*nsteps + n_hold elif (hold_time[1] > 0) and (hold_time[0] == 0): steps = 2*nsteps + n_hold elif (hold_time[0] > 0) and (hold_time[1] > 0): steps = 2*nsteps + 2*n_hold else: steps = 2*nsteps extrapolate = False while s < ncycles: if verbose: print("Cycle %i" % s) if check_dmg: if driver.stored_int[-1][0] > dtol: print("Damage check exceeded") break if (s >= min_cycle) and (extrapolate == True): # No extrapolation before min_cycle if (s <= unit_extrapolate): # single cycle jump for first unit_extrapolate cycles delta_N = 1 else: delta_N = jump_delta_N # specified cycles to jump n = len(driver.stored_int) # extrapolating history pos_hist_last_last = driver.stored_int[n - 1 - steps] pos_hist_last = driver.stored_int[n-1] dN_1 = cycles[-1] - cycles[-2] pos_extrapolated_history = pos_hist_last + (pos_hist_last - pos_hist_last_last)*delta_N/dN_1 # extrapolating smax smax_last_last = smax[-2] smax_last = smax[-1] extrapolated_smax = smax_last + (smax_last - smax_last_last)*delta_N/dN_1 # extrapolating smax smin_last_last = smin[-2] smin_last = smin[-1] extrapolated_smin = smin_last + (smin_last - smin_last_last)*delta_N/dN_1 # criteria for extrapolation pos_stress_last_last = driver.stress_int[n - 1 - 2*steps] pos_stress_last = driver.stress_int[n-1] pos_extrapolated_stress = pos_stress_last + (pos_stress_last - pos_stress_last_last)*delta_N/dN_1 stress_jump = pos_extrapolated_stress[0] - pos_stress_last[0] if 
np.fabs(stress_jump) <= allowable_jump_stress: s = s + delta_N if s > ncycles: break driver.stored_int.append(pos_extrapolated_history) driver.stress_int.append(pos_extrapolated_stress) smax.append(extrapolated_smax) smin.append(extrapolated_smin) cycles.append(s) extrapolate = False else: extrapolate = False else: try: if hold_time[0] > 0.0: dt = hold_time[0] / n_hold for i in range(n_hold): einc, ainc = driver.erate_step(sdir, 0.0, time[-1] + dt, T, einc_guess = np.zeros((6,)), ainc_guess = -1) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) si = len(driver.strain_int) e_inc = np.abs(emin - emax) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = -einc, ainc_guess = -ainc) else: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Compression hold if hold_time[1] > 0.0: dt = hold_time[1] / n_hold for i in range(n_hold): einc, ainc = driver.erate_step(sdir, 0.0, time[-1] + dt, T, einc_guess = np.zeros((6,)), ainc_guess = -1) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) e_inc = np.abs(emax - emin) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = -einc, ainc_guess = -ainc) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Calculate if np.isnan(max(stress[si:])) or np.isnan(min(stress[si:])): break s += 1 cycles.append(s) smax.append(max(stress[si:])) smin.append(min(stress[si:])) smean.append((smax[-1]+smin[-1])/2) ecycle.append(driver.u_int[-1]) pcycle.append(driver.p_int[-1]) extrapolate = True except Exception as e: break # Setup and return return {"cycles": np.array(cycles, dtype = int), "max": np.array(smax), "min": np.array(smin),"time": np.array(time)} def strain_cyclic_followup(model, emax, R, erate, ncycles, q = 1.0, T = 300.0, nsteps = 50, sind = 0, hold_time = None, n_hold = 25, verbose = False, check_dmg = False, dtol = 0.75, logspace = False): """ Strain controlled cyclic test with follow up. 
This is a "fallback" to the old version that does things by index so that I can use the index-based hold routine with follow up Parameters: emax: maximum strain R: R = emin / emax erate: strain rate to go at ncycles: number of cycles Keyword Args: q: follow up factor T: temperature, default 300 nsteps: number of steps per half cycle sind: index to pull on hold_time: if None don't hold, if scalar then hold symmetrically top/bot if an array specify different hold times for first direction (default tension) and second direction n_hold: number of steps to hold over verbose: whether to be verbose check_dmg: check to see if damage exceeds a threshold dtol: damage threshold logspace: logspace the hold time steps (instead of linspace) Returns: dict: dictionary of results... **Results in dictionary** ========= ======================== Name Description ========= ======================== strain strain in direction stress stress in direction cycles list of cycle numbers max maximum stress per cycle min minimum stress per cycle mean mean stress per cycle ========= ======================== """ # Setup sdir = np.zeros((6,)) sdir[sind] = 1.0 res = uniaxial_test(model, erate, T = T, emax = 1.0e-4, nsteps = 2) E = res['youngs'] driver = Driver_sd(model, verbose = verbose, T_init = T) emin = emax * R if hold_time: if np.isscalar(hold_time): hold_time = [hold_time, hold_time] else: hold_time = [0,0] # Setup results strain = [0.0] stress = [0.0] time = [0.0] cycles = [] smax = [] smin = [] smean = [] ecycle = [] pcycle = [] # First half cycle if verbose: print("Initial half cycle") e_inc = emax / nsteps try: for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) except Exception as e: print("Failed to make first half cycle") raise e # Begin cycling for s in range(ncycles): if verbose: print("Cycle %i" % s) try: # Tension hold if hold_time[0] > 0.0: if logspace: dts = np.diff(np.logspace(0, np.log10(hold_time[0]), n_hold+1)) else: dts = np.diff(np.linspace(0,hold_time[0],n_hold+1)) #dt = hold_time[0] / n_hold for i, dt in enumerate(dts): driver.strain_hold_step(sind, time[-1] + dt, T, q = q, E = E) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) si = len(driver.strain_int) e_inc = np.abs(emin - emax) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = np.zeros((6,)), ainc_guess = -1) else: einc, ainc = driver.erate_einc_step(-sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Compression hold if hold_time[1] > 0.0: if logspace: dts = np.diff(np.logspace(0, np.log10(hold_time[1]), n_hold+1)) else: dts = np.diff(np.linspace(0,hold_time[1],n_hold+1)) for i, dt in enumerate(dts): driver.strain_hold_step(sind, time[-1] + dt, T, q = q, E = E) if check_dmg: if 
driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + dt) e_inc = np.abs(emax - emin) / nsteps for i in range(nsteps): if i == 0: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = np.zeros((6,)), ainc_guess = 1.0) else: einc, ainc = driver.erate_einc_step(sdir, erate, e_inc, T, einc_guess = einc, ainc_guess = ainc) if check_dmg: if driver.stored_int[-1][0] > dtol: raise Exception("Damage check exceeded") strain.append(np.dot(driver.strain_int[-1], sdir)) stress.append(np.dot(driver.stress_int[-1], sdir)) time.append(time[-1] + e_inc / erate) # Calculate if np.isnan(max(stress[si:])) or np.isnan(min(stress[si:])): break cycles.append(s) smax.append(max(stress[si:])) smin.append(min(stress[si:])) smean.append((smax[-1]+smin[-1])/2) ecycle.append(driver.u_int[-1]) pcycle.append(driver.p_int[-1]) except Exception as e: break # Setup and return return {"strain": np.array(strain), "stress": np.array(stress), "cycles": np.array(cycles, dtype = int), "max": np.array(smax), "min": np.array(smin), "mean": np.array(smean), "energy_density": np.array(ecycle), "plastic_work": np.array(pcycle), "history": driver.stored_int[-1], "time":
np.array(time)
numpy.array
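The completion above finishes the `strain_cyclic_followup` results dictionary by converting the accumulated `time` list with `numpy.array`. A minimal sketch of that accumulate-then-convert pattern (the loop body is illustrative, not from the NEML source):

import numpy as np

# Accumulate scalars in Python lists during a time-stepping loop, then
# convert once at the end -- list.append is O(1) amortized, while
# np.append would copy the whole array on every step.
time, stress = [0.0], [0.0]
for i in range(1, 5):
    time.append(time[-1] + 0.1)      # advance time
    stress.append(stress[-1] + 2.5)  # placeholder stress update
results = {"time": np.array(time), "stress": np.array(stress)}
print(results["time"])  # [0.  0.1 0.2 0.3 0.4]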
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mpltcols
import matplotlib.patches as mpatches
from typing import Tuple

def cMDS(D: np.ndarray, is_similarity: bool = False) -> Tuple:
    '''
    Computes Classical Multidimensional Scaling from a given distance,
    or similarity, matrix D.

    Parameters
    ----------
    D : np.ndarray
        A distance, or similarity, matrix (square matrix)
    is_similarity: bool
        Determines if D is a similarity matrix (True) or a distance matrix (False)

    Returns
    -------
    Tuple
        F.T: np.ndarray
            transposed configuration matrix F
        D_sq: np.ndarray
            squared distance matrix D
        B: np.ndarray
            double centering matrix = -0.5*J*D^2*J
        e_vals: np.array
            eigenvalues of B

    Modified from: http://www.nervouscomputer.com/hfs/cmdscale-in-python/
    '''
    assert D.shape[0] == D.shape[1]

    if is_similarity:
        D = 1 - D  # If D is a similarity matrix, convert it to a distance matrix

    # Number of samples
    n = len(D)

    # Create the squared proximity matrix
    D_sq = D**2

    # Generate the centering matrix J, defined as the identity matrix I_n
    # minus an n x n all-ones matrix scaled by 1/n
    e1 = np.ones((n,1))
    m = (
np.ones((n, 1))
numpy.ones
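The truncated `m = (` is the start of the MDS centering computation; a plausible continuation, following the cmdscale reference cited in the docstring (the exact expression used in the source is an assumption), builds J = I_n - ones(n, n)/n and double-centers D squared:

import numpy as np

# Illustrative symmetric distance matrix
n = 4
rng = np.random.default_rng(0)
D = rng.random((n, n))
D = (D + D.T) / 2
np.fill_diagonal(D, 0.0)

J = np.eye(n) - np.ones((n, n)) / n  # centering matrix (assumed continuation)
B = -0.5 * J @ (D ** 2) @ J          # double-centered matrix from the docstring
e_vals, e_vecs = np.linalg.eigh(B)   # eigendecomposition used to build F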
#!/usr/bin/env python
import rospy
import numpy as np
import cv2
import sys
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

cvb = CvBridge()
i_rgb = 0
i_depth = 0
rgb_mem = None
depth_mem = None
first_flag_depth = True
first_flag_rgb = True
depth_folder = ""
rgb_folder = ""

def imnormalize(xmax, image):
    """
    Scale sample image data into the range 0 to 255.
    : image: image data.
    : return: Numpy array of normalized data
    """
    xmin = 0
    a = 0
    b = 255
    return ((np.array(image, dtype=np.float32) - xmin) * (b - a)) / (xmax - xmin)

def grabrgb(msg):
    global i_rgb
    global rgb_mem
    global first_flag_rgb
    if i_rgb <= i_depth:
        try:
            cv_image = cvb.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
            print(e)
        image_normal = np.array(cv_image)
        if first_flag_rgb:
            rgb_mem = np.copy(image_normal)
            np.save(rgb_folder + "/frame" + str(i_rgb) + ".npy", image_normal)
            cv2.imwrite(rgb_folder + "/frame" + str(i_rgb) + ".jpg", image_normal)
            first_flag_rgb = False
        elif np.array_equal(rgb_mem, image_normal):
            return
        else:
            rgb_mem =
np.copy(image_normal)
numpy.copy
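`np.copy` here gives `rgb_mem` its own buffer, so the stored frame is compared by value on the next callback instead of aliasing the incoming image. The difference in one toy example:

import numpy as np

frame = np.zeros((2, 2), dtype=np.uint8)
snapshot = frame          # just another reference: edits to frame show up here
copied = np.copy(frame)   # an independent buffer
frame[0, 0] = 255
print(snapshot[0, 0], copied[0, 0])  # 255 0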
# -*- coding: utf-8 -*- """ Created on Wed Aug 6 12:09:43 2014 @author: jc3e13 Too many little modules were cluttering the directory so I have shoved them all into one miscellaneous 'utilities' module. To keep things simple this should only import modules from the python standard library or numpy and scipy. """ import numpy as np import scipy.signal as sig import scipy.io as io import scipy.stats as stats from datetime import datetime, timedelta class Bunch(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def wrapphase(x): """Keep phase between 0 and 2pi.""" return np.pi * 2 * (x / (np.pi * 2) - np.floor_divide(x, np.pi * 2)) def closearr(x): """Add first array value to end. 1D arrays only!""" return np.hstack((x, x[0])) def convolve_smooth(x, win=10, mode="same"): """Smooth data using a given window size, in units of array elements, using the numpy.convolve function.""" return np.convolve(x, np.ones((win,)), mode=mode) / win def repand(func, *args): """ Perform bitwise and (&) operation on any number of arrays. Parameters ---------- func : function The function that converts the arrays into boolean arrays. e.g. np.isfinite args : arrays The arrays to compare. They must all be the same shape. Returns ------- out : boolean ndarray Array of booleans. """ out = np.full_like(args[0], True, dtype=bool) for arg in args: out = out & func(arg) return out def datenum_to_datetime(datenum): """ Convert a MATLAB datenums into python datetimes. Parameters ---------- datenum : array_like MATLAB datenumber which is the number of days since 0000-01-00. Returns ------- dt : ndarray Python datetime. See datetime module. """ def convert(datenum): try: return ( datetime.fromordinal(int(datenum)) + timedelta(days=datenum % 1) - timedelta(days=366) ) except ValueError: return np.nan if np.iterable(datenum): datenumar = np.asarray(datenum) shape = datenumar.shape dt = np.array([convert(el) for el in datenumar.flat]) dt = dt.reshape(shape) else: dt = convert(datenum) return dt def datetime64_to_datenum(dt): """Skeleton function might work needs improving.""" dt = dt.astype("datetime64[s]") dt0 = np.datetime64("0000-01-01T00:00:00") return (dt - dt0) / np.timedelta64(86400, "s") + 1 def datetime_to_datenum(dt): """ Convert a python datetime object into a MATLAB datenum. Parameters ---------- dt : array_like Python datetime. See datetime module. Returns ------- datenum : ndarray MATLAB datenumber which is the number of days since 0000-01-00. """ def convert(dt): try: mdn = dt + timedelta(days=366) frac_seconds = (dt - datetime(dt.year, dt.month, dt.day)).seconds / 86400.0 frac_microseconds = dt.microsecond / 8.64e10 return mdn.toordinal() + frac_seconds + frac_microseconds except ValueError: return np.nan if np.iterable(dt): dtar = np.asarray(dt) shape = dtar.shape datenum = np.array([convert(el) for el in dtar.flat]) datenum = datenum.reshape(shape) else: datenum = convert(dt) return datenum def lldist(lon, lat): """Calculates the distance between longitude and latitude coordinates on a spherical earth with radius using the Haversine formula. Code modified from the MATLAB m_map toolbox function m_lldist.m. Parameters ---------- lon : 1-D numpy.ndarray of floats. Longitude values. [degrees] lat : 1-D numpy.ndarray of floats. Latitude values. [degrees] Returns ------- dist : 1-D numpy.ndarray of floats. Distance between lon and lat positions. [km] Notes ----- This functionality does exist in the Gibbs seawater toolbox as gsw.dist. 
""" lon = np.asarray(lon) lat = np.asarray(lat) pi180 = np.pi / 180.0 earth_radius = 6378.137 # [km] lat1 = lat[:-1] * pi180 lat2 = lat[1:] * pi180 dlon = np.diff(lon) * pi180 dlat = lat2 - lat1 a = (np.sin(dlat / 2.0)) ** 2 + np.cos(lat1) * np.cos(lat2) * ( np.sin(dlon / 2.0) ) ** 2 angles = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) dist = earth_radius * angles return dist def distll(lon_0, lat_0, x, y): """ """ pi180 = np.pi / 180.0 earth_radius = 6378.137 # [km] r = earth_radius * np.cos(pi180 * lat_0) dlons = x / (r * pi180) dlats = y / (earth_radius * pi180) lons = lon_0 + dlons lats = lat_0 + dlats return lons, lats def mid(x, axis=0): """Returns mid point values along given axis.""" ndim = np.ndim(x) if ndim == 1: return 0.5 * (x[1:] + x[:-1]) elif ndim > 1: x_ =
np.swapaxes(x, axis, 0)
numpy.swapaxes
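`np.swapaxes` lets `mid` reuse the 1-D midpoint formula on arrays of any rank by rotating the requested axis to the front. A sketch of how the truncated branch presumably continues (the swap-back step is my assumption):

import numpy as np

def mid(x, axis=0):
    """Return midpoints along the given axis (sketch of the pattern above)."""
    x_ = np.swapaxes(np.asarray(x), axis, 0)   # bring target axis to front
    m = 0.5 * (x_[1:] + x_[:-1])               # same 1-D formula, any rank
    return np.swapaxes(m, axis, 0)             # restore the original layout

a = np.arange(12).reshape(3, 4)
print(mid(a, axis=1).shape)  # (3, 3)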
import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from statsmodels.nonparametric.smoothers_lowess import lowess from matplotlib.collections import LineCollection import re from . import colors color_styles = colors.color_styles plt.style.use('fivethirtyeight') def _bin_data(data, yname, xname, bins=50, agg_fn=np.mean): data = data.dropna() hist, edges = np.histogram(data[xname], bins=bins) bin_midpoint = np.zeros(edges.shape[0]-1) binned_df = pd.DataFrame(np.zeros((edges.shape[0]-1, 1))) for i in range(edges.shape[0]-1): bin_midpoint[i] = (edges[i] + edges[i+1]) / 2 if i < edges.shape[0]-2: dat_temp = data.loc[(data[xname] >= edges[i]) & ( data[xname] < edges[i+1]), :] binned_df.loc[binned_df.index[i], yname] = agg_fn(dat_temp[yname]) binned_df.loc[binned_df.index[i], xname] = bin_midpoint[i] binned_df.loc[binned_df.index[i], 'n_obs'] = dat_temp.shape[0] else: dat_temp = data.loc[(data[xname] >= edges[i]) & ( data[xname] <= edges[i+1]), :] binned_df.loc[binned_df.index[i], yname] = agg_fn(dat_temp[yname]) binned_df.loc[binned_df.index[i], xname] = bin_midpoint[i] binned_df.loc[binned_df.index[i], 'n_obs'] = dat_temp.shape[0] return binned_df ##TODO: Add some Prof. XU's interflex and fect estimation and plot functions class _regview(object): """ panelview: extract data panel eda panel effect list, if len(effect) equals to two, the first is crosssection while the second is time formula: outcome ~ *key + @treatment + (control1 + control2 + control3) """ def __init__(self,data,outcome=None,key=None,treatment =None, controls=None,effect=None, entity_effects=False, TimeEffects=False, bins =None, th = None, main = None ): if effect: data = data.set_index(effect) if len(effect) == 2: data = data.sort_index(level=0) ###TODO: this is for my convenience, but I have to write in a more general way ##TODO: support formula self.outcome = outcome self.key = key self.treatment = treatment self.controls = controls self.effect = effect self.data = data self.main = main self.th =th self.bins =bins #heatmap def panelviewtreat(self,cmp=None): if self.effect == None: err = "to draw panel data plots, you must set index first" raise ValueError(err) t = sorted(set(self.data.index.get_level_values(1))) r = sorted(set(self.data.index.get_level_values(0))) ma_li = [] for i in r: r_li = [] for j in t: if j in self.data.loc[i].index: r_li.append(self.data.loc[i].loc[j][self.treatment]) else: r_li.append(np.nan) ma_li.append(r_li) ma = np.asarray(ma_li) cmap = mpl.colors.ListedColormap(['#3D71A0', '#B70050']) x = np.arange(ma.shape[1] + 1) y = np.arange(ma.shape[0] + 1) if cmp: cmap = mpl.colors.ListedColormap(cmp) size_x = 10 size_y = round(10/len(x)*len(y)) fig, ax = plt.subplots(figsize=(size_x,size_y),dpi=400) im = ax.pcolor(x, y, ma, vmin=0, vmax=1,cmap=cmap) ax.vlines(np.arange(ma.shape[1]), ymin=0, ymax=ma.shape[0],linestyles='-', alpha=0.4,lw=.2) ax.hlines(np.arange(ma.shape[0]), xmin=0, xmax=ma.shape[1],linestyles='-', alpha=0.4,lw=.2) miss = mpl.lines.Line2D([], [], color='#EAF2F8',ls='', marker = 's',label='missing') uncon = mpl.lines.Line2D([], [], color= '#3D71A0',ls='', marker = 's',label='under control') untreat = mpl.lines.Line2D([], [], color= '#B70050',ls='', marker = 's',label='under treatment') # etc etc plt.legend(bbox_to_anchor=(.8, -.03),handles=[miss, uncon,untreat],ncol=3) ##set main if self.main: ax.set_title(self.main) ## set labels ax.set_yticks(np.arange(ma.shape[0])+0.5) ax.set_yticklabels(r) ax.set_xticks(np.arange(ma.shape[1])+0.5) 
ax.set_xticklabels(t,rotation=90) ##lables ax.set_xlabel(self.effect[0], fontsize=18) ax.set_ylabel(self.effect[1], fontsize=16) ##line def panelviewline(self,colormap = ['#B5D4E9', '#E4B4B4']): if self.effect == None: err = "to draw panel data plots, you must set index first" raise ValueError(err) def _color_line(d,cate,y,t,c): #data, category, y(outcome), treatment, color x = d.loc[cate].index y = d.loc[cate][y] t = d.loc[cate][t] # select how to color color = [] for i in range(len(y)): if t.iloc[i] == 0: color.append(c[0]) else: color.append(c[1]) # get segments xy =
np.array([x, y])
numpy.array
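`np.array([x, y])` stacks the coordinates as a (2, N) array; the usual next steps for a matplotlib `LineCollection` (assumed here, since the source is cut off) reshape it into (N-1, 2, 2) segments:

import numpy as np

x = np.arange(5)
y = x**2
xy = np.array([x, y])                  # shape (2, N)
points = xy.T.reshape(-1, 1, 2)        # shape (N, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)  # (N-1, 2, 2)
# Each row of `segments` is one (start, end) pair, ready for
# matplotlib.collections.LineCollection(segments, colors=...).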
import os import glob import numpy as np import math """ Conversion between kitti coordinates and camera coordinates is: Cam: x,y,z ==> Kitti: -y,-z,x """ BASE_DIR="./" LABEL_DIR="training/label_2" LOC_DIR="training/locational" REF="_out1" def read_client_dir(): folders = next(os.walk(BASE_DIR))[1] return folders def distance_check(loc1, loc2): distance=np.sqrt((loc1[0]-loc2[0])**2+(loc1[1]-loc2[1])**2+(loc1[2]-loc2[2])**2) if distance>1: return True def get_path(label_folder,loc_folder,ref_folder,fname): label_path=os.path.join(label_folder,fname) loc_path=os.path.join(loc_folder,fname) ref_path=os.path.join(ref_folder,fname) return label_path, loc_path, ref_path def tx_label(count,save_folder): car_count=0 #clients=read_client_dir() clients=['_out1', '_out2', '_out3', '_out4'] print(clients) for ind in range(count): fname = ("%06d.txt"%ind) txmed_labels=[] txmed_locs=[] ref_locs=[] for cur_client in clients: label_folder=os.path.join(cur_client,LABEL_DIR) loc_folder=os.path.join(cur_client,LOC_DIR) ref_folder=os.path.join(REF,LOC_DIR) label_path, loc_path, ref_path = get_path(label_folder,loc_folder,ref_folder,fname) #for label_path, loc_path, ref_path in zip(label_paths, loc_paths, ref_paths): loc=open(loc_path,'r').read() ref=open(ref_path,'r').read() ref = np.array([float(i) for i in ref.split()]) loc = np.array([float(i) for i in loc.split()]) label_f=open(label_path,'r') labels = [line.rstrip('\n') for line in label_f] #if len(labels)==0: # np.savetxt(os.path.join(save_folder,os.path.basename(label_path)),txmed_labels,delimiter=' ') # continue for label in labels: label = np.array([float(i) for i in label.split() if i!= 'Car']) if cur_client==REF: ref_locs.append(label[[10,11,12]]) continue #print(label.shape) yaw=np.radians(-ref[4]) yaw2=np.radians(-loc[4]) rot = np.matrix(np.identity(2)) rot[0,0]=np.cos(yaw) rot[0,1]=-np.sin(yaw) rot[1,0]=np.sin(yaw) rot[1,1]=np.cos(yaw) rel_loc=loc-ref #print(rel_loc) rel_loc[0],rel_loc[1] = rot*np.array([[rel_loc[0]],[rel_loc[1]]]) #print(rel_loc) txm_mat = get_matrix(rel_loc) label_location=label[10:13] label[3:7]=txm_bbox(label[10:13],label[7:10]) label_location[[1,2,0]]=label_location[[0,1,2]] #print(label_location) label_rot_y=label[13] label_location=np.concatenate((label_location,[1]))[np.newaxis] #print(label_location.shape) txmed_location=txm_mat*label_location.T label[10:13]=txmed_location[0:3].flatten() label[[10,11,12]]=label[[11,12,10]] label[13]=label_rot_y+yaw2-yaw label[3:7]=txm_bbox(label[10:13],label[7:10]) label_str=[] for ind,elem in enumerate(label): if ind<7: label_str.append(str(int(elem))) else: label_str.append(str(elem)) save=True for loc2 in ref_locs: if distance_check(label[[10,11,12]], loc2): continue else: save=False break for loc2 in txmed_locs: if distance_check(label[[10,11,12]], loc2) and save: continue else: save=False break if save: car_count+=1 txmed_locs.append(label[[10,11,12]]) txmed_labels.append('Car '+' '.join([elem for elem in label_str])) filename=os.path.join(save_folder,os.path.basename(label_path)) with open(filename, 'w') as f: labels_str="\n".join([str(label) for label in txmed_labels]) f.write(labels_str) f.close() #np.savetxt(os.path.join(save_folder,os.path.basename(label_path)),txmed_labels,delimiter=' ',fmt='%c') print("\n\n\n{}\n\n\n".format(car_count)) return def get_matrix(rel_loc,sc_x=1.0, sc_y=1.0,sc_z=1.0): """ Creates matrix from carla transform. 
""" x,y,z,pitch,yaw,roll = rel_loc #yaw=yaw+180 c_y = np.cos(np.radians(yaw)) s_y = np.sin(np.radians(yaw)) c_r = np.cos(np.radians(roll)) s_r = np.sin(np.radians(roll)) c_p = np.cos(np.radians(pitch)) s_p = np.sin(np.radians(pitch)) matrix = np.matrix(np.identity(4)) matrix[0, 3] = x matrix[1, 3] = y matrix[2, 3] = z matrix[0, 0] = sc_x*c_p * c_y matrix[0, 1] = sc_y*(c_y * s_p * s_r - s_y * c_r) matrix[0, 2] = -sc_z*(c_y * s_p * c_r + s_y * s_r) matrix[1, 0] = sc_x*s_y * c_p matrix[1, 1] = sc_y*(s_y * s_p * s_r + c_y * c_r) matrix[1, 2] = sc_z*(-s_y * s_p * c_r + c_y * s_r) matrix[2, 0] = sc_x*s_p matrix[2, 1] = -sc_y*(c_p * s_r) matrix[2, 2] = sc_z*(c_p * c_r) return matrix def get_datapoint_count(folders): label_count = -1 loc_count = -1 count = -1 for folder in folders: label_dir = os.path.join(BASE_DIR,folder,LABEL_DIR) loc_dir = os.path.join(BASE_DIR,folder,LOC_DIR) label_count= len([name for name in os.listdir(label_dir) if name.endswith('.txt')]) loc_count= len([name for name in os.listdir(loc_dir) if name.endswith('.txt')]) if label_count == loc_count: if count == label_count or count == -1: count = label_count else: print("Folder %s doesn't match in count" %(folder)) return None else: print("Folder %s has unmatched loc and label counts" %folder) return None return count def txm_bbox(loc,dim): WINDOW_WIDTH = 1248 WINDOW_HEIGHT = 384 MINI_WINDOW_WIDTH = 320 MINI_WINDOW_HEIGHT = 180 WINDOW_WIDTH_HALF = WINDOW_WIDTH / 2 WINDOW_HEIGHT_HALF = WINDOW_HEIGHT / 2 k =
np.identity(3)
numpy.identity
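`np.identity(3)` seeds the pinhole intrinsic matrix `k` that `txm_bbox` needs to project 3-D boxes into the image. A hedged sketch of the standard fill-in (the 90-degree field of view and the focal-length formula are assumptions, not taken from this file):

import numpy as np

WINDOW_WIDTH, WINDOW_HEIGHT = 1248, 384
fov = 90.0  # horizontal field of view in degrees (assumed)

k = np.identity(3)
k[0, 2] = WINDOW_WIDTH / 2.0    # principal point x
k[1, 2] = WINDOW_HEIGHT / 2.0   # principal point y
k[0, 0] = k[1, 1] = WINDOW_WIDTH / (2.0 * np.tan(np.radians(fov) / 2.0))
print(k)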
# AUTOGENERATED! DO NOT EDIT! File to edit: 03_shape.ipynb (unless otherwise specified).

__all__ = ['getElemetType', 'tria_scheme', 'tetra_scheme', 'getGaussPoints', 'getShapeLine2', 'getShapeLine3',
           'getShapeTria3', 'getShapeTria6', 'getShapeQuad4', 'getShapeQuad8', 'getShapeQuad9', 'getShapeTetra4',
           'getShapeTetra10', 'getShapeHexa8', 'getShapeHexa20', 'getAllShapeFunctions']

# Cell
import numpy as np
from scipy.special.orthogonal import p_roots as gauss_scheme

np.set_printoptions(precision=4)

# Cell
def getElemetType(elemCoords):
    "Determine the element type"
    elem_dict = {
        "numDim_1": {
            "numNodes_2": "Line2",
            "numNodes_3": "Line3"
        },
        "numDim_2": {
            "numNodes_3": "Tria3",
            "numNodes_4": "Quad4",
            "numNodes_6": "Tria6",
            "numNodes_8": "Quad8",
            "numNodes_9": "Quad9",
        },
        "numDim_3": {
            "numNodes_4": "Tetra4",
            "numNodes_8": "Hexa8",
            "numNodes_10": "Tetra10",
            "numNodes_20": "Hexa20"
        },
    }
    try:
        numNodes = elemCoords.shape[0]
        # 1-D coordinate arrays correspond to Line elements (numDim = 1)
        numDim = elemCoords.shape[1] if elemCoords.ndim > 1 else 1
        ElemType = elem_dict.get(f"numDim_{numDim}").get(f"numNodes_{numNodes}")
        if ElemType:
            return ElemType
        else:
            raise NotImplementedError(
                f"No {numDim}D element with {numNodes} nodes is available"
            )
    except NotImplementedError as error:
        print(error)
    except (IndexError, AttributeError, TypeError):
        print("No valid coordinates array")

# Cell
def tria_scheme(order):
    if order == 1:
        xi = [[1./3., 1./3.]]
        weight = [[1.]]
    elif order == 3:
        r1 = 1./6.
        r2 = 2./3.
        w1 = 1./3.
        xi = [[r1,r1],[r2,r1],[r1,r2]]
        weight = [[w1],[w1],[w1]]
    elif order == 4:
        r1 = 1./5.
        r2 = 3./5.
        r3 = 1./3.
        w1 = 0.52083333
        w2 = 0.52083333
        w3 = 0.52083333
        w4 = -0.56250000
        xi = [[r1,r1],[r2,r1],[r1,r2],[r3,r3]]
        weight = [[w1],[w2],[w3],[w4]]
    return xi, weight

# Cell
def tetra_scheme(order):
    if order == 1:
        xi = [[1./4., 1./4., 1./4.]]
        weight = [[1.]]
    elif order == 4:
        r1 = 0.5854102
        r2 = 0.1381966
        w1 = 1./4.
        xi = [[r1,r2,r2],[r2,r1,r2],[r2,r2,r1],[r2,r2,r2]]
        weight = [[w1],[w1],[w1],[w1]]
    elif order == 5:
        r1 = 1./4.
        r2 = 1./2.
        r3 = 1./6.
        w1 = 9./20.
        w2 = -4./5.
xi = [[r2,r3,r3],[r3,r2,r3],[r3,r3,r2],[r3,r3,r3],[r1,r1,r1]] weight = [[w1],[w1],[w1],[w1],[w2]] return xi, weight # Cell def getGaussPoints(elemType, reduced=False): point = [] weight = [] if "Line" in elemType: stdOrder = 2 if "2" in elemType else 3 if reduced: stdOrder -= 1 ip, w = gauss_scheme(stdOrder) point = [[ip[i]] for i in range(stdOrder)] weight = [[w[i]] for i in range(stdOrder)] elif "Tria" in elemType: stdOrder = 1 if "3" in elemType else 4 if stdOrder == 4 and reduced: stdOrder = 3 point, weight = tria_scheme(stdOrder) elif "Quad" in elemType: stdOrder = 2 if "4" in elemType else 3 if reduced: stdOrder -= 1 ip, w = gauss_scheme(stdOrder) point = [[ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder)] weight = [[w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder)] elif "Tetra" in elemType: stdOrder = 1 if "4" in elemType else 5 if stdOrder == 5 and reduced: stdOrder = 4 point, weight = tetra_scheme(stdOrder) elif "Hexa" in elemType: stdOrder = 2 if "8" in elemType else 3 if reduced: stdOrder -= 1 ip, w = gauss_scheme(stdOrder) point = [[ip[k], ip[j], ip[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)] weight = [[w[k]*w[j]*w[i]] for i in range(stdOrder) for j in range(stdOrder) for k in range(stdOrder)] return np.array(point), np.array(weight) # Cell def getShapeLine2(gaussPoint): # Check the dimension of physical space if gaussPoint.shape[0] != 1: raise NotImplementedError("1D only") ############################################################################################################ # gauss points coords xi = gaussPoint[0] ############################################################################################################ # Tuple with xi_a combinatory xi_comb = [-1,1] ############################################################################################################ # Calculate shape functions N = np.array([0.5*(1+sign*xi) for sign in xi_comb]) ############################################################################################################ # Calculate derivatives of shape functions-> xi dN = np.array([0.5*sign for sign in xi_comb]) return N, dN # Cell def getShapeLine3(gaussPoint): # Check the dimension of physical space if gaussPoint.shape[0] != 1: raise NotImplementedError("1D only") ############################################################################################################ # gauss points coords xi = gaussPoint[0] ############################################################################################################ # Tuple with xi_a combinatory xi_comb = [-1,1] ############################################################################################################ # Calculate shape functions N_lateral = np.array([0.5*item*xi*(1+item*xi) for item in xi_comb]) N_middle = np.array([(1+xi)*(1-xi)]) N = np.hstack((N_lateral[0], N_middle, N_lateral[1])) ############################################################################################################ # Calculate derivatives of shape functions -> xi dN_lateral = np.array([0.5*item*(1+2.*item*xi) for item in xi_comb]) dN_middle = np.array([-2.*xi]) dN = np.hstack((dN_lateral[0], dN_middle, dN_lateral[1])) return N, dN # Cell def getShapeTria3(gaussPoint): # Check the dimension of physical space if gaussPoint.shape[0] != 2: raise NotImplementedError("2D only") ############################################################################################################ # gauss points coords L1 = gaussPoint[0] 
    L2 = gaussPoint[1]
    L3 = 1 - L1 - L2

    ############################################################################################################
    # Calculate shape functions
    N = np.array([L1, L2, L3])

    ############################################################################################################
    # Calculate derivatives of shape functions -> xi
    dN_dxi = np.array([1., 0., -1.])

    # Calculate derivatives of shape functions -> eta
    dN_deta =
np.array([0., 1., -1.])
numpy.array
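With N = [L1, L2, 1 - L1 - L2], the hard-coded derivative vectors follow directly; a quick finite-difference check confirms them:

import numpy as np

def N_tria3(xi, eta):
    return np.array([xi, eta, 1.0 - xi - eta])

# central finite differences against the analytic derivatives
h = 1e-6
p = (0.3, 0.2)
dN_dxi = (N_tria3(p[0] + h, p[1]) - N_tria3(p[0] - h, p[1])) / (2 * h)
dN_deta = (N_tria3(p[0], p[1] + h) - N_tria3(p[0], p[1] - h)) / (2 * h)
print(np.round(dN_dxi, 6), np.round(dN_deta, 6))  # [ 1.  0. -1.] [ 0.  1. -1.]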
# Copyright 2020-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """MaskRcnn tpositive and negative sample screening for Rcnn.""" import numpy as np import mindspore.nn as nn import mindspore.common.dtype as mstype from mindspore.ops import operations as P from mindspore.common.tensor import Tensor from mindspore import context class BboxAssignSampleForRcnn(nn.Cell): """ Bbox assigner and sampler definition. Args: config (dict): Config. batch_size (int): Batchsize. num_bboxes (int): The anchor nums. add_gt_as_proposals (bool): add gt bboxes as proposals flag. Returns: Tensor, multiple output tensors. Examples: BboxAssignSampleForRcnn(config, 2, 1024, True) """ def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals): super(BboxAssignSampleForRcnn, self).__init__() cfg = config if context.get_context("device_target") == "Ascend": self.cast_type = mstype.float16 self.np_cast_type = np.float16 else: self.cast_type = mstype.float32 self.np_cast_type = np.float32 self.batch_size = batch_size self.neg_iou_thr = cfg.neg_iou_thr_stage2 self.pos_iou_thr = cfg.pos_iou_thr_stage2 self.min_pos_iou = cfg.min_pos_iou_stage2 self.num_gts = cfg.num_gts self.num_bboxes = num_bboxes self.num_expected_pos = cfg.num_expected_pos_stage2 self.num_expected_neg = cfg.num_expected_neg_stage2 self.num_expected_total = cfg.num_expected_total_stage2 self.add_gt_as_proposals = add_gt_as_proposals self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32)) self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts), dtype=np.int32)) self.concat = P.Concat(axis=0) self.max_gt = P.ArgMaxWithValue(axis=0) self.max_anchor = P.ArgMaxWithValue(axis=1) self.sum_inds = P.ReduceSum() self.iou = P.IOU() self.greaterequal = P.GreaterEqual() self.greater = P.Greater() self.select = P.Select() self.gatherND = P.GatherNd() self.squeeze = P.Squeeze() self.cast = P.Cast() self.logicaland = P.LogicalAnd() self.less = P.Less() self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos) self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg) self.reshape = P.Reshape() self.equal = P.Equal() self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2)) self.concat_axis1 = P.Concat(axis=1) self.logicalnot = P.LogicalNot() self.tile = P.Tile() # Check self.check_gt_one = Tensor(np.array(-1 *
np.ones((self.num_gts, 4))
numpy.ones
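`-1 * np.ones((self.num_gts, 4))` builds a sentinel tensor of invalid boxes, a common padding trick for fixed-shape ground-truth handling. A plain-NumPy sketch of the idea (how the original consumes `check_gt_one` is not shown in this excerpt):

import numpy as np

num_gts = 5
check_gt_one = -1 * np.ones((num_gts, 4), dtype=np.float32)  # sentinel boxes

gt_bboxes = np.array([[10, 20, 50, 60], [5, 5, 15, 25]], dtype=np.float32)
padded = check_gt_one.copy()
padded[: len(gt_bboxes)] = gt_bboxes        # real boxes first, -1 rows pad
valid = ~np.all(padded == -1, axis=1)       # mask of real (non-sentinel) rows
print(valid)  # [ True  True False False False]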
import numpy as np from collections import defaultdict as ddict from ptsnet.arrays import Table2D, Table, ObjArray from ptsnet.parallel.partitioning import even, get_partition from ptsnet.simulation.constants import MEM_POOL_POINTS, PIPE_START_RESULTS, PIPE_END_RESULTS, NODE_RESULTS, CLOSED_PROTECTION_RESULTS, POINT_PROPERTIES, G from ptsnet.utils.data import is_array from ptsnet.arrays.selectors import SelectorSet from ptsnet.simulation.funcs import run_general_junction, run_interior_step, run_pump_step, run_valve_step, run_open_protections, run_closed_protections from ptsnet.profiler.profiler import Profiler class Worker: def __init__(self, **kwargs): self.send_queue = None self.recv_queue = None self.router = kwargs['router'] self.wn = kwargs['wn'] self.ss = kwargs['ss'] self.global_where = kwargs['where'] self.time_step = kwargs['time_step'] self.time_steps = kwargs['time_steps'] self.mem_pool_points = None self.point_properties = None self.num_nodes = 0 # number of nodes in worker self.num_start_pipes = 0 # number of start pipes in worker self.num_end_pipes = 0 # number of end pipes in worker self.num_jip_nodes = 0 # number of just-in-pipes junction nodes in worker self.num_open_protections = 0 # number of open surge protections self.num_closed_protections = 0 # number of closed surge protections self.where = SelectorSet(['points', 'pipes', 'nodes', 'valves', 'pumps', 'open_protections', 'closed_protections']) self.processors = even(kwargs['num_points'], self.router['main'].size) self.is_innactive = False innactive_processors = np.empty(self.router['main'].size, dtype=bool) self.results = {} self.profiler_on = kwargs['profiler_on'] self.profiler = Profiler(self.router['main'].rank, is_on = self.profiler_on) ### self.profiler.start('get_partition') self.partition = get_partition( self.processors, self.router['main'].rank, self.global_where, self.ss, self.wn, self.router['main'].size, kwargs['inpfile']) self.profiler.stop('get_partition') ### ### self.profiler.start('check_processor_innactivity') if self.partition is None: self.is_innactive = True self.router['main'].Alltoall(np.ones(self.router['main'].size, dtype=bool)*self.is_innactive, innactive_processors) if np.any(innactive_processors): self.is_innactive = True self.profiler.stop('check_processor_innactivity') ### raise SystemError(" Partitioning is innecficient due to unused processor(s), try executing the parallel routine with less processors") self.profiler.stop('check_processor_innactivity') ### self.points = self.partition['points']['global_idx'] self.num_points = len(self.points) # ponts assigned to the worker self.local_points = np.arange(self.num_points) ### self.profiler.start('_create_selectors') self._create_selectors() self.profiler.stop('_create_selectors') ### ### self.profiler.start('_define_worker_comm_queues') self._define_worker_comm_queues() self.profiler.stop('_define_worker_comm_queues') ### ### self.profiler.start('_define_dist_graph_comm') self._define_dist_graph_comm() self.profiler.stop('_define_dist_graph_comm') ### self._comm_buffer_head = [] self._recv_points = [] for r in self.recv_queue.values: self._comm_buffer_head.append(np.zeros(len(r))) self._recv_points.extend(r) self._comm_buffer_flow = np.array(self._comm_buffer_head, dtype = object) self._comm_buffer_head = np.array(self._comm_buffer_head, dtype = object) ### self.profiler.start('_allocate_memory') if self.router['main'].rank == 0: self.local_to_global = {} self._allocate_memory() self.profiler.stop('_allocate_memory') ### ### 
self.profiler.start('_load_initial_conditions') self._load_initial_conditions() self.profiler.stop('_load_initial_conditions') ### def _allocate_memory(self): self.mem_pool_points = Table2D(MEM_POOL_POINTS, self.num_points, 2) self.point_properties = Table(POINT_PROPERTIES, self.num_points) if self.num_nodes > 0: self.results['node'] = Table2D(NODE_RESULTS, self.num_nodes, self.time_steps, labels = self.ss['node'].labels[self.where.nodes['all_to_points',]]) are_my_uboundaries = self.global_where.points['are_uboundaries'] \ [self.processors[self.global_where.points['are_uboundaries']] == self.router['main'].rank] self.where.points['are_my_uboundaries'] = self.local_points[np.isin(self.points, are_my_uboundaries)] are_my_dboundaries = self.global_where.points['are_dboundaries'] \ [self.processors[self.global_where.points['are_dboundaries']] == self.router['main'].rank] self.where.points['are_my_dboundaries'] = self.local_points[np.isin(self.points, are_my_dboundaries)] ppoints_start = self.points[self.where.points['are_my_dboundaries']] ppoints_end = self.points[self.where.points['are_my_uboundaries']] pipes_start = self.global_where.points['to_pipes'][ppoints_start] pipes_end = self.global_where.points['to_pipes'][ppoints_end] self.num_start_pipes = len(ppoints_start) self.num_end_pipes = len(ppoints_end) if self.num_start_pipes > 0: self.results['pipe.start'] = Table2D(PIPE_START_RESULTS, len(ppoints_start), self.time_steps, labels = self.ss['pipe'].labels[pipes_start]) if self.num_end_pipes > 0: self.results['pipe.end'] = Table2D(PIPE_END_RESULTS, len(ppoints_end), self.time_steps, labels = self.ss['pipe'].labels[pipes_end]) # Root processor gathers indexes to facilitate reading results node_indexes = self.router['main'].gather(self.where.nodes['all_to_points',], root = 0) pipe_start_indexes = self.router['main'].gather(pipes_start, root = 0) pipe_end_indexes = self.router['main'].gather(pipes_end, root = 0) if self.router['main'].rank == 0: node_indexes = np.concatenate(node_indexes) pipe_start_indexes = np.concatenate(pipe_start_indexes) pipe_end_indexes = np.concatenate(pipe_end_indexes) node_labels = self.ss['node'].labels[node_indexes] pipe_start_labels = self.ss['pipe'].labels[pipe_start_indexes] pipe_end_labels = self.ss['pipe'].labels[pipe_end_indexes] self.local_to_global['node'] = {l : i for i, l in enumerate(node_labels)} self.local_to_global['pipe.start'] = {l : i for i, l in enumerate(pipe_start_labels)} self.local_to_global['pipe.end'] = {l : i for i, l in enumerate(pipe_end_labels)} def _define_dist_graph_comm(self): self.router.add_communicator('local', self.router['main'].Create_dist_graph_adjacent( sources = self.recv_queue.labels, destinations = self.send_queue.labels, sourceweights = list(map(len, self.recv_queue.values)), destweights = list(map(len, self.send_queue.values)))) def _define_worker_comm_queues(self): local_points = self.partition['points']['local_idx'] pp = self.processors[self.points] pp_idx = np.where(pp != self.router['main'].rank)[0] ppoints = self.points[pp_idx] # Define receive queue self.recv_queue = ObjArray() for i, p in enumerate(pp_idx): if not pp[p] in self.recv_queue.indexes: self.recv_queue[pp[p]] = [] self.recv_queue[pp[p]].append(ppoints[i]) # Define send queue self.send_queue = ObjArray() uboundaries = self.points[self.where.points['are_uboundaries']] dboundaries = self.points[self.where.points['are_dboundaries']] inner = self.points[self.where.points['are_inner']] for p in self.recv_queue.labels: self.recv_queue[p] = 
np.sort(self.recv_queue[p]) urq = np.isin(self.recv_queue[p], uboundaries) drq = np.isin(self.recv_queue[p], dboundaries) irq = np.isin(self.recv_queue[p], inner) extra_b = np.append(self.recv_queue[p][urq] - 1, self.recv_queue[p][drq] + 1) extra_i = np.append(self.recv_queue[p][irq] - 1, self.recv_queue[p][irq] + 1) extra = np.append(extra_b, extra_i) reduced_extra = extra[np.isin(extra, self.points)] real_extra = [local_points[r] for r in reduced_extra[self.processors[reduced_extra] == self.router['main'].rank]] # local idx if len(real_extra) > 0: if not p in self.send_queue.indexes: self.send_queue[p] = [] self.send_queue[p].extend(real_extra) self.recv_queue[p] = np.sort([local_points[r] for r in self.recv_queue[p]]) # convert to local idx for p in self.send_queue.labels: self.send_queue[p] = np.sort(np.unique(self.send_queue[p])) def _create_selectors(self): jip_nodes = self.partition['nodes']['global_idx'] lpoints = self.partition['points']['local_idx'] self.where.points['just_in_pipes'] = np.array([lpoints[npoint] for npoint in self.partition['nodes']['points']]).astype(int) self.where.points['are_tanks'] = np.where(np.isin(self.points, self.partition['tanks']['points']))[0] self.where.points['are_reservoirs'] = np.where(np.isin(self.points, self.partition['reservoirs']['points']))[0] njip = np.cumsum(self.partition['nodes']['context']) self.where.nodes['just_in_pipes',] = njip[:-1] self.where.nodes['to_points'] = self.where.points['just_in_pipes'][self.where.nodes['just_in_pipes',][:-1]] nonpipe = np.isin(self.global_where.points['are_boundaries'], self.global_where.points['are_valve']) nonpipe = nonpipe | np.isin(self.global_where.points['are_boundaries'], self.global_where.points['are_pump']) local_points =
np.isin(self.global_where.points['are_boundaries'], self.points[self.processors[self.points] == self.router['main'].rank])
numpy.isin
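`np.isin` is doing the partitioning work here: it masks a global index array down to the members owned by this rank. A small self-contained example:

import numpy as np

points = np.array([3, 7, 11, 15, 19])    # points assigned to this worker
boundaries = np.array([1, 7, 15, 40])    # global boundary point ids
rank_owned = points[np.isin(points, boundaries)]
print(rank_owned)                        # [ 7 15]
local_idx = np.where(np.isin(points, boundaries))[0]
print(local_idx)                         # [1 3]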
import streamlit as st import IPython.display as ipd from pydub import AudioSegment import numpy as np from PIL import Image from load_css import local_css import pandas as pd import os import datetime # librosa is a Python library for analyzing audio and music. It can be used to extract the data from the audio files we will see it later. import librosa import librosa.display import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler, OneHotEncoder # to play the audio files from IPython.display import Audio import tensorflow as tf import keras from keras.models import Sequential from keras.layers import * import base64 @st.cache(allow_output_mutation=True) def get_base64_of_bin_file(bin_file): with open(bin_file, 'rb') as f: data = f.read() return base64.b64encode(data).decode() @st.cache(allow_output_mutation=True) def load_our_model(): model = tf.keras.models.load_model('covidtest.h5') return model local_css("style.css") st.markdown(" <h1 style='text-align: center; color: black;'><span class='highlight slateblue'>Corona Detection App</span></h1>", unsafe_allow_html=True) st.markdown("\n") st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>To Know about the working of App and to Display Wave Plot, please click</span></h3>", unsafe_allow_html=True) st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>on Expand to show option Button below.</span></h3>", unsafe_allow_html=True) my_expander = st.beta_expander("Expand to show option", expanded=False) with my_expander: choice = st.multiselect("Enter Your Choice", ('How does it work ?', 'Display Wave Plot')) if 'How does it work ?' in choice: st.markdown("<div><span class='highlight blue'>Hello and Welcome to our AI enabled Covid Detection App.Let us describe you how it works :- </span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'>• Upload an audio of about three seconds in which your cough sound can be heard clearly </span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'> by clicking on the Browse Files button </span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'>• Once the file is uploaded the AI Model will display the result on the screen.. </span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'>• Once your result is displayed and you want to obtain a prediction for any other audio file then</span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'> it is recommended to reload the page. </span></div>", unsafe_allow_html=True) st.markdown("<div><span class='highlight blue'>• At last, we wish you to stay healthy and Covid Negative. 
Don't forget to wear Mask and</span></div>", unsafe_allow_html=True)
        st.markdown("<div><span class='highlight blue'>  maintain Social Distancing.</span></div>", unsafe_allow_html=True)

st.markdown("\n")
st.markdown(" <h3 style='text-align: center; color: black;'><span class='highlight slateblue'>Upload Your Audio File Below</span></h3>", unsafe_allow_html=True)
st.markdown(" <h5 style='text-align: center; color: black;'><span class='highlight slateblue'>The audio file should be of about three seconds containing the cough sound.</span></h5>", unsafe_allow_html=True)

def set_png_as_page_bg(png_file):
    bin_str = get_base64_of_bin_file(png_file)
    page_bg_img = '''
    <style>
    body {
    background-image: url("data:image/png;base64,%s");
    background-size: cover;
    }
    </style>
    ''' % bin_str
    st.markdown(page_bg_img, unsafe_allow_html=True)
    return

set_png_as_page_bg('abcd.png')

from numpy import load
Y = load('Y.npy', allow_pickle = True)
encoder = OneHotEncoder()
Y = encoder.fit_transform(np.array(Y).reshape(-1,1)).toarray()
tr =
load('tr.npy')
numpy.load
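# --- Illustrative sketch (not part of the dataset row above) ---
# The app above loads a Keras model from 'covidtest.h5' and fits a OneHotEncoder
# on the labels in Y.npy, but the audio feature pipeline itself is not shown.
# A common approach with librosa is to summarise the ~3 s clip as a time-averaged
# MFCC vector; the function name, feature size and the model's expected input
# shape below are assumptions, not taken from the app:
import numpy as np
import librosa

def cough_features(path, sr=22050, duration=3.0, n_mfcc=40):
    y, _ = librosa.load(path, sr=sr, duration=duration)   # load ~3 s of audio
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
    return np.mean(mfcc.T, axis=0)                        # shape: (n_mfcc,)

# x = cough_features('cough.wav')                         # hypothetical file name
# proba = load_our_model().predict(x[np.newaxis, :, np.newaxis])
# label = encoder.inverse_transform(proba)                # map back to class names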
""" Module for customising opensim segmented muscle points """ import os import re import math import json import shutil import numpy as np import copy from gias3.musculoskeletal.bonemodels import bonemodels from gias3.musculoskeletal import osim from mapclientplugins.gait2392somsomusclestep.muscleVolumeCalculator import muscleVolumeCalculator from numpy import pi from scipy.interpolate import interp1d SELF_DIR = os.path.split(__file__)[0] DATA_DIR = os.path.join(SELF_DIR, 'data/node_numbers/') TEMPLATE_OSIM_PATH = os.path.join(SELF_DIR, 'data', 'gait2392_simbody_wrap.osim') VALID_SEGS = {'pelvis', 'femur-l', 'femur-r', 'tibia-l', 'tibia-r'} OSIM_FILENAME = 'gait2392_simbody.osim' VALID_UNITS = ('nm', 'um', 'mm', 'cm', 'm', 'km') TIBFIB_SUBMESHES = ('tibia', 'fibula') TIBFIB_SUBMESH_ELEMS = {'tibia': range(0, 46), 'fibula': range(46, 88), } TIBFIB_BASISTYPES = {'tri10': 'simplex_L3_L3', 'quad44': 'quad_L3_L3'} def dim_unit_scaling(in_unit, out_unit): """ Calculate the scaling factor to convert from the input unit (in_unit) to the output unit (out_unit). in_unit and out_unit must be a string and one of ['nm', 'um', 'mm', 'cm', 'm', 'km']. inputs ====== in_unit : str Input unit out_unit :str Output unit returns ======= scaling_factor : float """ unit_vals = { 'nm': 1e-9, 'um': 1e-6, 'mm': 1e-3, 'cm': 1e-2, 'm': 1.0, 'km': 1e3, } if in_unit not in unit_vals: raise ValueError( 'Invalid input unit {}. Must be one of {}'.format( in_unit, list(unit_vals.keys()) ) ) if out_unit not in unit_vals: raise ValueError( 'Invalid input unit {}. Must be one of {}'.format( in_unit, list(unit_vals.keys()) ) ) return unit_vals[in_unit] / unit_vals[out_unit] def update_femur_opensim_acs(femur_model): femur_model.acs.update( *bonemodels.model_alignment.createFemurACSOpenSim( femur_model.landmarks['femur-HC'], femur_model.landmarks['femur-MEC'], femur_model.landmarks['femur-LEC'], side=femur_model.side ) ) def update_tibiafibula_opensim_acs(tibiafibula_model): tibiafibula_model.acs.update( *bonemodels.model_alignment.createTibiaFibulaACSOpenSim( tibiafibula_model.landmarks['tibiafibula-MM'], tibiafibula_model.landmarks['tibiafibula-LM'], tibiafibula_model.landmarks['tibiafibula-MC'], tibiafibula_model.landmarks['tibiafibula-LC'], side=tibiafibula_model.side ) ) def split_tibia_fibula_gfs(tib_fib_gf): tib = tib_fib_gf.makeGFFromElements( 'tibia', TIBFIB_SUBMESH_ELEMS['tibia'], TIBFIB_BASISTYPES, ) fib = tib_fib_gf.makeGFFromElements( 'fibula', TIBFIB_SUBMESH_ELEMS['fibula'], TIBFIB_BASISTYPES, ) return tib, fib def local_osim_2_global(body, model): # find the knee angle knee = model.joints['knee_l'] kneeAngle = model.joints['knee_l'].coordSets['knee_angle_l'].defaultValue knee_lTrans = np.zeros(3) # get the spline values trans1X = knee.getSimmSplineParams('translation1')[0] trans1Y = knee.getSimmSplineParams('translation1')[1] f = interp1d(trans1X, trans1Y, kind='cubic') knee_lTrans[0] = f(kneeAngle) trans2X = knee.getSimmSplineParams('translation2')[0] trans2Y = knee.getSimmSplineParams('translation2')[1] f2 = interp1d(trans2X, trans2Y, kind='cubic') knee_lTrans[1] = f2(kneeAngle) # find the knee angle knee = model.joints['knee_r'] kneeAngle = model.joints['knee_r'].coordSets['knee_angle_r'].defaultValue knee_rTrans = np.zeros(3) # get the spline values trans1X = knee.getSimmSplineParams('translation1')[0] trans1Y = knee.getSimmSplineParams('translation1')[1] f = interp1d(trans1X, trans1Y, kind='cubic') knee_rTrans[0] = f(kneeAngle) trans2X = knee.getSimmSplineParams('translation2')[0] trans2Y = 
knee.getSimmSplineParams('translation2')[1] f2 = interp1d(trans2X, trans2Y, kind='cubic') knee_rTrans[1] = f2(kneeAngle) trans = None if body == 'pelvis': trans = np.zeros(3) elif body == 'femur_l': trans = model.joints['hip_l'].locationInParent elif body == 'femur_r': trans = model.joints['hip_r'].locationInParent elif body == 'tibia_l': trans = (model.joints['hip_l'].locationInParent + knee_lTrans) elif body == 'tibia_r': trans = (model.joints['hip_r'].locationInParent + knee_rTrans) elif body == 'talus_l': trans = (model.joints['hip_l'].locationInParent + knee_lTrans + model.joints['ankle_l'].locationInParent) elif body == 'talus_r': trans = (model.joints['hip_r'].locationInParent + knee_rTrans + model.joints['ankle_r'].locationInParent) elif body == 'calcn_l': trans = (model.joints['hip_l'].locationInParent + knee_lTrans + model.joints['ankle_l'].locationInParent + model.joints['subtalar_l'].locationInParent) elif body == 'calcn_r': trans = (model.joints['hip_r'].locationInParent + knee_rTrans + model.joints['ankle_r'].locationInParent + model.joints['subtalar_r'].locationInParent) elif body == 'toes_l': trans = (model.joints['hip_l'].locationInParent + knee_lTrans + model.joints['ankle_l'].locationInParent + model.joints['subtalar_l'].locationInParent + model.joints['mtp_l'].locationInParent) elif body == 'toes_r': trans = (model.joints['hip_r'].locationInParent + knee_rTrans + model.joints['ankle_r'].locationInParent + model.joints['subtalar_r'].locationInParent + model.joints['mtp_r'].locationInParent) return trans class Gait2392MuscleCustomiser(object): def __init__(self, config, ll=None, osimmodel=None, landmarks=None): """ Class for customising gait2392 muscle points using host-mesh fitting inputs ====== config : dict Dictionary of options. (work in progress) Example: { 'osim_output_dir': '/path/to/output/model.osim', 'in_unit': 'mm', 'out_unit': 'm', 'write_osim_file': True, 'update_knee_splines': False, 'static_vas': False, } ll : LowerLimbAtlas instance Model of lower limb bone geometry and pose osimmodel : opensim.Model instance The opensim model instance to customise """ self.config = config self.ll = ll self.trcdata = landmarks self.gias_osimmodel = None self._workflow_location = None if osimmodel is not None: self.set_osim_model(osimmodel) self._unit_scaling = dim_unit_scaling( self.config['in_unit'], self.config['out_unit'] ) def set_osim_model(self, model): self.gias_osimmodel = osim.Model(model=model) def cust_pelvis(self): pelvis = self.ll.models['pelvis'] # load the pelvis muscle attachment node numbers with open(DATA_DIR + 'pelvisNodeNumbers.txt') as infile: pelvisData = json.load(infile) pelvisAttachmentNodeNums = list(pelvisData.values()) pelvisMuscleNames = list(pelvisData.keys()) pelvisMuscleNames = [str(item) for item in pelvisMuscleNames] # This method appears to be taking quite a while to complete (like 5 # minutes), is this expected? This wasn't being used in musclecusthfm. # the muscle attachments were selected on a 24x24 mesh pelvisPoints, lhF = pelvis.gf.triangulate([24, 24]) # Align the discretised pelvis points and the muscle attachments to # OpenSim's pelvis local coordinate system.
localPelvisPoints = pelvis.acs.map_local(pelvisPoints) / 1000 pelvisAttachments = localPelvisPoints[pelvisAttachmentNodeNums] for i in range(len(pelvisMuscleNames)): muscle = self.gias_osimmodel.muscles[str(pelvisMuscleNames[i])] pathPoints = muscle.path_points s = sorted(muscle.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPoints[s[0]].body.name == 'pelvis': aSite = 0 elif pathPoints[s[-1]].body.name == 'pelvis': aSite = -1 # update the location of the pathpoint pp = pathPoints[s[aSite]] pp.location = pelvisAttachments[i] def cust_femur_l(self): leftFemur = self.ll.models['femur-l'] # load in the femur muscle attachment node numbers with open(DATA_DIR + 'leftFemurNodeNumbers.txt') as infile: leftFemurData = json.load(infile) leftFemurAttachmentNodeNums = list(leftFemurData.values()) leftFemurMuscleNames = list(leftFemurData.keys()) leftFemurMuscleNames = [str(item) for item in leftFemurMuscleNames] # update the geometric field coordinate system to match OpenSim's update_femur_opensim_acs(leftFemur) # the muscle attachments were selected on a 24x24 mesh leftFemurPoints, lhF = leftFemur.gf.triangulate([24, 24]) # align the discretised femur points and the muscle attachments to # OpenSim's femur local coordinate system localLeftFemurPoints = leftFemur.acs.map_local(leftFemurPoints) / 1000 leftFemurAttachments = localLeftFemurPoints[ leftFemurAttachmentNodeNums] for i in range(len(leftFemurMuscleNames)): muscleLeft = self.gias_osimmodel.muscles[ str(leftFemurMuscleNames[i])] pathPointsLeft = muscleLeft.path_points sL = sorted(muscleLeft.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsLeft[sL[0]].body.name == 'femur_l': aSite = 0 elif pathPointsLeft[sL[-1]].body.name == 'femur_l': aSite = -1 # update the location of the pathpoint ppL = pathPointsLeft[sL[aSite]] ppL.location = leftFemurAttachments[i] def cust_femur_r(self): rightFemur = self.ll.models['femur-r'] rightFemur.side = 'right' with open(DATA_DIR + 'rightFemurNodeNumbers.txt') as infile: rightFemurData = json.load(infile) rightFemurAttachmentNodeNums = list(rightFemurData.values()) rightFemurMuscleNames = list(rightFemurData.keys()) rightFemurMuscleNames = [str(item) for item in rightFemurMuscleNames] # update the geometric field coordinate system to match OpenSim's update_femur_opensim_acs(rightFemur) rightFemurPoints, rhF = rightFemur.gf.triangulate([24, 24]) localRightFemurPoints = rightFemur.acs.map_local( rightFemurPoints) / 1000 rightFemurAttachments = localRightFemurPoints[ rightFemurAttachmentNodeNums] # update attachments for i in range(len(rightFemurMuscleNames)): muscleRight = self.gias_osimmodel.muscles[ str(rightFemurMuscleNames[i])] pathPointsRight = muscleRight.path_points sR = sorted(muscleRight.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsRight[sR[0]].body.name == 'femur_r': aSite = 0 elif pathPointsRight[sR[-1]].body.name == 'femur_r': aSite = -1 ppR = pathPointsRight[sR[aSite]] ppR.location = rightFemurAttachments[i] def cust_tibia_l(self): # The tibia, patella and fibula all use the same fieldwork model to # align with OpenSim leftTibFib = self.ll.models['tibiafibula-l'] leftPatella = self.ll.models['patella-l'] update_tibiafibula_opensim_acs(leftTibFib) leftTib, leftFib = split_tibia_fibula_gfs(leftTibFib.gf) # load in the tibia muscle attachment node numbers with open(DATA_DIR + 'leftTibiaNodeNumbers.txt') as infile:
leftTibiaData = json.load(infile) leftTibiaAttachmentNodeNums = list(leftTibiaData.values()) leftTibiaMuscleNames = list(leftTibiaData.keys()) leftTibiaMuscleNames = [str(item) for item in leftTibiaMuscleNames] # load in the fibula muscle attachment node numbers with open(DATA_DIR + 'leftFibulaNodeNumbers.txt') as infile: leftFibulaData = json.load(infile) leftFibulaAttachmentNodeNums = list(leftFibulaData.values()) leftFibulaMuscleNames = list(leftFibulaData.keys()) leftFibulaMuscleNames = [str(item) for item in leftFibulaMuscleNames] # load in the patella muscle attachment node numbers with open(DATA_DIR + 'leftPatellaNodeNumbers.txt') as infile: leftPatellaData = json.load(infile) leftPatellaAttachmentNodeNums = list(leftPatellaData.values()) leftPatellaMuscleNames = list(leftPatellaData.keys()) leftPatellaMuscleNames = [str(item) for item in leftPatellaMuscleNames] leftTibiaPoints, lhF = leftTib.triangulate([24, 24]) leftFibulaPoints, lhF = leftFib.triangulate([24, 24]) leftPatellaPoints, lhf = leftPatella.gf.triangulate([24, 24]) localLeftTibiaPoints = leftTibFib.acs.map_local(leftTibiaPoints) / 1000 leftTibiaAttachments = localLeftTibiaPoints[ leftTibiaAttachmentNodeNums] localLeftFibulaPoints = leftTibFib.acs.map_local( leftFibulaPoints) / 1000 leftFibulaAttachments = localLeftFibulaPoints[ leftFibulaAttachmentNodeNums] localLeftPatellaPoints = leftTibFib.acs.map_local( leftPatellaPoints) / 1000 leftPatellaAttachments = localLeftPatellaPoints[ leftPatellaAttachmentNodeNums] # update the tibia attachments for i in range(len(leftTibiaMuscleNames)): muscleLeft = self.gias_osimmodel.muscles[ str(leftTibiaMuscleNames[i])] pathPointsLeft = muscleLeft.path_points sL = sorted(muscleLeft.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsLeft[sL[0]].body.name == 'tibia_l': aSite = 0 elif pathPointsLeft[sL[-1]].body.name == 'tibia_l': aSite = -1 ppL = pathPointsLeft[sL[aSite]] ppL.location = leftTibiaAttachments[i] # update the fibula attachments for i in range(len(leftFibulaMuscleNames)): muscleLeft = self.gias_osimmodel.muscles[ str(leftFibulaMuscleNames[i])] pathPointsLeft = muscleLeft.path_points sL = sorted(muscleLeft.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsLeft[sL[0]].body.name == 'tibia_l': aSite = 0 elif pathPointsLeft[sL[-1]].body.name == 'tibia_l': aSite = -1 ppL = pathPointsLeft[sL[aSite]] ppL.location = leftFibulaAttachments[i] # update the patella attachments for i in range(len(leftPatellaMuscleNames)): muscleLeft = self.gias_osimmodel.muscles[ str(leftPatellaMuscleNames[i])] pathPointsLeft = muscleLeft.path_points sL = sorted(muscleLeft.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsLeft[sL[0]].body.name == 'tibia_l': aSite = 0 elif pathPointsLeft[sL[-1]].body.name == 'tibia_l': aSite = -1 ppL = pathPointsLeft[sL[aSite]] ppL.location = leftPatellaAttachments[i] def cust_tibia_r(self): rightTibFib = self.ll.models['tibiafibula-r'] rightPatella = self.ll.models['patella-r'] update_tibiafibula_opensim_acs(rightTibFib) rightTib, rightFib = split_tibia_fibula_gfs(rightTibFib.gf) # load in the tibia attachment node numbers with open(DATA_DIR + 'rightTibiaNodeNumbers.txt') as infile: rightTibiaData = json.load(infile) rightTibiaAttachmentNodeNums = list(rightTibiaData.values()) rightTibiaMuscleNames = list(rightTibiaData.keys()) rightTibiaMuscleNames = [str(item) for item in 
rightTibiaMuscleNames] # load in the fibula attachment node numbers with open(DATA_DIR + 'rightFibulaNodeNumbers.txt') as infile: rightFibulaData = json.load(infile) rightFibulaAttachmentNodeNums = list(rightFibulaData.values()) rightFibulaMuscleNames = list(rightFibulaData.keys()) rightFibulaMuscleNames = [str(item) for item in rightFibulaMuscleNames] # load in the patella attachment node numbers with open(DATA_DIR + 'rightPatellaNodeNumbers.txt') as infile: rightPatellaData = json.load(infile) rightPatellaAttachmentNodeNums = list(rightPatellaData.values()) rightPatellaMuscleNames = list(rightPatellaData.keys()) rightPatellaMuscleNames = [ str(item) for item in rightPatellaMuscleNames] rightTibiaPoints, lhF = rightTib.triangulate([24, 24]) rightFibulaPoints, lhF = rightFib.triangulate([24, 24]) rightPatellaPoints, lhf = rightPatella.gf.triangulate([24, 24]) localRightTibiaPoints = rightTibFib.acs.map_local( rightTibiaPoints) / 1000 rightTibiaAttachments = localRightTibiaPoints[ rightTibiaAttachmentNodeNums] localRightFibulaPoints = rightTibFib.acs.map_local( rightFibulaPoints) / 1000 rightFibulaAttachments = localRightFibulaPoints[ rightFibulaAttachmentNodeNums] localRightPatellaPoints = rightTibFib.acs.map_local( rightPatellaPoints) / 1000 rightPatellaAttachments = localRightPatellaPoints[ rightPatellaAttachmentNodeNums] for i in range(len(rightTibiaMuscleNames)): muscleRight = self.gias_osimmodel.muscles[ str(rightTibiaMuscleNames[i])] pathPointsRight = muscleRight.path_points sR = sorted(muscleRight.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsRight[sR[0]].body.name == 'tibia_r': aSite = 0 elif pathPointsRight[sR[-1]].body.name == 'tibia_r': aSite = -1 ppR = pathPointsRight[sR[aSite]] ppR.location = rightTibiaAttachments[i] for i in range(len(rightFibulaMuscleNames)): muscleRight = self.gias_osimmodel.muscles[ str(rightFibulaMuscleNames[i])] pathPointsRight = muscleRight.path_points sR = sorted(muscleRight.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsRight[sR[0]].body.name == 'tibia_r': aSite = 0 elif pathPointsRight[sR[-1]].body.name == 'tibia_r': aSite = -1 ppR = pathPointsRight[sR[aSite]] ppR.location = rightFibulaAttachments[i] for i in range(len(rightPatellaMuscleNames)): muscleRight = self.gias_osimmodel.muscles[ str(rightPatellaMuscleNames[i])] pathPointsRight = muscleRight.path_points sR = sorted(muscleRight.path_points.keys()) aSite = None # aSite will be 0 if attachment is an origin and -1 if insertion if pathPointsRight[sR[0]].body.name == 'tibia_r': aSite = 0 elif pathPointsRight[sR[-1]].body.name == 'tibia_r': aSite = -1 ppR = pathPointsRight[sR[aSite]] ppR.location = rightPatellaAttachments[i] def set_workflow_location(self, location): self._workflow_location = location def write_cust_osim_model(self): self.gias_osimmodel.save( os.path.join(self._workflow_location, self.config['osim_output_dir'], OSIM_FILENAME) ) def customise(self): # Note: a number of PathPoints that were scaled in the previous plugin # are also being scaled here. Are both of these necessary? self.cust_pelvis() self.cust_femur_l() self.cust_tibia_l() self.cust_femur_r() self.cust_tibia_r() # What is being done in the following methods that wasn't in the # previous plugin or one of the cust (^) methods? They seem to be # updating the same values that were updated earlier. 
self.update_hip_muscles() self.update_knee_muscles() self.update_foot_muscles() self.update_wrap_points() # The gait2392 default marker set was comprehensively updated in the # previous plugin. Many of the markers being added here appear to be # duplicates of gait2392 markers. If we need to add any additional # markers to the Model we should use this method. # self.update_marker_set() if self.config['update_max_iso_forces']: self.update_max_iso_forces() # Currently, none of the OFL and TSL values are being re-calculated # after updating the PathPoints. They have been scaled in the previous # plugin but could be done more accurately here. if self.config['write_osim_file']: self.write_cust_osim_model() self.move_mesh_files() def move_mesh_files(self): output_directory = os.path.join(self._workflow_location, self.config['osim_output_dir']) source_dir = os.path.join(self._workflow_location, '../output/Geometry') target_dir = os.path.join(output_directory, './Geometry') if os.path.exists(target_dir): shutil.rmtree(target_dir) shutil.move(source_dir, target_dir) # This method assumes the current max iso force is in mm and multiplies it # to get the value in cm. I'm not sure it should be doing this (or not like # this at least). It should depend on the plugin configuration, right? def update_max_iso_forces(self): osimModel = self.gias_osimmodel subjectHeight = float(self.config['subject_height']) subjectMass = float(self.config['subject_mass']) # calculate muscle volumes using Handsfield (2014) osimAbbr, muscleVolume = muscleVolumeCalculator( subjectHeight, subjectMass) # load OpenSim model muscle set allMuscles = osimModel.get_muscles() allMusclesNames = list(range(allMuscles.getSize())) oldValue = np.zeros([allMuscles.getSize(), 1]) optimalFibreLength = np.zeros([allMuscles.getSize(), 1]) penAngleAtOptFibLength = np.zeros([allMuscles.getSize(), 1]) for i in range(allMuscles.getSize()): allMusclesNames[i] = allMuscles.get(i).getName() oldValue[i] = allMuscles.get(i).getMaxIsometricForce() optimalFibreLength[i] = allMuscles.get(i).getOptimalFiberLength() penAngleAtOptFibLength[i] = np.rad2deg( allMuscles.get(i).getPennationAngleAtOptimalFiberLength()) # convert opt. fibre length from [m] to [cm] to match volume units # [cm^3] # Shouldn't this (and the volume units) depend on the plugin config? optimalFibreLength *= 100 allMusclesNamesCut = list(range(allMuscles.getSize())) for i in range(len(allMusclesNames)): # delete trailing '_r' or '_l' currMuscleName = allMusclesNames[i][0:-2] # split the name from any digit in its name and only keep the first # string. currMuscleName = re.split(r'(\d+)', currMuscleName) currMuscleName = currMuscleName[0] # store in cell allMusclesNamesCut[i] = currMuscleName # calculate ratio of old max isometric forces for # multiple-lines-of-action muscles. newAbsVolume = np.zeros([allMuscles.getSize(), 1]) fracOfGroup = np.zeros([allMuscles.getSize(), 1]) for i in range(allMuscles.getSize()): currMuscleName = allMusclesNamesCut[i] currIndex = [ j for j, x in enumerate(osimAbbr) if x == currMuscleName] # currIndex = osimAbbr.index(currMuscleName) if currIndex: currValue = muscleVolume[currIndex] newAbsVolume[i] = currValue # The peroneus longus/brevis and the extensors (EDL, EHL) have to # be treated separately as they are represented as a combined muscle # group in Handsfield, 2014. The following method may not be the # best!
if currMuscleName == 'per_brev' or currMuscleName == 'per_long': currMuscleNameIndex = np.array([0, 0]) tmpIndex = [j for j, x in enumerate( allMusclesNamesCut) if x == 'per_brev'] currMuscleNameIndex[0] = tmpIndex[0] tmpIndex = [j for j, x in enumerate( allMusclesNamesCut) if x == 'per_long'] currMuscleNameIndex[1] = tmpIndex[0] currIndex = [j for j, x in enumerate(osimAbbr) if x == 'per_'] currValue = muscleVolume[currIndex] newAbsVolume[i] = currValue elif currMuscleName == 'ext_dig' or currMuscleName == 'ext_hal': currMuscleNameIndex = np.array([0, 0]) tmpIndex = [j for j, x in enumerate( allMusclesNamesCut) if x == 'ext_dig'] currMuscleNameIndex[0] = tmpIndex[0] tmpIndex = [j for j, x in enumerate( allMusclesNamesCut) if x == 'ext_hal'] currMuscleNameIndex[1] = tmpIndex[0] currIndex = [j for j, x in enumerate(osimAbbr) if x == 'ext_'] currValue = muscleVolume[currIndex] newAbsVolume[i] = currValue else: # find all instances of each muscle currMuscleNameIndex = [j for j, x in enumerate( allMusclesNamesCut) if x == currMuscleName] # only require half of the results as we only need muscles from # one side currMuscleNameIndex = currMuscleNameIndex[0:int(len( currMuscleNameIndex) / 2)] # find how much of the total muscle volume this muscle contributes fracOfGroup[i] = oldValue[i] / sum(oldValue[currMuscleNameIndex]) # calculate new maximal isometric muscle forces specificTension = 61 # N/cm^2 from Zajac 1989 newVolume = fracOfGroup * newAbsVolume # maxIsoMuscleForce = specificTension * (newVolume/optimalFibreLength) # * np.cos(math.degrees(penAngleAtOptFibLength)) # Update muscles of loaded model (in workspace only!), change model # name and print new osim file. maxIsoMuscleForce = np.zeros([allMuscles.getSize(), 1]) for i in range(allMuscles.getSize()): maxIsoMuscleForce[i] = specificTension * ( newVolume[i] / optimalFibreLength[i]) * np.cos( math.radians(penAngleAtOptFibLength[i])) # only update, if new value is not zero. Else do not override the # original value. 
if maxIsoMuscleForce[i] != 0: allMuscles.get(i).setMaxIsometricForce(maxIsoMuscleForce[i][0]) def update_hip_muscles(self): muscleNames = ['glut_max1_l', 'glut_max2_l', 'glut_max3_l', 'peri_l', 'iliacus_l', 'psoas_l', 'glut_max1_r', 'glut_max2_r', 'glut_max3_r', 'peri_r', 'psoas_r', 'iliacus_r'] joint = 'hip' body = 'pelvis' # joint - the joint that the muscles cross (currently only works for # muscles that cross a single joint) # body - the body that the origins of the muscles are attached to # this has only been tested for muscles that cross the hip # load in the original model mO = osim.Model(TEMPLATE_OSIM_PATH) mO.init_system() # for each muscle for i in range(len(muscleNames)): # display the pathpoints for both muscles muscleO = mO.muscles[muscleNames[i]] muscle = self.gias_osimmodel.muscles[muscleNames[i]] side = muscle.name[-2:] # find the transformation between the two bodies the muscles are # attached to transO = mO.joints[joint + side].locationInParent trans = self.gias_osimmodel.joints[joint + side].locationInParent pathPointsO = copy.copy(muscleO.path_points) pathPoints = copy.copy(muscle.path_points) for j in range(len(pathPointsO)): if list(pathPointsO.values())[j].body.name == body: list(pathPointsO.values())[j].location -= transO list(pathPoints.values())[j].location -= trans # ################################################## # # ###############Transform Points################### # # ################################################## # # find the path point names for the origin and the insertion sortedKeys = sorted(muscle.path_points.keys()) # the origin will be the first sorted key and the insertion last orig = sortedKeys[0] ins = sortedKeys[-1] # find vector between origins and insertions v1 = pathPoints[orig].location - pathPointsO[orig].location v2 = pathPoints[ins].location - pathPointsO[ins].location # the new points are going to be found by translating the points # based on a weighting multiplied by these two vectors # the weighting will be how far along the muscle the point is # find the total muscle length segments = np.zeros([len(pathPointsO) - 1, 3]) lengths = np.zeros(len(pathPointsO) - 1) for j in range(len(pathPointsO) - 1): segments[j] = pathPointsO[muscle.name + '-P' + str( j + 2)].location - pathPointsO[ muscle.name + '-P' + str(j + 1)].location lengths[j] = np.linalg.norm(segments[j]) Tl = np.sum(lengths) # Define the weighting function # for the points calculate the magnitude of the new vector and at # what angle for j in range(len(pathPointsO) - 2): # the second pathpoint will be the first via point p = pathPointsO[muscle.name + '-P' + str(j + 2)].location # find how far along the muscle the point is dl = np.sum(lengths[:j + 1]) # create the new points by adding a weighted vector pNew = ((dl / Tl) * v2) + ((1 - dl / Tl) * v1) + p # update the opensim model muscle.path_points[muscle.name + '-P' + str( j + 2)].location = pNew # transform the points back to the main body local coordinate system for j in range(len(pathPoints)): if list(pathPoints.values())[j].body.name == body: list(pathPoints.values())[j].location += trans def update_knee_muscles(self): muscleNames = ['bifemlh_l', 'semimem_l', 'semiten_l', 'sar_l', 'tfl_l', 'grac_l', 'rect_fem_l', 'bifemlh_r', 'semimem_r', 'semiten_r', 'sar_r', 'tfl_r', 'grac_r', 'rect_fem_r', 'bifemsh_l', 'vas_med_l', 'vas_int_l', 'vas_lat_l', 'bifemsh_r', 'vas_med_r', 'vas_int_r', 'vas_lat_r', 'med_gas_l', 'lat_gas_l', 'med_gas_r', 'lat_gas_r'] # This is being done multiple times. Should move outside this method.
# load in the original model mO = osim.Model(TEMPLATE_OSIM_PATH) mO.init_system() for i in range(len(muscleNames)): # display the pathpoints for both muscles muscleO = mO.muscles[muscleNames[i]] muscle = self.gias_osimmodel.muscles[muscleNames[i]] pathPointsO = copy.copy(muscleO.path_points) pathPoints = copy.copy(muscle.path_points) for j in range(len(pathPointsO)): list(pathPointsO.values())[j].location += local_osim_2_global( list(pathPointsO.values())[j].body.name, mO) list(pathPoints.values())[j].location += local_osim_2_global( list(pathPoints.values())[j].body.name, self.gias_osimmodel) # find the path point names for the origin and the insertion sortedKeys = sorted(muscle.path_points.keys()) # the origin will be the first sorted key and the insertion last orig = sortedKeys[0] ins = sortedKeys[-1] # find vector between origins and insertions v1 = pathPoints[orig].location - pathPointsO[orig].location v2 = pathPoints[ins].location - pathPointsO[ins].location # the new points are going to be found by translating the points # based on a weighting multiplied by these two vectors # the weighting will be how far along the muscle the point is # find the total muscle length segments = np.zeros([len(pathPointsO) - 1, 3]) lengths = np.zeros(len(pathPointsO) - 1) for j in range(len(pathPointsO) - 1): segments[j] = pathPointsO[muscle.name + '-P' + str( j + 2)].location - pathPointsO[ muscle.name + '-P' + str(j + 1)].location lengths[j] = np.linalg.norm(segments[j]) Tl = np.sum(lengths) # Define the weighting function for the points calculate the # magnitude of the new vector and at what angle for j in range(len(pathPointsO) - 2): # the second pathpoint will be the first via point p = pathPointsO[muscle.name + '-P' + str(j + 2)].location # find how far along the muscle the point is dl = np.sum(lengths[:j + 1]) # create the new points by adding a weighted vector pNew = ((dl / Tl) * v2) + ((1 - dl / Tl) * v1) + p # update the opensim model muscle.path_points[muscle.name + '-P' + str( j + 2)].location = pNew # transform the pelvis points back to the pelvis region for j in range(len(pathPoints)): list(pathPoints.values())[j].location -= local_osim_2_global( list(pathPoints.values())[j].body.name, self.gias_osimmodel) def update_foot_muscles(self): muscleNames = ['ext_dig_l', 'ext_hal_l', 'flex_dig_l', 'flex_hal_l', 'per_brev_l', 'per_long_l', 'per_tert_l', 'tib_ant_l', 'tib_post_l', 'ext_dig_r', 'ext_hal_r', 'flex_dig_r', 'flex_hal_r', 'per_brev_r', 'per_long_r', 'per_tert_r', 'tib_ant_r', 'tib_post_r'] # load in the original model mO = osim.Model(TEMPLATE_OSIM_PATH) mO.init_system() for i in range(len(muscleNames)): # get the pathPoints for the old and new muscle muscleO = mO.muscles[muscleNames[i]] muscle = self.gias_osimmodel.muscles[muscleNames[i]] side = muscle.name[-1] # find the transformation between the two bodies the muscles are # attached to transO = mO.joints['ankle_' + side].locationInParent + mO.joints[ 'subtalar_' + side].locationInParent trans = self.gias_osimmodel.joints['ankle_' + side]\ .locationInParent + self.gias_osimmodel.joints[ 'subtalar_' + side].locationInParent pathPointsO = copy.copy(muscleO.path_points) pathPoints = copy.copy(muscle.path_points) # ################################################## # # ###############Transform Points################### # # ################################################## # # find the path point names for the origin and the insertion sortedKeys = sorted(muscle.path_points.keys()) # the origin will be the first sorted key orig =
sortedKeys[0] ins = None # find the first point on the calcn for j in sortedKeys: if pathPoints[j].body.name == 'calcn_' + side: ins = j break endPP = sortedKeys.index(ins) for j in range(endPP + 1): if pathPointsO[sortedKeys[j]].body.name == 'calcn_' + side: pathPointsO[sortedKeys[j]].location += transO pathPoints[sortedKeys[j]].location += trans # find vector between origins and insertions v1 = pathPoints[orig].location - pathPointsO[orig].location v2 = pathPoints[ins].location - pathPointsO[ins].location # the new points are going to be found by translating the points # based on a weighting multiplied by these two vectors # the weighting will be how far along the muscle the point is # find the total muscle length segments = np.zeros([endPP, 3]) lengths = np.zeros(endPP) for j in range(endPP): segments[j] = pathPointsO[muscle.name + '-P' + str( j + 2)].location - pathPointsO[ muscle.name + '-P' + str(j + 1)].location lengths[j] = np.linalg.norm(segments[j]) Tl = np.sum(lengths) # Define the weighting function for the points calculate the # magnitude of the new vector and at what angle for j in range(endPP - 1): # the second pathpoint will be the first via point p = pathPointsO[muscle.name + '-P' + str(j + 2)].location # find how far along the muscle the point is dl = np.sum(lengths[:j + 1]) # create the new points by adding a weighted vector pNew = ((dl / Tl) * v2) + ((1 - dl / Tl) * v1) + p # update the opensim model muscle.path_points[muscle.name + '-P' + str( j + 2)].location = pNew for j in range(endPP + 1): if pathPoints[sortedKeys[j]].body.name == 'calcn_' + side: pathPoints[sortedKeys[j]].location -= trans def update_wrap_points(self): muscleNames = ['psoas_l', 'iliacus_l', 'psoas_r', 'iliacus_r'] wrapNames = ['PS_at_brim_l', 'IL_at_brim_l', 'PS_at_brim_r', 'IL_at_brim_r'] joint = 'hip' wrapPoints = {'psoas_l': 26, 'psoas_r': 26, 'iliacus_l': 4926, 'iliacus_r': 26} for i in range(len(muscleNames)): wrap = self.gias_osimmodel.wrapObjects[wrapNames[i]] radiiString = wrap.getDimensions() # increase the radii by a small amount so the via points don't sit # directly on the wrap object radii = np.array(str.split(radiiString))[1:].astype(float) + 0.002 theta = np.linspace(0, 2 * pi, 100) phi = np.linspace(0, pi, 50) sphere = np.zeros([1, 3]) wrapCentre = wrap.get_translation() for j in range(len(theta)): for k in range(len(phi)): x = wrapCentre[0] + radii[0] * np.cos(theta[j]) * np.sin( phi[k]) y = wrapCentre[1] + radii[1] * np.sin(theta[j]) * np.sin( phi[k]) z = wrapCentre[2] + radii[2] * np.cos(phi[k]) if i == 0 and j == 0: sphere[i, :] = [x, y, z] else: sphere = np.vstack([sphere, [x, y, z]]) # with the sphere created get the via point muscle = self.gias_osimmodel.muscles[muscleNames[i]] viaPoint = muscle.path_points[muscle.name + '-P2'] # find the closest point on the sphere newPoint = sphere[wrapPoints[muscle.name]] # update the path point viaPoint.location = newPoint # check if P-3 is inside the wrap surface checkPoint = muscle.path_points[muscle.name + '-P3'] # transform to global coordinates side = muscleNames[i][-2:] # find the transformation between the two bodies the muscles are # attached to trans = self.gias_osimmodel.joints[joint + side].locationInParent # find the distance between the closest point on the sphere and the # centre dists = sphere - (checkPoint.location + trans) # normalize the distances to each point normDists = np.linalg.norm(dists, axis=1) nodeNum = np.argmin(normDists) np_wrap_centre = np.array( [wrapCentre[0], wrapCentre[1], wrapCentre[2]]) d1 =
np.linalg.norm(np_wrap_centre - sphere[nodeNum]) # find the distance between the point and the centre of the sphere d2 = np.linalg.norm(np_wrap_centre - (checkPoint.location + trans)) # If the distance d1 is larger than d2, the point is inside the # sphere and needs to be moved to the closest point on the sphere if d1 > d2: checkPoint.location = sphere[nodeNum] - trans def update_marker_set(self): # create dictionary linking landmarks to bodies based on the Cleveland # Marker Set # Many of these markers are already included in the gait2392 MarkerSet. # This method should be used to add any additional LowerLimb landmarks # to the OpenSim model. Make sure the target landmarks are being output # by the LowerLimbGeneration step. fieldworkMarkers = { # 'pelvis': ['RASI', 'LASI', 'RPSI', 'LPSI', 'SACR', 'LHJC', # 'RHJC'], # 'femur_l': ['LT1', 'LT2', 'LT3', 'LKNE', 'LKNM', 'LKJC'], # 'femur_r': ['RT1', 'RT2', 'RT3', 'RKNE', 'RKNM', 'RKJC'], # 'tibia_l': ['LS1', 'LS2', 'LS3', 'LANK', 'LANM', 'LAJC'], # 'tibia_r': ['RS1', 'RS2', 'RS3', 'RANK', 'RANM', 'RAJC'], } otherMarkers = { 'torso': ['C7', 'T10', 'CLAV', 'STRN', 'BackExtra', 'LSHO', 'LTRI', 'LELB', 'LWRI', 'RSHO', 'RTRI', 'RELB', 'RWRI'], 'calcn_l': ['LHEE'], 'toes_l': ['LTOE'], 'calcn_r': ['RHEE'], 'toes_r': ['RTOE'] } self.gias_osimmodel.init_system() # load in the geometric fields and update their coordinate systems to # align with opensim. # This may have already been done? pelvis = self.ll.models['pelvis'] femur_l = self.ll.models['femur-l'] update_femur_opensim_acs(femur_l) femur_r = self.ll.models['femur-r'] femur_r.side = 'right' update_femur_opensim_acs(femur_r) tibia_l = self.ll.models['tibiafibula-l'] update_tibiafibula_opensim_acs(tibia_l) tibia_r = self.ll.models['tibiafibula-r'] tibia_r.side = 'right' update_tibiafibula_opensim_acs(tibia_r) markerSet = osim.opensim.MarkerSet() # for each body with a fieldwork model, map the markers to its body data = self.landmarks # for each marker for i in data: body = None # find what body the marker belongs to for j in fieldworkMarkers.keys(): for k in range(len(fieldworkMarkers[j])): if fieldworkMarkers[j][k] == i: body = self.gias_osimmodel.bodies[j] full_frame_name = "/bodyset/" + j newMarker = osim.Marker( name=i, frame_name=full_frame_name, location=eval(j).acs.map_local(np.array([data[ fieldworkMarkers[j][k]]])).flatten() / 1000 ) markerSet.adoptAndAppend(newMarker.get_osim_marker()) break if body is not None: break # if the body has no fieldwork model check if it can be # found in the extra dictionary if body is None: # import pdb # pdb.set_trace() for j in otherMarkers.keys(): for k in range(len(otherMarkers[j])): if otherMarkers[j][k] == i: body = "/bodyset/" + j if body == 'torso': pointOnParent = pelvis.acs.map_local( np.array([data[i]])).flatten() / 1000 # find the difference in body coordinates diff = self.gias_osimmodel.joints[ 'back'].locationInParent markerPos = pointOnParent - diff newMarker = osim.Marker( name=i, frame_name=body, location=markerPos ) markerSet.adoptAndAppend( newMarker.get_osim_marker()) elif body == 'calcn_l': pointOnParent = tibia_l.acs.map_local(
np.array([data[i]])
numpy.array
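# --- Illustrative sketch (not part of the dataset row above) ---
# local_osim_2_global() evaluates the gait2392 knee's SimmSpline translations at
# the default knee angle with scipy's interp1d. A standalone sketch of that one
# step; the spline knots below are made-up values, not the real gait2392
# parameters returned by getSimmSplineParams():
import numpy as np
from scipy.interpolate import interp1d

angles = np.array([-2.09, -1.40, -0.52, 0.0, 0.17])          # knee angle knots (rad), hypothetical
trans = np.array([-0.0032, 0.0014, 0.0037, 0.0053, 0.0058])  # translation knots (m), hypothetical

f = interp1d(angles, trans, kind='cubic')   # same call the module uses
knee_angle = -0.35                          # e.g. a default coordinate value
print(f(knee_angle))                        # interpolated knee translation at that angle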
from __future__ import print_function """ Markov based methods for spatial dynamics. """ __author__ = "<NAME> <<EMAIL>" __all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback", "prais", "shorrock", "homogeneity"] import numpy as np from pysal.spatial_dynamics.ergodic import fmpt from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE from scipy import stats from operator import gt import pysal # TT predefine LISA transitions # TT[i,j] is the transition type from i to j # i = quadrant in period 0 # j = quadrant in period 1 # uses one offset so first row and col of TT are ignored TT = np.zeros((5, 5), int) c = 1 for i in range(1, 5): for j in range(1, 5): TT[i, j] = c c += 1 # MOVE_TYPES is a dictionary that returns the move type of a LISA transition # filtered on the significance of the LISA end points # True indicates significant LISA in a particular period # e.g. a key of (1, 3, True, False) indicates a significant LISA located in # quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not # significant in quadrant 3. MOVE_TYPES = {} c = 1 cases = (True, False) sig_keys = [(i, j) for i in cases for j in cases] for i, sig_key in enumerate(sig_keys): c = 1 + i * 16 for i in range(1, 5): for j in range(1, 5): key = (i, j, sig_key[0], sig_key[1]) MOVE_TYPES[key] = c c += 1 class Markov(object): """ Classic Markov transition matrices. Parameters ---------- class_ids : array (n, t), one row per observation, one column recording the state of each observation, with as many columns as time periods. classes : array (k, 1), all different classes (bins) of the matrix. Attributes ---------- p : matrix (k, k), transition probability matrix. steady_state : matrix (k, 1), ergodic distribution. transitions : matrix (k, k), count of transitions between each state i and j. Examples -------- >>> c = [['b','a','c'],['c','c','a'],['c','b','c']] >>> c.extend([['a','a','b'], ['a','b','c']]) >>> c = np.array(c) >>> m = Markov(c) >>> m.classes.tolist() ['a', 'b', 'c'] >>> m.p matrix([[ 0.25 , 0.5 , 0.25 ], [ 0.33333333, 0. , 0.66666667], [ 0.33333333, 0.33333333, 0.33333333]]) >>> m.steady_state matrix([[ 0.30769231], [ 0.28846154], [ 0.40384615]]) US nominal per capita income 48 states 81 years 1929-2009 >>> import pysal >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) set classes to quintiles for each year >>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose() >>> m = Markov(q5) >>> m.transitions array([[ 729., 71., 1., 0., 0.], [ 72., 567., 80., 3., 0.], [ 0., 81., 631., 86., 2.], [ 0., 3., 86., 573., 56.], [ 0., 0., 1., 57., 741.]]) >>> m.p matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ], [ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ], [ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ], [ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443], [ 0. , 0. 
, 0.00125156, 0.07133917, 0.92740926]]) >>> m.steady_state matrix([[ 0.20774716], [ 0.18725774], [ 0.20740537], [ 0.18821787], [ 0.20937187]]) Relative incomes >>> pci = pci.transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> rq = pysal.Quantiles(rpci.flatten()).yb >>> rq.shape = (48,81) >>> mq = Markov(rq) >>> mq.transitions array([[ 707., 58., 7., 1., 0.], [ 50., 629., 80., 1., 1.], [ 4., 79., 610., 73., 2.], [ 0., 7., 72., 650., 37.], [ 0., 0., 0., 48., 724.]]) >>> mq.steady_state matrix([[ 0.17957376], [ 0.21631443], [ 0.21499942], [ 0.21134662], [ 0.17776576]]) """ def __init__(self, class_ids, classes=None): if classes is not None: self.classes = classes else: self.classes = np.unique(class_ids) n, t = class_ids.shape k = len(self.classes) js = range(t - 1) classIds = self.classes.tolist() transitions = np.zeros((k, k)) for state_0 in js: state_1 = state_0 + 1 state_0 = class_ids[:, state_0] state_1 = class_ids[:, state_1] initial = np.unique(state_0) for i in initial: ending = state_1[state_0 == i] uending = np.unique(ending) row = classIds.index(i) for j in uending: col = classIds.index(j) transitions[row, col] += sum(ending == j) self.transitions = transitions row_sum = transitions.sum(axis=1) p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), transitions) self.p = np.matrix(p) @property def steady_state(self): if not hasattr(self, '_steady_state'): self._steady_state = STEADY_STATE(self.p) return self._steady_state class Spatial_Markov(object): """ Markov transitions conditioned on the value of the spatial lag. Parameters ---------- y : array (n,t), one row per observation, one column per state of each observation, with as many columns as time periods. w : W spatial weights object. k : integer number of classes (quantiles). permutations : int, optional number of permutations for use in randomization based inference (the default is 0). fixed : bool If true, quantiles are taken over the entire n*t pooled series. If false, quantiles are taken each time period over n. variable_name : string name of variable. Attributes ---------- p : matrix (k, k), transition probability matrix for a-spatial Markov. s : matrix (k, 1), ergodic distribution for a-spatial Markov. transitions : matrix (k, k), counts of transitions between each state i and j for a-spatial Markov. T : matrix (k, k, k), counts of transitions for each conditional Markov. T[0] is the matrix of transitions for observations with lags in the 0th quantile; T[k-1] is the transitions for the observations with lags in the k-1th. P : matrix (k, k, k), transition probability matrix for spatial Markov; the first dimension is conditioned on the lag. S : matrix (k, k), steady state distributions for spatial Markov. Each row is a conditional steady_state. F : matrix (k, k, k), first mean passage times. First dimension is conditioned on the lag. shtest : list (k elements), each element of the list is a tuple for a multinomial difference test between the steady state distribution from a conditional distribution versus the overall steady state distribution: first element of the tuple is the chi2 value, second its p-value and the third the degrees of freedom. chi2 : list (k elements), each element of the list is a tuple for a chi-squared test of the difference between the conditional transition matrix against the overall transition matrix: first element of the tuple is the chi2 value, second its p-value and the third the degrees of freedom. x2 : float sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1) degrees of freedom under the null that transition probabilities are spatially homogeneous (see chi2 above). x2_dof : int degrees of freedom for homogeneity test. x2_pvalue : float p-value for homogeneity test based on analytic distribution. x2_rpvalue : float (if permutations>0) pseudo p-value for x2 based on random spatial permutations of the rows of the original transitions. x2_realizations : array (permutations,1), the values of x2 for the random permutations. Q : float Chi-square test of homogeneity across lag classes based on Bickenbach and Bode (2003) [Bickenbach2003]_. Q_p_value : float p-value for Q. LR : float Likelihood ratio statistic for homogeneity across lag classes based on Bickenbach and Bode (2003) [Bickenbach2003]_. LR_p_value : float p-value for LR. dof_hom : int degrees of freedom for LR and Q, corrected for 0 cells. Notes ----- Based on Rey (2001) [Rey2001]_. The shtest and chi2 tests should be used with caution as they are based on classic theory assuming random transitions. The x2 based test is preferable since it simulates the randomness under the null. It is an experimental test requiring further analysis. Examples -------- >>> import pysal as ps >>> f = ps.open(ps.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) >>> pci = pci.transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> w = ps.open(ps.examples.get_path("states48.gal")).read() >>> w.transform = 'r' >>> sm = ps.Spatial_Markov(rpci, w, fixed=True, k=5, variable_name='rpci') >>> for p in sm.P: ... print(p) ... [[ 0.96341463 0.0304878 0.00609756 0. 0. ] [ 0.06040268 0.83221477 0.10738255 0. 0. ] [ 0. 0.14 0.74 0.12 0. ] [ 0. 0.03571429 0.32142857 0.57142857 0.07142857] [ 0. 0. 0. 0.16666667 0.83333333]] [[ 0.79831933 0.16806723 0.03361345 0. 0. ] [ 0.0754717 0.88207547 0.04245283 0. 0. ] [ 0.00537634 0.06989247 0.8655914 0.05913978 0. ] [ 0. 0. 0.06372549 0.90196078 0.03431373] [ 0. 0. 0. 0.19444444 0.80555556]] [[ 0.84693878 0.15306122 0. 0. 0. ] [ 0.08133971 0.78947368 0.1291866 0. 0. ] [ 0.00518135 0.0984456 0.79274611 0.0984456 0.00518135] [ 0. 0. 0.09411765 0.87058824 0.03529412] [ 0. 0. 0. 0.10204082 0.89795918]] [[ 0.8852459 0.09836066 0. 0.01639344 0. ] [ 0.03875969 0.81395349 0.13953488 0. 0.00775194] [ 0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ] [ 0. 0.02339181 0.12865497 0.75438596 0.09356725] [ 0. 0. 0. 0.09661836 0.90338164]] [[ 0.33333333 0.66666667 0. 0. 0. ] [ 0.0483871 0.77419355 0.16129032 0.01612903 0. ] [ 0.01149425 0.16091954 0.74712644 0.08045977 0. ] [ 0. 0.01036269 0.06217617 0.89637306 0.03108808] [ 0. 0. 0. 0.02352941 0.97647059]] The probability of a poor state remaining poor is 0.963 if their neighbors are in the 1st quintile and 0.798 if their neighbors are in the 2nd quintile. The probability of a rich economy remaining rich is 0.976 if their neighbors are in the 5th quintile, but if their neighbors are in the 4th quintile this drops to 0.903. The Q and likelihood ratio statistics are both significant indicating the dynamics are not homogeneous across the lag classes: >>> "%.3f"%sm.LR '170.659' >>> "%.3f"%sm.Q '200.624' >>> "%.3f"%sm.LR_p_value '0.000' >>> "%.3f"%sm.Q_p_value '0.000' >>> sm.dof_hom 60 The long run distribution for states with poor (rich) neighbors has 0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S array([[ 0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278], [ 0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356], [ 0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ], [ 0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013], [ 0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]]) States with incomes in the first quintile with neighbors in the first quintile return to the first quintile after 2.298 years, after leaving the first quintile. They enter the fourth quintile after 80.810 years after leaving the first quintile, on average. Poor states with neighbors in the fourth quintile return to the first quintile, on average, after 12.88 years, and would enter the fourth quintile after 28.473 years. >>> for f in sm.F: ... print(f) ... [[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143] [ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286] [ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429] [ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762] [ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]] [[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197] [ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ] [ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482] [ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314] [ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]] [[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026] [ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693] [ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594] [ 83.72288828 39.06666667 14.3 3.44668119 76.36702977] [ 93.52288828 48.86666667 24.1 9.8 8.79255406]] [[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142] [ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423] [ 117.76777159 23.03735526 3.94436301 15.0843986 43.57927247] [ 127.89752089 32.4393006 14.56853107 4.44831643 31.63099455] [ 138.24752089 42.7893006 24.91853107 10.35 4.05613474]] [[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826] [ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826] [ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217] [ 127.1407767 48.74107143 33.29605263 3.91777427 83.52173913] [ 169.6407767 91.24107143 75.79605263 42.5 2.96521739]] """ def __init__(self, y, w, k=4, permutations=0, fixed=False, variable_name=None): self.y = y rows, cols = y.shape self.k = k self.cols = cols npa = np.array self.fixed = fixed self.variable_name = variable_name if fixed: yf = y.flatten() yb = pysal.Quantiles(yf, k=k).yb yb.shape = (rows, cols) classes = yb else: classes = npa([pysal.Quantiles(y[:, i], k=k) .yb for i in np.arange(cols)]).transpose() classic = Markov(classes) self.classes = classes self.p = classic.p self.transitions = classic.transitions T, P = self._calc(y, w, classes, k=k) self.T = T self.P = P if permutations: nrp = np.random.permutation counter = 0 x2_realizations = np.zeros((permutations, 1)) for perm in range(permutations): T, P = self._calc(nrp(y), w, classes, k=k) x2 = [chi2(T[i], self.transitions)[0] for i in range(k)] x2s = sum(x2) x2_realizations[perm] = x2s if x2s >= self.x2: counter += 1 self.x2_rpvalue = (counter + 1.0) / (permutations + 1.) self.x2_realizations = x2_realizations @property def s(self): if not hasattr(self, '_s'): self._s = STEADY_STATE(self.p) return self._s @property def S(self): if not hasattr(self, '_S'): S =
np.zeros_like(self.p)
numpy.zeros_like
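# --- Illustrative sketch (not part of the dataset row above) ---
# The Markov class above tallies state-to-state transition counts and
# row-normalises them into p; steady_state is then the left eigenvector of p
# for eigenvalue 1. A minimal NumPy version of both steps on a toy sequence:
import numpy as np

states = np.array([0, 0, 1, 1, 2, 1, 0, 2, 2, 1])   # toy class sequence
k = 3
counts = np.zeros((k, k))
for a, b in zip(states[:-1], states[1:]):
    counts[a, b] += 1                               # count i -> j moves
P = counts / counts.sum(axis=1, keepdims=True)      # row-normalise

vals, vecs = np.linalg.eig(P.T)                     # left eigenvectors of P
pi = np.real(vecs[:, np.argmax(np.real(vals))])
pi = pi / pi.sum()                                  # steady-state distribution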
#%% Switch the working directory
import os, sys
cur_path = sys.path[0].split(os.path.sep)
workspace_path = os.path.sep.join(cur_path[:cur_path.index("bestpaycup2020") + 1])
base_dir = workspace_path
os.chdir(workspace_path)  # force the working directory to the workspace
print(f"Forced the working directory to the workspace: {os.getcwd()}")

# %%
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
from sklearn.metrics import roc_auc_score

# %%
base_dir = os.getcwd()
# base_dir = '/Users/jason/bestpaycup2020'
x_df = pd.read_csv(base_dir + '/dataset/dataset1/trainset/train_base.csv')
y_df = pd.read_csv(base_dir + '/dataset/raw_dataset/trainset/train_label.csv')
data_x = np.array(x_df)
# train_x = np.delete(train_x, 0, axis=1)
data_y = np.array(y_df)

# %%
# align x with y, then preprocess
data_x = data_x[data_x[:, 0].argsort()]
data_y = data_y[data_y[:, 0].argsort()]
data_x = data_x[:, 1:].astype(float)
data_y = data_y[:, 1:].astype(float).reshape(1, -1)[0]

# %%
# normalisation
n, l = data_x.shape
for j in range(l):
    meanVal = np.mean(data_x[:, j])
    stdVal = np.std(data_x[:, j])
    data_x[:, j] = (data_x[:, j] - meanVal) / stdVal

# %%
# shuffle the data
state =
np.random.get_state()
numpy.random.get_state
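# --- Illustrative sketch (not part of the dataset row above) ---
# The script above grabs np.random.get_state() right before shuffling, which is
# the usual trick for shuffling features and labels with the same permutation:
import numpy as np

x = np.arange(10, dtype=float).reshape(5, 2)
y = np.arange(5)

state = np.random.get_state()     # remember the generator state
np.random.shuffle(x)              # shuffle the features...
np.random.set_state(state)        # ...rewind the generator...
np.random.shuffle(y)              # ...so labels get the identical permutation

assert (y == x[:, 0] // 2).all()  # rows and labels still line up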
""" A billboarded particle layer with texture/shader support """ import numpy as np from abc import ABC from collections.abc import Iterable from napari.layers import Surface from napari.layers.utils.layer_utils import calc_data_range from vispy.visuals.filters import Filter from vispy.visuals.shaders import Function, Varying from vispy.gloo import Texture2D, VertexBuffer from .utils import generate_billboards_2d from .filters import ShaderFilter, _shader_functions class BillboardsFilter(Filter): """ Billboard geometry filter (transforms vertices to always face camera) """ def __init__(self, antialias=0): vmat_inv = Function(""" mat2 inverse(mat2 m) { return mat2(m[1][1],-m[0][1],-m[1][0], m[0][0]) / (m[0][0]*m[1][1] - m[0][1]*m[1][0]); } """) vfunc = Function(""" varying float v_z_center; varying float v_scale_intensity; varying mat2 covariance_inv; void apply(){ // original world coordinates of the (constant) particle squad, e.g. [5,5] for size 5 vec4 pos = $transform_inv(gl_Position); pos.z *= pos.w; vec2 tex = $texcoords; mat4 cov = mat4(1.0); cov[0][0] = sqrt($sigmas[0]); cov[1][1] = sqrt($sigmas[1]); cov[2][2] = sqrt($sigmas[2]); // get new inverse covariance matrix (for rotating a gaussian) vec4 ex = vec4(1,0,0,0); vec4 ey = vec4(0,1,0,0); vec4 ez = vec4(0,0,1,0); vec3 ex2 = $camera(cov*$camera_inv(ex)).xyz; vec3 ey2 = $camera(cov*$camera_inv(ey)).xyz; vec3 ez2 = $camera(cov*$camera_inv(ez)).xyz; mat3 Rmat = mat3(ex2, ey2, ez2); covariance_inv = mat2(transpose(Rmat)*mat3(cov)*Rmat); covariance_inv = $inverse(covariance_inv); // get first and second column of view (which is the inverse of the camera) vec3 camera_right = $camera_inv(vec4(1,0,0,0)).xyz; vec3 camera_up = $camera_inv(vec4(0,1,0,0)).xyz; // when particles become too small, lock texture size and apply antialiasing (only used when antialias=1) // decrease this value to increase antialiasing //float dist_cutoff = .2 * max(abs(pos.x), abs(pos.y)); // increase this value to increase antialiasing float dist_cutoff = $antialias; float len = length(camera_right); //camera_right = normalize(camera_right); //camera_up = normalize(camera_up); camera_right = camera_right/len; camera_up = camera_up/len; vec4 p1 = $transform(vec4($vertex_center.xyz + camera_right*pos.x + camera_up*pos.y, 1.)); vec4 p2 = $transform(vec4($vertex_center,1)); float dist = length(p1.xy/p1.w-p2.xy/p2.w); // if antialias and far away zoomed out, keep sprite size constant and shrink texture... 
// else adjust sprite size if (($antialias>0) && (dist<dist_cutoff)) { float scale = dist_cutoff/dist; //tex = .5+(tex-.5)*clamp(scale,1,10); tex = .5+(tex-.5); camera_right = camera_right*scale; camera_up = camera_up*scale; v_scale_intensity = scale; } vec3 pos_real = $vertex_center.xyz + camera_right*pos.x + camera_up*pos.y; gl_Position = $transform(vec4(pos_real, 1.)); vec4 center = $transform(vec4($vertex_center,1)); v_z_center = center.z/center.w; $v_texcoords = tex; } """) ffunc = Function(""" varying float v_scale_intensity; varying float v_z_center; void apply() { gl_FragDepth = v_z_center; $texcoords; } """) self._texcoord_varying = Varying('v_texcoord', 'vec2') vfunc['inverse'] = vmat_inv vfunc['v_texcoords'] = self._texcoord_varying ffunc['texcoords'] = self._texcoord_varying self._texcoords_buffer = VertexBuffer( np.zeros((0, 2), dtype=np.float32) ) vfunc['texcoords'] = self._texcoords_buffer vfunc['antialias'] = float(antialias) self._centercoords_buffer = VertexBuffer( np.zeros((0, 3), dtype=np.float32)) self._sigmas_buffer = VertexBuffer( np.zeros((0, 3), dtype=np.float32)) vfunc['vertex_center'] = self._centercoords_buffer vfunc['sigmas'] = self._sigmas_buffer super().__init__(vcode=vfunc, vhook='post',fcode=ffunc, fhook='post') @property def centercoords(self): """The vertex center coordinates as an (N, 3) array of floats.""" return self._centercoords @centercoords.setter def centercoords(self, centercoords): self._centercoords = centercoords self._update_coords_buffer(centercoords) def _update_coords_buffer(self, centercoords): if self._attached and self._visual is not None: self._centercoords_buffer.set_data(centercoords[:,::-1], convert=True) @property def sigmas(self): """The vertex center coordinates as an (N, 3) array of floats.""" return self._sigmas @centercoords.setter def sigmas(self, sigmas): self._sigmas = sigmas self._update_sigmas_buffer(sigmas) def _update_sigmas_buffer(self, sigmas): if self._attached and self._visual is not None: self._sigmas_buffer.set_data(sigmas[:,::-1], convert=True) @property def texcoords(self): """The texture coordinates as an (N, 2) array of floats.""" return self._texcoords @texcoords.setter def texcoords(self, texcoords): self._texcoords = texcoords self._update_texcoords_buffer(texcoords) def _update_texcoords_buffer(self, texcoords): if self._attached or self._visual is not None: self._texcoords_buffer.set_data(texcoords[:,::-1], convert=True) def _attach(self, visual): # the full projection model view self.vshader['transform'] = visual.transforms.get_transform('visual', 'render') # the inverse of it self.vshader['transform_inv'] = visual.transforms.get_transform('render', 'visual') # the modelview self.vshader['camera_inv'] = visual.transforms.get_transform('document', 'scene') # inverse of it self.vshader['camera'] = visual.transforms.get_transform('scene', 'document') super()._attach(visual) class Particles(Surface): """ Billboarded particle layer that renders camera facing quads of given size Can be combined with other (e.g. 
texture) filter to create particle systems etc """ def __init__(self, coords, size=10, sigmas=(1,1,1), values=1, filter=ShaderFilter('gaussian'), antialias=False, **kwargs): kwargs.setdefault('shading', 'none') kwargs.setdefault('blending', 'additive') coords = np.asarray(coords) sigmas = np.asarray(sigmas, dtype=np.float32) if np.isscalar(values): values = values * np.ones(len(coords)) values = np.broadcast_to(values, len(coords)) size = np.broadcast_to(size, len(coords)) sigmas = np.broadcast_to(sigmas, (len(coords),3)) if not coords.ndim == 2 : raise ValueError(f'coords should be of shape (M,D)') if not len(size)==len(coords)==len(sigmas): raise ValueError() # add dummy z if 2d coords if coords.shape[1] == 2: coords = np.concatenate([np.zeros((len(coords),1)), coords], axis=-1) assert coords.shape[-1]==sigmas.shape[-1]==3 vertices, faces, texcoords = generate_billboards_2d(coords, size=size) # repeat values for each 4 vertices centercoords = np.repeat(coords, 4, axis=0) sigmas = np.repeat(sigmas, 4, axis=0) values = np.repeat(values, 4, axis=0) self._coords = coords self._centercoords = centercoords self._sigmas = sigmas self._size = size self._texcoords = texcoords self._billboard_filter = BillboardsFilter(antialias=antialias) self.filter = filter self._viewer = None super().__init__((vertices, faces, values), **kwargs) def _set_view_slice(self): """Sets the view given the indices to slice with.""" super()._set_view_slice() self._update_billboard_filter() def _update_billboard_filter(self): faces = self._view_faces.flatten() if self._billboard_filter._attached and len(faces)>0: self._billboard_filter.texcoords = self._texcoords[faces] self._billboard_filter.centercoords = self._centercoords[faces][:,-3:] self._billboard_filter.sigmas = self._sigmas[faces][:,-3:] @property def filter(self): """The filter property.""" return self._filter @filter.setter def filter(self, value): if value is None: value = () elif not isinstance(value, Iterable): value = (value,) self._filter = tuple(value) @property def _extent_data(self) -> np.ndarray: """Extent of layer in data coordinates. Returns ------- extent_data : array, shape (2, D) """ if len(self._coords) == 0: extrema =
np.full((2, self.ndim), np.nan)
numpy.full
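The prompt above calls a `generate_billboards_2d` helper that is referenced but never shown. As a hedged illustration only, here is a minimal numpy sketch of what such a helper could look like; the exact vertex layout, names and conventions are assumptions, not the dataset's actual implementation:

import numpy as np

def generate_billboards_2d(coords, size):
    # Expand N particle centers into 4 quad corners, 2 triangles and texture
    # coordinates each (illustrative layout; the real helper may differ).
    # coords: (N, D) centers, size: scalar or (N,) per-particle sizes.
    coords = np.asarray(coords, dtype=np.float32)
    size = np.broadcast_to(np.asarray(size, dtype=np.float32), len(coords))
    corners = np.array([(-.5, -.5), (.5, -.5), (.5, .5), (-.5, .5)],
                       dtype=np.float32)
    # each center repeated once per corner; the scaled corner offsets are
    # baked into the last two coordinates (the shader re-expands them along
    # camera_right/camera_up)
    vertices = np.repeat(coords, 4, axis=0)
    vertices[:, -2:] += (size[:, None, None] * corners[None]).reshape(-1, 2)
    quad = np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32)  # two triangles
    faces = (quad[None] + 4 * np.arange(len(coords), dtype=np.uint32)[:, None]).reshape(-1, 3)
    texcoords = np.tile(corners + .5, (len(coords), 1))   # quad uv in [0, 1]
    return vertices, faces, texcoords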
""" Implement the Numpy backend, and collect timing information with different parameters <NAME> August 26th, 2021 I have set myself beyond the pale. I am nothing. I am hardly human anymore. """ import numpy as np import pickle import time import sys """ ######################################################################################################################## NETWORK STEP Update all of the neural states for 1 timestep """ def stepAll(inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m, refCtr, refPeriod, GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity, outputSpikeConnectivity, R=20): """ All components are present :param inputConnectivity: Matrix describing routing of input currents :param inputVals: Value of input currents (nA) :param Ulast: Vector of neural states at the previous timestep (mV) :param timeFactorMembrane: Vector of constant parameters for each neuron (dt/Cm) :param Gm: Vector of membrane conductances (uS) :param Ib: Vector of bias currents (nA) :param thetaLast: Firing threshold at the previous timestep (mV) :param timeFactorThreshold: Vector of constant parameters for each neuron (dt/tauTheta) :param theta0: Vector of initial firing thresholds (mV) :param m: Vector of threshold adaptation ratios :param refCtr: Vector to store remaining timesteps in the refractory period :param refPeriod: Vector of refractory periods :param GmaxNon: Matrix of maximum nonspiking synaptic conductances (uS) :param GmaxSpk: Matrix of maximum spiking synaptic conductances (uS) :param Gspike: Matrix of spiking synaptic conductances (uS) :param timeFactorSynapse: Matrix of constant parameters for each synapse (dt/tau_syn) :param DelE: Matrix of synaptic reversal potentials :param outputVoltageConnectivity: Matrix describing routes to output nodes :param outputSpikeConnectivity: Matrix describing routes to output nodes :param R: Neural range (mV) :return: u, u_last, theta_last, g_spike, refCtr, outputVoltages """ start = time.time() Iapp = np.matmul(inputConnectivity,inputVals) # Apply external current sources to their destinations Gnon = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon)) Gspike = Gspike * (1 - timeFactorSynapse) Gsyn = Gnon + Gspike Isyn = np.sum(Gsyn * DelE, axis=1) - Ulast * np.sum(Gsyn, axis=1) U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + Isyn + Iapp) # Update membrane potential theta = thetaLast + timeFactorThreshold * (-thetaLast + theta0 + m * Ulast) # Update the firing thresholds spikes = np.sign(np.minimum(0, theta + U * (-1 + refCtr))) # Compute which neurons have spiked Gspike = np.maximum(Gspike, (-spikes) * GmaxSpk) # Update the conductance of connections which spiked U = U * (spikes + 1) # Reset the membrane voltages of neurons which spiked refCtr = np.maximum(0, refCtr - spikes * (refPeriod + 1) - 1) # Update refractory periods outputVoltages = np.matmul(outputVoltageConnectivity, U) # Copy desired neural quantities to output nodes outputSpikes = np.matmul(outputSpikeConnectivity, spikes) # Copy desired neural quantities to output nodes Ulast = np.copy(U) # Copy the current membrane voltage to be the past value thetaLast = np.copy(theta) # Copy the current threshold value to be the past value end = time.time() return U, Ulast, thetaLast, Gspike, refCtr, outputVoltages, outputSpikes, end-start def stepNoRef(inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m, GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, 
DelE, outputVoltageConnectivity, outputSpikeConnectivity, R=20): """ There is no refractory period :param inputConnectivity: Matrix describing routing of input currents :param inputVals: Value of input currents (nA) :param Ulast: Vector of neural states at the previous timestep (mV) :param timeFactorMembrane: Vector of constant parameters for each neuron (dt/Cm) :param Gm: Vector of membrane conductances (uS) :param Ib: Vector of bias currents (nA) :param thetaLast: Firing threshold at the previous timestep (mV) :param timeFactorThreshold: Vector of constant parameters for each neuron (dt/tauTheta) :param theta0: Vector of initial firing thresholds (mV) :param m: Vector of threshold adaptation ratios :param GmaxNon: Matrix of maximum nonspiking synaptic conductances (uS) :param GmaxSpk: Matrix of maximum spiking synaptic conductances (uS) :param Gspike: Matrix of spiking synaptic conductances (uS) :param timeFactorSynapse: Matrix of constant parameters for each synapse (dt/tau_syn) :param DelE: Matrix of synaptic reversal potentials :param outputVoltageConnectivity: Matrix describing routes to output nodes :param outputSpikeConnectivity: Matrix describing routes to output nodes :param R: Range of neural activity (mV) :return: u, u_last, theta_last, g_spike, outputVoltages, outputSpikes """ start = time.time() Iapp = np.matmul(inputConnectivity,inputVals) # Apply external current sources to their destinations Gnon = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon)) Gspike = Gspike * (1 - timeFactorSynapse) Gsyn = Gnon + Gspike Isyn = np.sum(Gsyn * DelE, axis=1) - Ulast * np.sum(Gsyn, axis=1) U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + Isyn + Iapp) # Update membrane potential theta = thetaLast + timeFactorThreshold * (-thetaLast + theta0 + m * Ulast) # Update the firing thresholds spikes = np.sign(np.minimum(0, theta - U)) # Compute which neurons have spiked Gspike = np.maximum(Gspike, (-spikes) * GmaxSpk) # Update the conductance of connections which spiked U = U * (spikes + 1) # Reset the membrane voltages of neurons which spiked outputVoltages = np.matmul(outputVoltageConnectivity, U) # Copy desired neural quantities to output nodes outputSpikes = np.matmul(outputSpikeConnectivity, spikes) # Copy desired neural quantities to output nodes Ulast = np.copy(U) # Copy the current membrane voltage to be the past value thetaLast = np.copy(theta) # Copy the current threshold value to be the past value end = time.time() return U, Ulast, thetaLast, Gspike, outputVoltages, outputSpikes, end - start def stepNoSpike(inputConnectivity,inputVals,Ulast,timeFactorMembrane,Gm,Ib,GmaxNon,DelE,outputConnectivity,R=20): """ No neurons can be spiking :param inputConnectivity: Matrix describing routing of input currents :param inputVals: Value of input currents (nA) :param Ulast: Vector of neural states at the previous timestep (mV) :param timeFactorMembrane: Vector of constant parameters for each neuron (dt/Cm) :param Gm: Vector of membrane conductances (uS) :param Ib: Vector of bias currents (nA) :param GmaxNon: Matrix of maximum nonspiking synaptic conductances (uS) :param DelE: Matrix of synaptic reversal potentials :param outputConnectivity: Matrix describing routes to output nodes :param R: Range of neural activity (mV) :return: u, u_last, outputNodes """ start = time.time() Iapp = np.matmul(inputConnectivity,inputVals) # Apply external current sources to their destinations Gsyn = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon)) Isyn = np.sum(Gsyn * DelE, axis=1) - Ulast * np.sum(Gsyn, 
axis=1) U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + Isyn + Iapp) # Update membrane potential outputNodes = np.matmul(outputConnectivity,U) # Copy desired neural quantities to output nodes Ulast = np.copy(U) # Copy the current membrane voltage to be the past value end = time.time() return U, Ulast, outputNodes,end-start """ ######################################################################################################################## NETWORK CONSTRUCTION Construct testing networks using specifications """ def constructAll(dt, numNeurons, probConn, perIn, perOut, perSpike, seed=0): """ All elements are present :param dt: Simulation timestep (ms) :param numNeurons: Number of neurons in the network :param probConn: Percent of network which is connected :param perIn: Percent of input nodes in the network :param perOut: Percent of output nodes in the network :param perSpike: Percent of neurons which are spiking :param seed: Random seed :return: All of the parameters required to run a network """ # Inputs numInputs = int(perIn*numNeurons) if numInputs == 0: numInputs = 1 inputVals = np.zeros(numInputs)+1.0 inputConnectivity = np.zeros([numNeurons,numInputs]) + 1 # Construct neurons Ulast = np.zeros(numNeurons) numSpike = int(perSpike*numNeurons) Cm = np.zeros(numNeurons) + 5.0 # membrane capacitance (nF) Gm = np.zeros(numNeurons) + 1.0 # membrane conductance (uS) Ib = np.zeros(numNeurons) + 10.0 # bias current (nA) timeFactorMembrane = dt/Cm # Threshold stuff theta0 = np.zeros(numNeurons) for i in range(numNeurons): if i >= numSpike: theta0[i] = sys.float_info.max else: theta0[i] = 1.0 thetaLast = np.copy(theta0) m = np.zeros(numNeurons) tauTheta = np.zeros(numNeurons)+1.0 timeFactorThreshold = dt/tauTheta # Refractory period refCtr = np.zeros(numNeurons) refPeriod = np.zeros(numNeurons)+1 # Synapses GmaxNon = np.zeros([numNeurons,numNeurons]) GmaxSpk = np.zeros([numNeurons,numNeurons]) Gspike = np.zeros([numNeurons,numNeurons]) DelE = np.zeros([numNeurons,numNeurons]) tauSyn = np.zeros([numNeurons, numNeurons])+1 np.random.seed(seed) for row in range(numNeurons): for col in range(numNeurons): rand = np.random.uniform() if rand < probConn: DelE[row][col] = 100 if theta0[col] < sys.float_info.max: GmaxSpk[row][col] = 1 else: GmaxNon[row][col] = 1 tauSyn[row][col] = 2 timeFactorSynapse = dt/tauSyn # Outputs numOutputs = int(perOut*numNeurons) if numOutputs == 0: numOutputs = 1 outputVoltageConnectivity = np.zeros([numOutputs,numNeurons]) for i in range(numOutputs): outputVoltageConnectivity[i][i] = 1 outputSpikeConnectivity = np.copy(outputVoltageConnectivity) return (inputConnectivity,inputVals,Ulast,timeFactorMembrane,Gm,Ib,thetaLast,timeFactorThreshold,theta0,m,refCtr, refPeriod,GmaxNon,GmaxSpk,Gspike,timeFactorSynapse,DelE,outputVoltageConnectivity,outputSpikeConnectivity) def constructNoRef(dt,numNeurons,perConn,perIn,perOut,perSpike,seed=0): """ No refractory period :param dt: Simulation timestep (ms) :param numNeurons: Number of neurons in the network :param perConn: Percent of network which is connected :param perIn: Percent of input nodes in the network :param perOut: Percent of output nodes in the network :param perSpike: Percent of neurons which are spiking :param seed: Random seed :return: All of the parameters required to run a network """ # Inputs numInputs = int(perIn*numNeurons) inputVals = np.zeros(numInputs)+1.0 inputConnectivity = np.zeros([numNeurons,numInputs]) + 1 # Construct neurons Ulast = np.zeros(numNeurons) numSpike = int(perSpike*numNeurons) Cm = 
np.zeros(numNeurons) + 5.0  # membrane capacitance (nF)
    Gm = np.zeros(numNeurons) + 1.0  # membrane conductance (uS)
    Ib = np.zeros(numNeurons) + 10.0  # bias current (nA)
    timeFactorMembrane = dt/Cm

    # Threshold stuff
    theta0 = np.zeros(numNeurons)
    for i in range(numNeurons):
        if i >= numSpike:
            theta0[i] = sys.float_info.max
        else:
            theta0[i] = 1.0
    thetaLast = np.copy(theta0)
    m = np.zeros(numNeurons)
    tauTheta = np.zeros(numNeurons)+1.0
    timeFactorThreshold = dt/tauTheta

    # Synapses
    GmaxNon = np.zeros([numNeurons,numNeurons])
    GmaxSpk = np.zeros([numNeurons,numNeurons])
    Gspike = np.zeros([numNeurons,numNeurons])
    DelE = np.zeros([numNeurons,numNeurons])
    tauSyn = np.zeros([numNeurons, numNeurons])+1
    numSyn = int(perConn*numNeurons*numNeurons)
    np.random.seed(seed)
    for row in range(numNeurons):
        for col in range(numNeurons):
            rand = np.random.uniform()
            if rand < perConn:
                DelE[row][col] = 100
                if theta0[col] < sys.float_info.max:
                    GmaxSpk[row][col] = 1
                else:
                    GmaxNon[row][col] = 1
                tauSyn[row][col] = 2
    timeFactorSynapse = dt/tauSyn

    # Outputs
    numOutputs = int(perOut*numNeurons)
    outputVoltageConnectivity = np.zeros([numOutputs, numNeurons])
    for i in range(numOutputs):
        outputVoltageConnectivity[i][i] = 1
    outputSpikeConnectivity = np.copy(outputVoltageConnectivity)

    return (inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0,
            m, GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity, outputSpikeConnectivity)

def constructNoSpike(dt,numNeurons,perConn,perIn,perOut,seed=0):
    """
    No spiking elements
    :param dt: Simulation timestep (ms)
    :param numNeurons: Number of neurons in the network
    :param perConn: Percent of network which is connected
    :param perIn: Percent of input nodes in the network
    :param perOut: Percent of output nodes in the network
    :param seed: Random seed
    :return: All of the parameters required to run a network
    """
    # Inputs
    numInputs = int(perIn*numNeurons)
    inputVals = np.zeros(numInputs)+1.0
    inputConnectivity =
np.zeros([numNeurons,numInputs])
numpy.zeros
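As a quick smoke test of the update equations in the prompt above, the sketch below hand-builds a two-neuron, purely non-spiking network and steps it with `stepNoSpike`; every parameter value here is an illustrative assumption, not a benchmark setting.

import numpy as np

dt = 0.1                                    # timestep (ms)
Ulast = np.zeros(2)                         # initial membrane voltages (mV)
timeFactorMembrane = dt / np.full(2, 5.0)   # dt/Cm with Cm = 5 nF
Gm = np.ones(2)                             # membrane conductances (uS)
Ib = np.full(2, 10.0)                       # bias currents (nA)
GmaxNon = np.array([[0.0, 0.0],             # neuron 0 excites neuron 1
                    [1.0, 0.0]])
DelE = np.array([[0.0, 0.0],                # synaptic reversal potentials (mV)
                 [100.0, 0.0]])
inputConnectivity = np.ones((2, 1))         # one input current fed to both neurons
inputVals = np.array([1.0])                 # external current (nA)
outputConnectivity = np.eye(2)              # read out both neurons

for _ in range(1000):
    U, Ulast, outputs, elapsed = stepNoSpike(
        inputConnectivity, inputVals, Ulast, timeFactorMembrane,
        Gm, Ib, GmaxNon, DelE, outputConnectivity)
print(outputs)  # neuron 0 settles near (Ib + Iapp) / Gm; neuron 1 sits higher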
import logging
import os
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union, cast

import numpy as np
from tqdm import tqdm

from alibi.api.defaults import DEFAULT_DATA_CFRL, DEFAULT_META_CFRL
from alibi.api.interfaces import Explainer, Explanation, FitMixin
from alibi.explainers.backends.cfrl_base import (generate_empty_condition, get_classification_reward,
                                                 get_hard_distribution, identity_function)
from alibi.utils.frameworks import Framework, has_pytorch, has_tensorflow

if TYPE_CHECKING:
    import tensorflow
    import torch

if has_pytorch:
    # import pytorch backend
    from alibi.explainers.backends.pytorch import cfrl_base as pytorch_base_backend

if has_tensorflow:
    # import tensorflow backend
    from alibi.explainers.backends.tensorflow import cfrl_base as tensorflow_base_backend

# define logger
logger = logging.getLogger(__name__)


class NormalActionNoise:
    """ Normal noise generator. """

    def __init__(self, mu: float, sigma: float) -> None:
        """
        Constructor.

        Parameters
        ----------
        mu
            Mean of the normal noise.
        sigma
            Standard deviation of the noise.
        """
        self.mu = mu
        self.sigma = sigma

    def __call__(self, shape: Tuple[int, ...]) -> np.ndarray:
        """
        Generates normal noise with the appropriate mean and standard deviation.

        Parameters
        ----------
        shape
            Shape of the array to be generated.

        Returns
        -------
        Normal noise with the appropriate mean, standard deviation and shape.
        """
        return self.mu + self.sigma * np.random.randn(*shape)

    def __repr__(self) -> str:
        return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)


class ReplayBuffer:
    """
    Circular experience replay buffer for `CounterfactualRL` (DDPG). When the buffer is filled, then the oldest
    experience is replaced by the new one (FIFO). The experience batch size is kept constant and inferred when the
    first batch of data is stored. Allowing a flexible batch size can generate `tensorflow` warnings due to
    `tf.function` retracing, which can lead to a drop in performance.
    """
    X: np.ndarray  #: Inputs buffer.
    Y_m: np.ndarray  #: Model's prediction buffer.
    Y_t: np.ndarray  #: Counterfactual targets buffer.
    Z: np.ndarray  #: Input embedding buffer.
    Z_cf_tilde: np.ndarray  #: Noised counterfactual embedding buffer.
    R_tilde: np.ndarray  #: Noised counterfactual rewards buffer.

    def __init__(self, size: int = 1000) -> None:
        """
        Constructor.

        Parameters
        ----------
        size
            Dimension of the buffer in batch-size units. This means that the total memory allocated is
            proportional to `size x batch_size`, where `batch_size` is inferred from the first array to be stored.
        """
        self.idx = 0  # cursor for the buffer
        self.len = 0  # current length of the buffer
        self.size = size  # buffer's maximum capacity
        self.batch_size = 0  # batch size (inferred during `append`)
        self.C: Optional[np.ndarray] = None  # buffer for the conditional tensor

    def append(self,
               X: np.ndarray,
               Y_m: np.ndarray,
               Y_t: np.ndarray,
               Z: np.ndarray,
               Z_cf_tilde: np.ndarray,
               C: Optional[np.ndarray],
               R_tilde: np.ndarray,
               **kwargs) -> None:
        """
        Adds experience to the replay buffer. When the buffer is filled, then the oldest experience is replaced
        by the new one (FIFO).

        Parameters
        ----------
        X
            Input array.
        Y_m
            Model's prediction class of `X`.
        Y_t
            Counterfactual target class.
        Z
            Input's embedding.
        Z_cf_tilde
            Noised counterfactual embedding.
        C
            Conditional array.
        R_tilde
            Noised counterfactual reward array.
        **kwargs
            Other arguments. Not used.
        """
        # Initialize the buffers.
        if not hasattr(self, 'X'):
            self.batch_size = X.shape[0]

            # Allocate memory.
self.X = np.zeros((self.size * self.batch_size, *X.shape[1:]), dtype=np.float32)
            self.Y_m = np.zeros((self.size * self.batch_size, *Y_m.shape[1:]), dtype=np.float32)
            self.Y_t = np.zeros((self.size * self.batch_size, *Y_t.shape[1:]), dtype=np.float32)
            self.Z = np.zeros((self.size * self.batch_size, *Z.shape[1:]), dtype=np.float32)
            self.Z_cf_tilde = np.zeros((self.size * self.batch_size, *Z_cf_tilde.shape[1:]), dtype=np.float32)
            self.R_tilde = np.zeros((self.size * self.batch_size, *R_tilde.shape[1:]), dtype=np.float32)

            # Conditional tensor can be `None` when no condition is included. If it is not `None`, allocate memory.
            if C is not None:
                self.C = np.zeros((self.size * self.batch_size, *C.shape[1:]), dtype=np.float32)

        # Increase the length of the buffer if not full.
        if self.len < self.size:
            self.len += 1

        # Compute the first position where to add most recent experience.
        start = self.batch_size * self.idx

        # Add new data / replace old experience (note that a full batch is added at once).
        self.X[start:start + self.batch_size] = X
        self.Y_m[start:start + self.batch_size] = Y_m
        self.Y_t[start:start + self.batch_size] = Y_t
        self.Z[start:start + self.batch_size] = Z
        self.Z_cf_tilde[start:start + self.batch_size] = Z_cf_tilde
        self.R_tilde[start:start + self.batch_size] = R_tilde

        if C is not None:
            self.C = cast(np.ndarray, self.C)  # helping mypy out as self.C cannot be None at this point
            self.C[start:start + self.batch_size] = C

        # Compute the next index. Note that if the buffer reached its maximum capacity, for the next iteration
        # we start replacing old batches.
        self.idx = (self.idx + 1) % self.size

    def sample(self) -> Dict[str, Optional[np.ndarray]]:
        """
        Sample a batch of experience from the replay buffer.

        Returns
        -------
        A batch of experience. For a description of the keys and values returned, see parameter descriptions \
        in the :py:meth:`alibi.explainers.cfrl_base.ReplayBuffer.append` method. The batch size returned is the same \
        as the one passed in :py:meth:`alibi.explainers.cfrl_base.ReplayBuffer.append`.
        """
        # Generate random indices to be sampled.
        rand_idx = np.random.randint(low=0, high=self.len * self.batch_size, size=(self.batch_size,))

        # Extract data from buffers.
        X = self.X[rand_idx]  # input array
        Y_m = self.Y_m[rand_idx]  # model's prediction
        Y_t = self.Y_t[rand_idx]  # counterfactual target
        Z = self.Z[rand_idx]  # input embedding
        Z_cf_tilde = self.Z_cf_tilde[rand_idx]  # noised counterfactual embedding
        C = self.C[rand_idx] if (self.C is not None) else None  # conditional array if it exists
        R_tilde = self.R_tilde[rand_idx]  # noised counterfactual reward

        return {
            "X": X,
            "Y_m": Y_m,
            "Y_t": Y_t,
            "Z": Z,
            "Z_cf_tilde": Z_cf_tilde,
            "C": C,
            "R_tilde": R_tilde
        }


DEFAULT_BASE_PARAMS = {
    "act_noise": 0.1,
    "act_low": -1.0,
    "act_high": 1.0,
    "replay_buffer_size": 1000,
    "batch_size": 100,
    "num_workers": 4,
    "shuffle": True,
    "exploration_steps": 100,
    "update_every": 1,
    "update_after": 10,
    "train_steps": 100000,
    "backend": "tensorflow",
    "encoder_preprocessor": identity_function,
    "decoder_inv_preprocessor": identity_function,
    "reward_func": get_classification_reward,
    "postprocessing_funcs": [],
    "conditional_func": generate_empty_condition,
    "callbacks": [],
    "actor": None,
    "critic": None,
    "optimizer_actor": None,
    "optimizer_critic": None,
    "lr_actor": 1e-3,
    "lr_critic": 1e-3,
    "actor_hidden_dim": 256,
    "critic_hidden_dim": 256,
}
"""
Default Counterfactual with Reinforcement Learning parameters.

    - ``'act_noise'`` : ``float`` - standard deviation for the normal noise added to the actor for exploration.
- ``'act_low'`` : ``float`` - minimum action value. Each action component takes values between \
    `[act_low, act_high]`.

    - ``'act_high'`` : ``float`` - maximum action value. Each action component takes values between \
    `[act_low, act_high]`.

    - ``'replay_buffer_size'`` : ``int`` - dimension of the replay buffer in `batch_size` units. The total memory \
    allocated is proportional to `size x batch_size`.

    - ``'batch_size'`` : ``int`` - training batch size.

    - ``'num_workers'`` : ``int`` - number of workers used by the data loader if the ``'pytorch'`` backend is selected.

    - ``'shuffle'`` : ``bool`` - whether to shuffle the datasets every epoch.

    - ``'exploration_steps'`` : ``int`` - number of exploration steps. For the first `exploration_steps`, the \
    counterfactual embedding coordinates are sampled uniformly at random from the interval `[act_low, act_high]`.

    - ``'update_every'`` : ``int`` - number of steps that should elapse between gradient updates. Regardless of the \
    waiting steps, the ratio of waiting steps to gradient steps is locked to 1.

    - ``'update_after'`` : ``int`` - number of steps to wait before starting to update the actor and critic. This \
    ensures that the replay buffer is full enough for useful updates.

    - ``'backend'`` : ``str`` - backend to be used: ``'tensorflow'`` | ``'pytorch'``. Default ``'tensorflow'``.

    - ``'train_steps'`` : ``int`` - number of train steps.

    - ``'encoder_preprocessor'`` : ``Callable`` - encoder/auto-encoder data preprocessor. Transforms the input data \
    into the format expected by the auto-encoder. By default, the identity function.

    - ``'decoder_inv_preprocessor'`` : ``Callable`` - decoder/auto-encoder data inverse preprocessor. Transforms data \
    from the auto-encoder output format to the original input format. Before calling the prediction function, the \
    data is inverse preprocessed to match the original input format. By default, the identity function.

    - ``'reward_func'`` : ``Callable`` - element-wise reward function. By default, considers a classification task and \
    checks if the counterfactual prediction label matches the target label. Note that this is element-wise, so a \
    tensor is expected to be returned.

    - ``'postprocessing_funcs'`` : ``List[Postprocessing]`` - list of post-processing functions. The functions are \
    applied in order, from low to high index. Non-differentiable post-processing can be applied. Each function \
    expects as arguments `X_cf` - the counterfactual instance, `X` - the original input instance and `C` - the \
    conditional vector, and returns the post-processed counterfactual instance `X_cf_pp` which is passed as `X_cf` \
    to the following functions. By default, no post-processing is applied (empty list).

    - ``'conditional_func'`` : ``Callable`` - generates a conditional vector given a pre-processed input instance. By \
    default, the function returns ``None`` which is equivalent to no conditioning.

    - ``'callbacks'`` : ``List[Callback]`` - list of callback functions applied at the end of each training step.

    - ``'actor'`` : ``Optional[Union[tensorflow.keras.Model, torch.nn.Module]]`` - actor network.

    - ``'critic'`` : ``Optional[Union[tensorflow.keras.Model, torch.nn.Module]]`` - critic network.

    - ``'optimizer_actor'`` : ``Optional[Union[tensorflow.keras.optimizers.Optimizer, torch.optim.Optimizer]]`` - \
    actor optimizer.

    - ``'optimizer_critic'`` : ``Optional[Union[tensorflow.keras.optimizers.Optimizer, torch.optim.Optimizer]]`` - \
    critic optimizer.

    - ``'lr_actor'`` : ``float`` - actor learning rate.
- ``'lr_critic'`` : ``float`` - critic learning rate.

    - ``'actor_hidden_dim'`` : ``int`` - actor hidden layer dimension.

    - ``'critic_hidden_dim'`` : ``int`` - critic hidden layer dimension.
"""

_PARAM_TYPES = {
    "primitives": [
        "act_noise", "act_low", "act_high", "replay_buffer_size", "batch_size", "num_workers", "shuffle",
        "exploration_steps", "update_every", "update_after", "train_steps", "backend", "actor_hidden_dim",
        "critic_hidden_dim",
    ],
    "complex": [
        "encoder_preprocessor", "decoder_inv_preprocessor", "reward_func", "postprocessing_funcs",
        "conditional_func", "callbacks", "actor", "critic", "optimizer_actor", "optimizer_critic",
        "encoder", "decoder", "predictor", "sparsity_loss", "consistency_loss",
    ]
}
"""
Parameter types for serialization

    - ``'primitives'`` : List[str] - list of parameters having primitive data types.

    - ``'complex'`` : List[str] - list of parameters having complex data types (e.g., functions, models,\
    optimizers etc.).
"""


class CounterfactualRL(Explainer, FitMixin):
    """ Counterfactual Reinforcement Learning. """

    def __init__(self,
                 predictor: Callable[[np.ndarray], np.ndarray],
                 encoder: 'Union[tensorflow.keras.Model, torch.nn.Module]',
                 decoder: 'Union[tensorflow.keras.Model, torch.nn.Module]',
                 coeff_sparsity: float,
                 coeff_consistency: float,
                 latent_dim: Optional[int] = None,
                 backend: str = "tensorflow",
                 seed: int = 0,
                 **kwargs):
        """
        Constructor.

        Parameters
        ----------
        predictor
            A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs. For a
            classification task, the second dimension of the output should match the number of classes. Thus, the
            output can be either a soft label distribution or a hard label distribution (i.e. one-hot encoding)
            without affecting the performance since `argmax` is applied to the predictor's output.
        encoder
            Pretrained encoder network.
        decoder
            Pretrained decoder network.
        coeff_sparsity
            Sparsity loss coefficient.
        coeff_consistency
            Consistency loss coefficient.
        latent_dim
            Auto-encoder latent dimension. Can be omitted if the actor network is user specified.
        backend
            Deep learning backend: ``'tensorflow'`` | ``'pytorch'``. Default ``'tensorflow'``.
        seed
            Seed for reproducibility. The results are not reproducible for the ``'tensorflow'`` backend.
        **kwargs
            Used to replace any default parameter from :py:data:`alibi.explainers.cfrl_base.DEFAULT_BASE_PARAMS`.
        """
        super().__init__(meta=deepcopy(DEFAULT_META_CFRL))

        # Clean backend flag.
        backend = backend.strip().lower()

        # Verify the backend is installed.
        CounterfactualRL._verify_backend(backend)

        # Select backend.
        self.backend = self._select_backend(backend, **kwargs)

        # Set seed for reproducibility.
        self.backend.set_seed(seed)

        # Validate arguments.
        self.params = self._validate_kwargs(predictor=predictor,
                                            encoder=encoder,
                                            decoder=decoder,
                                            latent_dim=latent_dim,
                                            coeff_sparsity=coeff_sparsity,
                                            coeff_consistency=coeff_consistency,
                                            backend=backend,
                                            seed=seed,
                                            **kwargs)

        # If pytorch backend and a GPU is available, send everything to the GPU.
        if self.params["backend"] == Framework.PYTORCH:
            from alibi.explainers.backends.pytorch.cfrl_base import get_device
            self.params.update({"device": get_device()})

            # Send encoder and decoder to device.
            self.params["encoder"].to(self.params["device"])
            self.params["decoder"].to(self.params["device"])

            # Send actor and critic to device.
            self.params["actor"].to(self.params["device"])
            self.params["critic"].to(self.params["device"])

        # Update meta-data with all parameters passed (correct and incorrect).
self.meta["params"].update(CounterfactualRL._serialize_params(self.params)) @staticmethod def _serialize_params(params: Dict[str, Any]) -> Dict[str, Any]: """ Parameter serialization. The function replaces object by human-readable representation. Parameters ---------- params Dictionary of parameters to be serialized. Returns ------- Human-readable replacement of data. """ meta = dict() for param, value in params.items(): if param in _PARAM_TYPES["primitives"]: # primitive types are passed as they are meta.update({param: value}) elif param in _PARAM_TYPES["complex"]: if isinstance(value, list): # each complex element in the list is serialized by replacing it with a name meta.update({param: [CounterfactualRL._get_name(v) for v in value]}) else: # complex element is serialized by replacing it with a name meta.update({param: CounterfactualRL._get_name(value)}) else: # Unknown parameters are passed as they are. TODO: think of a better way to handle this. meta.update({param: value}) return meta @staticmethod def _get_name(a: Any) -> str: """ Constructs a name for the given object. If the object has as built-in name, the name is return. If the object has a built-in class name, the name of the class is returned. Otherwise ``'unknown'`` is returned. Parameters ---------- a Object to give the name for. Returns ------- Name of the object. """ if hasattr(a, "__name__"): return a.__name__ if hasattr(a, "__class__"): return str(a.__class__) return "unknown" @staticmethod def _verify_backend(backend: str): """ Verifies if the backend is supported. Parameters ---------- backend Backend to be checked. """ # Check if pytorch/tensorflow backend supported. if (backend == Framework.PYTORCH and not has_pytorch) or \ (backend == Framework.TENSORFLOW and not has_tensorflow): raise ImportError(f'{backend} not installed. Cannot initialize and run the CounterfactualRL' f' with {backend} backend.') # Allow only pytorch and tensorflow. elif backend not in [Framework.PYTORCH, Framework.TENSORFLOW]: raise NotImplementedError(f'{backend} not implemented. Use `tensorflow` or `pytorch` instead.') def _select_backend(self, backend: str, **kwargs): """ Selects the backend according to the `backend` flag. Parameters --------- backend Deep learning backend: ``'tensorflow'`` | ``'pytorch'``. Default `tensorflow`. **kwargs Other arguments. Not used. """ return tensorflow_base_backend if backend == "tensorflow" else pytorch_base_backend def _validate_kwargs(self, predictor: Callable, encoder: 'Union[tensorflow.keras.Model, torch.nn.Module]', decoder: 'Union[tensorflow.keras.Model, torch.nn.Module]', latent_dim: Optional[int], coeff_sparsity: float, coeff_consistency: float, backend: str, seed: int, **kwargs): """ Validates arguments. Parameters ---------- predictor. A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs. encoder Pretrained encoder network. decoder Pretrained decoder network. latent_dim Auto-encoder latent dimension. coeff_sparsity Sparsity loss coefficient. coeff_consistency Consistency loss coefficient. backend Deep learning backend: ``'tensorflow'`` | ``'pytorch'``. **kwargs Other arguments. """ # Copy default parameters. params = deepcopy(DEFAULT_BASE_PARAMS) # Update parameters with mandatory arguments params.update({ "encoder": encoder, "decoder": decoder, "latent_dim": latent_dim, "predictor": predictor, "coeff_sparsity": coeff_sparsity, "coeff_consistency": coeff_consistency, "backend": backend, "seed": seed, }) # Add actor if not user-specified. 
not_specified = {"actor": False, "critic": False} if "actor" not in kwargs: not_specified["actor"] = True params["actor"] = self.backend.get_actor(hidden_dim=params["actor_hidden_dim"], output_dim=params["latent_dim"]) if "critic" not in kwargs: not_specified["critic"] = True params["critic"] = self.backend.get_critic(hidden_dim=params["critic_hidden_dim"]) # Add optimizers if not user-specified. optimizers = ["optimizer_actor", "optimizer_critic"] for optim in optimizers: # extract model in question model_name = optim.split("_")[1] model = params[model_name] lr = params["lr_" + model_name] # If the optimizer is user-specified, just update the params if optim in kwargs: params.update({optim: kwargs[optim]}) if self.params["backend"] == Framework.PYTORCH and not_specified[model_name]: raise ValueError(f"Can not specify {optim} when {model_name} not specified for pytorch backend.") # If the optimizer is not user-specified, it need to be initialized. The initialization is backend specific. elif params['backend'] == Framework.TENSORFLOW: params.update({optim: self.backend.get_optimizer(lr=lr)}) else: params.update({optim: self.backend.get_optimizer(model=model, lr=lr)}) # Add sparsity loss if not user-specified. params["sparsity_loss"] = self.backend.sparsity_loss if "sparsity_loss" not in kwargs \ else kwargs["sparsity_loss"] # Add consistency loss if not user-specified. params["consistency_loss"] = self.backend.consistency_loss if "consistency_loss" not in kwargs \ else kwargs["consistency_loss"] # Validate arguments. allowed_keys = set(params.keys()) provided_keys = set(kwargs.keys()) common_keys = allowed_keys & provided_keys # Check if some provided keys are incorrect if len(common_keys) < len(provided_keys): incorrect_keys = ", ".join(provided_keys - common_keys) logger.warning("The following keys are incorrect: " + incorrect_keys) # Update default parameters and all parameters params.update({key: kwargs[key] for key in common_keys}) return params @classmethod def load(cls, path: Union[str, os.PathLike], predictor: Any) -> "Explainer": return super().load(path, predictor) def reset_predictor(self, predictor: Any) -> None: """ Resets the predictor. Parameters ---------- predictor New predictor. """ self.params["predictor"] = predictor self.meta["params"].update(CounterfactualRL._serialize_params(self.params)) def save(self, path: Union[str, os.PathLike]) -> None: super().save(path) def fit(self, X: np.ndarray) -> "Explainer": """ Fit the model agnostic counterfactual generator. Parameters ---------- X Training data array. Returns ------- self The explainer itself. """ # Define boolean flag for initializing actor and critic network for Tensorflow backend. initialize_actor_critic = False # Define replay buffer (this will deal only with numpy arrays). replay_buff = ReplayBuffer(size=self.params["replay_buffer_size"]) # Define noise variable. noise = NormalActionNoise(mu=0, sigma=self.params["act_noise"]) # Define data generator. data_generator = self.backend.data_generator(X=X, **self.params) data_iter = iter(data_generator) for step in tqdm(range(self.params["train_steps"])): # Sample training data. try: data = next(data_iter) except StopIteration: if hasattr(data_generator, "on_epoch_end"): # This is just for tensorflow backend. data_generator.on_epoch_end() data_iter = iter(data_generator) data = next(data_iter) # Add None condition if condition does not exist. if "C" not in data: data["C"] = None # Compute input embedding. 
Z = self.backend.encode(X=data["X"], **self.params) data.update({"Z": Z}) # Compute counterfactual embedding. Z_cf = self.backend.generate_cf(**data, **self.params) data.update({"Z_cf": Z_cf}) # Add noise to the counterfactual embedding. Z_cf_tilde = self.backend.add_noise(noise=noise, step=step, **data, **self.params) data.update({"Z_cf_tilde": Z_cf_tilde}) # Decode noised counterfactual and apply postprocessing step to X_cf_tilde. X_cf_tilde = self.backend.decode(Z=data["Z_cf_tilde"], **self.params) for pp_func in self.params["postprocessing_funcs"]: # Post-process noised counterfactual. X_cf_tilde = pp_func(self.backend.to_numpy(X_cf_tilde), self.backend.to_numpy(data["X"]), self.backend.to_numpy(data["C"])) data.update({"X_cf_tilde": X_cf_tilde}) # Compute model's prediction on the noised counterfactual X_cf_tilde = self.params["decoder_inv_preprocessor"](self.backend.to_numpy(data["X_cf_tilde"])) Y_m_cf_tilde = self.params["predictor"](X_cf_tilde) # Compute reward. R_tilde = self.params["reward_func"](self.backend.to_numpy(Y_m_cf_tilde), self.backend.to_numpy(data["Y_t"])) data.update({"R_tilde": R_tilde, "Y_m_cf_tilde": Y_m_cf_tilde}) # Store experience in the replay buffer. data = {key: self.backend.to_numpy(data[key]) for key in data.keys()} replay_buff.append(**data) if step % self.params['update_every'] == 0 and step > self.params["update_after"]: for i in range(self.params['update_every']): # Sample batch of experience form the replay buffer. sample = replay_buff.sample() # Initialize actor and critic. This is required for tensorflow in order to reinitialize the # explainer object and call fit multiple times. If the models are not reinitialized, the # error: "tf.function-decorated function tried to create variables on non-first call" is raised. # This is due to @tf.function and building the model for the first time in a compiled function if not initialize_actor_critic and self.params["backend"] == Framework.TENSORFLOW: self.backend.initialize_actor_critic(**sample, **self.params) self.backend.initialize_optimizers(**sample, **self.params) initialize_actor_critic = True if "C" not in sample: sample["C"] = None # Decode counterfactual. This procedure has to be done here and not in the experience loop # since the actor is updating but old experience is used. Thus, the decoding of the counterfactual # will not correspond to the latest actor network. Remember that the counterfactual is used # for the consistency loss. The counterfactual generation is performed here due to @tf.function # which does not allow all post-processing functions. Z_cf = self.backend.generate_cf(Z=self.backend.to_tensor(sample["Z"], **self.params), Y_m=self.backend.to_tensor(sample["Y_m"], **self.params), Y_t=self.backend.to_tensor(sample["Y_t"], **self.params), C=self.backend.to_tensor(sample["C"], **self.params), **self.params) X_cf = self.backend.decode(Z=Z_cf, **self.params) for pp_func in self.params["postprocessing_funcs"]: # Post-process counterfactual. X_cf = pp_func(self.backend.to_numpy(X_cf), self.backend.to_numpy(sample["X"]), self.backend.to_numpy(sample["C"])) # Add counterfactual instance to the sample to be used in the update function for consistency loss sample.update({"Z_cf": self.backend.to_numpy(Z_cf), "X_cf": self.backend.to_numpy(X_cf)}) # Update critic by one-step gradient descent. losses = self.backend.update_actor_critic(**sample, **self.params) # Convert all losses from tensors to numpy arrays. 
losses = {key: self.backend.to_numpy(losses[key]).item() for key in losses.keys()}

                    # Call all callbacks.
                    for callback in self.params["callbacks"]:
                        callback(step=step, update=i, model=self, sample=sample, losses=losses)

        return self

    @staticmethod
    def _validate_target(Y_t: Optional[np.ndarray]):
        """
        Validate the targets by checking the dimensions.

        Parameters
        ----------
        Y_t
            Targets to be checked.
        """
        if Y_t is None:
            raise ValueError("Target cannot be `None`.")

        if len(Y_t.shape) not in [1, 2]:
            raise ValueError(f"Target should have at least 1 and at most 2 dimensions."
                             f" Found {len(Y_t.shape)} instead.")

    @staticmethod
    def _validate_condition(C: Optional[np.ndarray]):
        """
        Validate condition vector.

        Parameters
        ----------
        C
            Condition vector.
        """
        if (C is not None) and len(C.shape) != 2:
            raise ValueError(f"Condition array should have 2 dimensions. Found {len(C.shape)} instead.")

    @staticmethod
    def _is_classification(pred: np.ndarray) -> bool:
        """
        Check if the prediction task is classification by looking at the model's prediction shape.

        Parameters
        ----------
        pred
            Model's prediction.

        Returns
        -------
        ``True`` if the prediction has 2 dimensions and the second dimension is greater than 1. ``False`` otherwise.
        """
        return len(pred.shape) == 2 and pred.shape[1] > 1

    def explain(self,  # type: ignore[override]
                X: np.ndarray,
                Y_t: np.ndarray,
                C: Optional[np.ndarray] = None,
                batch_size: int = 100) -> Explanation:
        """
        Explains the given input instances.

        Parameters
        ----------
        X
            Instances to be explained.
        Y_t
            Counterfactual targets.
        C
            Conditional vectors. If ``None``, it means that no conditioning was used during training (i.e. the
            `conditional_func` returns ``None``).
        batch_size
            Batch size to be used when generating counterfactuals.

        Returns
        -------
        explanation
            `Explanation` object containing the counterfactual with additional metadata as attributes. \
            See usage at `CFRL examples`_ for details.

            .. _CFRL examples:
                https://docs.seldon.io/projects/alibi/en/latest/methods/CFRL.html
        """
        # General validation.
        self._validate_target(Y_t)
        self._validate_condition(C)

        # Check the number of target labels.
        if Y_t.shape[0] != 1 and Y_t.shape[0] != X.shape[0]:
            raise ValueError("The number of target labels should be 1 or equal to the number of samples in X.")

        # Check the number of conditional vectors.
        if (C is not None) and C.shape[0] != 1 and C.shape[0] != X.shape[0]:
            raise ValueError("The number of conditional vectors should be 1 or equal to the number of samples in X.")

        # Transform target into a 2D array.
        Y_t = Y_t.reshape(Y_t.shape[0], -1)

        # Repeat the same label to match the number of input instances.
        if Y_t.shape[0] == 1:
            Y_t =
np.tile(Y_t, (X.shape[0], 1))
numpy.tile
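A small, self-contained exercise of the `ReplayBuffer` defined in the prompt above, filled with dummy arrays; the shapes are illustrative assumptions only.

import numpy as np

buff = ReplayBuffer(size=10)
batch_size, input_dim, latent_dim = 4, 8, 2
for _ in range(3):  # three appended batches; buffers are allocated on the first call
    buff.append(
        X=np.random.randn(batch_size, input_dim).astype(np.float32),
        Y_m=np.random.randn(batch_size, 1).astype(np.float32),
        Y_t=np.random.randn(batch_size, 1).astype(np.float32),
        Z=np.random.randn(batch_size, latent_dim).astype(np.float32),
        Z_cf_tilde=np.random.randn(batch_size, latent_dim).astype(np.float32),
        C=None,  # no conditioning
        R_tilde=np.random.randn(batch_size, 1).astype(np.float32),
    )
sample = buff.sample()  # dict of arrays, one batch_size-sized batch
assert sample["X"].shape == (batch_size, input_dim)
assert sample["C"] is None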
"""LQR, iLQR and MPC.""" import numpy as np import scipy #.linalg.solve_continuous_are as func from ipdb import set_trace as debug def prRed(prt): print("\033[91m {}\033[00m" .format(prt)) def prGreen(prt): print("\033[92m {}\033[00m" .format(prt)) def prYellow(prt): print("\033[93m {}\033[00m" .format(prt)) def prLightPurple(prt): print("\033[94m {}\033[00m" .format(prt)) def prPurple(prt): print("\033[95m {}\033[00m" .format(prt)) def prCyan(prt): print("\033[96m {}\033[00m" .format(prt)) def prLightGray(prt): print("\033[97m {}\033[00m" .format(prt)) def prBlack(prt): print("\033[98m {}\033[00m" .format(prt)) def simulate_dynamics(env, x, u, dt=1e-5): """Step simulator to see how state changes. Parameters ---------- env: gym.core.Env The environment you are try to control. In this homework the 2 link arm. x: np.array The state to test. When approximating A you will need to perturb this. u: np.array The command to test. When approximating B you will need to perturb this. dt: float, optional The time step to simulate. In general the smaller the time step the more accurate the gradient approximation. Returns ------- xdot: np.array This is the **CHANGE** in x. i.e. (x[1] - x[0]) / dt If you return x you will need to solve a different equation in your LQR controller. """ env.dt = dt env.state = x.copy() x1, _, _, _ = env.step(u) xdot = (x1-x) / dt return xdot def approximate_A(env, x, u, delta=1e-5, dt=1e-5): """Approximate A matrix using finite differences. Parameters ---------- env: gym.core.Env The environment you are try to control. In this homework the 2 link arm. x: np.array The state to test. You will need to perturb this. u: np.array The command to test. delta: float How much to perturb the state by. dt: float, optional The time step to simulate. In general the smaller the time step the more accurate the gradient approximation. Returns ------- A: np.array The A matrix for the dynamics at state x and command u. """ state_dim = x.shape[0] action_dim = u.shape[0] A = np.zeros((state_dim, state_dim)) xs_inc = np.tile(x,(state_dim,1)) + delta * np.eye(state_dim) xs_dec = np.tile(x,(state_dim,1)) - delta * np.eye(state_dim) for idx, (x_inc, x_dec) in enumerate(zip(xs_inc, xs_dec)): # calculate partial differential w.r.t. x state_inc = simulate_dynamics(env, x_inc, u.copy(), dt) state_dec = simulate_dynamics(env, x_dec, u.copy(), dt) A[:, idx] = (state_inc - state_dec) / (2 * delta) return A def approximate_B(env, x, u, delta=1e-5, dt=1e-5): """Approximate B matrix using finite differences. Parameters ---------- env: gym.core.Env The environment you are try to control. In this homework the 2 link arm. x: np.array The state to test. u: np.array The command to test. You will ned to perturb this. delta: float How much to perturb the state by. dt: float, optional The time step to simulate. In general the smaller the time step the more accurate the gradient approximation. Returns ------- B: np.array The B matrix for the dynamics at state x and command u. """ state_dim = x.shape[0] action_dim = u.shape[0] B = np.zeros((state_dim, action_dim)) us_inc = np.tile(u,(action_dim,1)) + delta * np.eye(action_dim) us_dec = np.tile(u,(action_dim,1)) - delta * np.eye(action_dim) # print(us_dec, us_inc) for idx, (u_inc, u_dec) in enumerate(zip(us_inc, us_dec)): # calculate partial differential w.r.t. 
u
        state_inc = simulate_dynamics(env, x.copy(), u_inc, dt)
        state_dec = simulate_dynamics(env, x.copy(), u_dec, dt)
        B[:, idx] = (state_inc - state_dec) / (2 * delta)
    return B


u = None

def calc_lqr_input(env, sim_env, debug_flag=False):
    """Calculate the optimal control input for the given state.

    If you are following the API and simulate_dynamics is returning xdot,
    then you should use the scipy.linalg.solve_continuous_are function to
    solve the Riccati equations.

    Parameters
    ----------
    env: gym.core.Env
      This is the true environment you will execute the computed
      commands on. Use this environment to get the Q and R values as
      well as the state.
    sim_env: gym.core.Env
      A copy of the env class. Use this to simulate the dynamics when
      doing finite differences.

    Returns
    -------
    u: np.array
      The command to execute at this point.
    """
    # prepare whatever we need later
    global u
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    x = env.state.copy()
    goal_q = env.goal_q.copy()
    goal_dq = env.goal_dq.copy()
    Q = env.Q.copy()
    R = env.R.copy()
    if u is None:
        u = np.zeros(action_dim)

    # calculate the A and B matrices
    A = approximate_A(sim_env, x.copy(), u.copy())
    if debug_flag : prGreen('A={}'.format(A))
    assert(A.shape == (state_dim, state_dim))
    B = approximate_B(sim_env, x.copy(), u.copy())
    if debug_flag : prYellow('B={}'.format(B))
    assert(B.shape == (state_dim, action_dim))

    # solve CARE; return a zero action if the solver raises an error
    try:
        X = scipy.linalg.solve_continuous_are(A, B, Q, R)
    except Exception:
        return np.zeros(action_dim)

    # calculate the K gain
    K = np.dot(np.linalg.pinv(R), np.dot(B.T, X))
    if debug_flag : prRed('K={}'.format(K))

    # calculate the action
    x_target = x[:2]-goal_q
    u = np.hstack((x_target, x[2:]))
    u = -
np.dot(K, u)
numpy.dot
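The same Riccati/gain computation used in `calc_lqr_input` above, run standalone on a double integrator instead of the two-link arm (an illustrative system, not part of the homework environment):

import numpy as np
import scipy.linalg

A = np.array([[0.0, 1.0],        # x' = A x + B u (double integrator)
              [0.0, 0.0]])
B = np.array([[0.0],
              [1.0]])
Q = np.eye(2)                     # state cost
R = np.eye(1)                     # control cost
X = scipy.linalg.solve_continuous_are(A, B, Q, R)
K = np.dot(np.linalg.pinv(R), np.dot(B.T, X))   # same gain formula as above
x = np.array([1.0, 0.0])          # unit position error, zero velocity
print(K, -np.dot(K, x))           # K is approx [1, sqrt(3)] for this system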
# Copyright 2021 The Distla Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Tests for the distributed eigh solver. """ import pytest import random from jax import lax import jax.numpy as jnp import numpy as np from distla_core.linalg.eigh import eigh from distla_core.utils import misc from distla_core.utils import pops from distla_core.linalg.utils import testutils Ns = [24, 48] # TODO: Test fat SVD m_factors = [1, 2] p_szs = [256, ] seeds = [1, ] precisions = [lax.Precision.HIGHEST, ] bools = [False, True] padding = [None, 4] minimum_ranks = [4, 48] def _subspace_angle(subspace_1, subspace_2, orth1=True, orth2=True): if orth1: subspace_1, _ = np.linalg.qr(subspace_1) if orth2: subspace_2, _ = np.linalg.qr(subspace_2) product = np.dot(subspace_1.conj().T, subspace_2) return np.arccos(np.linalg.norm(product, ord=-2)) @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("m_factor", m_factors) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) def test_similarity_transform(N, m_factor, p_sz, precision): """ Tests that eigh._similarity_transform indeed computes B^H @ A @ B, by comparing up against jnp.dot at the given precision up to eps * |B|_F^2 |A|_F. Here A is (N * m_factor, N * m_factor) and B is (N * m_factor, N). """ np.random.seed(10) A = np.random.randn(N * m_factor, N * m_factor).astype(np.float32) B = np.random.randn(N * m_factor, N).astype(np.float32) normA = np.linalg.norm(A) normB = np.linalg.norm(B) C = jnp.dot(A, B, precision=precision) C = jnp.dot(B.conj().T, C, precision=precision) A = pops.distribute(A) B = pops.distribute(B) @pops.pmap def test_f(A, V): return eigh._similarity_transform(A, V, p_sz, precision=precision) result = test_f(A, B) result = pops.undistribute(result) atol = normB * normA * normB * testutils.eps(precision) np.testing.assert_allclose(C, result, atol=atol) @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) @pytest.mark.parametrize("canonical", bools) @pytest.mark.parametrize("seed", seeds) def test_split_spectrum(N, p_sz, precision, canonical, seed): """ Tests that split spectrum correctly divides a Hermitian matrix into blocks. 
""" np.random.seed(seed) H = np.random.randn(N, N).astype(np.float32) H = jnp.array(0.5 * (H + H.conj().T)) ev_exp, eV_exp = jnp.linalg.eigh(H) Hd = pops.distribute(H) @pops.pmap def median_ev_func(H): return pops.trace(H) / N if canonical: sigma = N // 2 else: sigma = median_ev_func(Hd)[0] split_1, split_2 = eigh.split_spectrum( Hd, N, sigma, prior_isometry=None, p_sz=p_sz, precision=precision, canonical=canonical) H1d, V1d, k1, _ = split_1 H2d, V2d, k2, _ = split_2 H1 = pops.undistribute(H1d) V1 = pops.undistribute(V1d) H2 = pops.undistribute(H2d) V2 = pops.undistribute(V2d) H1 = H1[:k1, :k1] H2 = H2[:k2, :k2] V1 = V1[:, :k1] V2 = V2[:, :k2] ev_Hm, _ = np.linalg.eigh(H1) ev_Hp, _ = np.linalg.eigh(H2) proj_1 = np.dot(np.dot(V1.conj().T, H), V1) ev_p1, _ = np.linalg.eigh(proj_1) proj_2 = np.dot(np.dot(V2.conj().T, H), V2) ev_p2, _ = np.linalg.eigh(proj_2) eps = testutils.eps(precision) np.testing.assert_allclose(jnp.sort(ev_p1), jnp.sort(ev_Hm), atol=10 * eps * jnp.linalg.norm(ev_Hm)) np.testing.assert_allclose(jnp.sort(ev_p2), jnp.sort(ev_Hp), atol=10 * eps * jnp.linalg.norm(ev_Hp)) np.testing.assert_allclose(ev_exp[:k1], ev_Hm, atol=10 * eps * jnp.linalg.norm(H1)) np.testing.assert_allclose(ev_exp[k1:], ev_Hp, atol=10 * eps * jnp.linalg.norm(H2)) @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) @pytest.mark.parametrize("seed", seeds) def test_project_H(N, p_sz, precision, seed): np.random.seed(seed) H = np.random.randn(N, N).astype(np.float32) H = 0.5 * (H + H.conj().T) ev, eV = np.linalg.eigh(H) k_target = N // 2 P = np.dot(eV[:, :k_target], eV[:, :k_target].conj().T) P_bar = np.dot(eV[:, k_target:], eV[:, k_target:].conj().T) rank_1, dim_1, rank_2, dim_2 = eigh._padded_ranks(N, k_target) out = eigh._project_H( pops.distribute(P), pops.distribute(H), rank_1, dim_1, rank_2, dim_2, None, p_sz, precision) H1, Vk1, info1, H2, Vk2, info2 = out Vk1 = pops.undistribute(Vk1) Vk2 = pops.undistribute(Vk2) ev1 = np.linalg.eigvalsh(pops.undistribute(H1)) ev2 = np.linalg.eigvalsh(pops.undistribute(H2)) eps = testutils.eps(precision) P_recon = np.dot(Vk1, Vk1.conj().T) Pbar_recon = np.dot(Vk2, Vk2.conj().T) Pscale = 10 * eps * jnp.linalg.norm(P) Hscale = 10 * eps * jnp.linalg.norm(H) np.testing.assert_allclose(P_recon, P, atol=Pscale) np.testing.assert_allclose(Pbar_recon, P_bar, atol=Pscale) angle_1 = _subspace_angle(eV[:, :k_target], Vk1) angle_2 = _subspace_angle(eV[:, k_target:], Vk2) assert angle_1 < 1E-3 assert angle_2 < 1E-3 np.testing.assert_allclose(ev1, ev[:k_target], atol=Hscale) np.testing.assert_allclose(ev2, ev[k_target:], atol=Hscale) @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) @pytest.mark.parametrize("seed", seeds) @pytest.mark.parametrize("canonical", bools) @pytest.mark.parametrize("padding", padding) @pytest.mark.parametrize("minimum_rank", minimum_ranks) @pytest.mark.parametrize("dtype", [np.float32, ]) def test_eigh_full( N, p_sz, precision, seed, canonical, padding, minimum_rank, dtype): """ Tests that the results of eigh satisfy the eigenvalue equation. """ if padding is not None: unpadded_dim = N - padding else: unpadded_dim = N np.random.seed(seed) H = np.random.randn(N, N).astype(dtype) H[unpadded_dim:, :] = 0. H[:, unpadded_dim:] = 0. 
H = jnp.array(0.5 * (H + H.conj().T)) ev_exp, eV_exp = jnp.linalg.eigh(H) Hp = pops.distribute(H) evs, V = eigh.eigh( Hp, p_sz=p_sz, precision=precision, canonical=canonical, unpadded_dim=unpadded_dim, minimum_rank=minimum_rank) V = pops.undistribute(V) testutils.test_unitarity( V[:unpadded_dim, :unpadded_dim], eps_coef=jnp.linalg.norm(H) * 10) HV = jnp.dot(H, V, precision=lax.Precision.HIGHEST) vV = evs * V angle = _subspace_angle(eV_exp, V) assert angle < 1E-3 eps = testutils.eps(precision) atol = jnp.linalg.norm(H) * 10 * eps np.testing.assert_allclose(ev_exp, jnp.sort(evs), atol=10 * atol) np.testing.assert_allclose(HV, vV, atol=10 * atol) # TODO: Support padding in SVD. @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("m_factor", m_factors) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) @pytest.mark.parametrize("seed", seeds) @pytest.mark.parametrize("canonical", bools) @pytest.mark.parametrize("minimum_rank", minimum_ranks) def test_svd(N, m_factor, p_sz, precision, seed, canonical, minimum_rank): """ Tests that svd produces a valid SVD. """ np.random.seed(seed) matrix = np.random.randn(N * m_factor, N).astype(np.float32) Unp, S_svd, Vnp = np.linalg.svd(matrix) Vnp = Vnp.conj().T eps = 100 * testutils.eps(precision, dtype=matrix.dtype) U, S, V = eigh.svd( pops.distribute(matrix), p_sz=p_sz, precision=precision, canonical=canonical, minimum_rank=minimum_rank) U = pops.undistribute(U) V = pops.undistribute(V) angle_U = _subspace_angle(U, Unp) angle_V = _subspace_angle(V, Vnp) assert angle_U < 1E-3 # Note: I pulled this number out of a hat. assert angle_V < 1E-3 # The singular values agree with the numpy results. S_sorted = np.sort(S)[::-1] np.testing.assert_allclose( S_svd, S_sorted, atol=eps * np.sum(np.abs(S_sorted))) # Vectors are unitary. testutils.test_unitarity(U, eps_coef=10 * jnp.linalg.norm(U)**2) testutils.test_unitarity(V, eps_coef=10 * jnp.linalg.norm(V)**2) # U @ S @ V^H recovers the result. recon = np.dot((U * S), V.conj().T) np.testing.assert_allclose( matrix, recon, atol=2 * eps * jnp.linalg.norm(matrix)) def _initialize_finalize(N, p_sz): """ Initializes dummy results from eigh._eigh_list for eigh.finalize to act upon. """ k_counter = 0 klist = [] while k_counter < (N - p_sz): k = random.randrange(p_sz) + 1 k_counter += k klist.append(k) klist.append(N - k_counter) Hlist = [] Vlist = [] evs = [] eVs = np.zeros((N, N), dtype=np.float32) k_counter = 0 for i, k in enumerate(klist): H = np.random.randn(k, k).astype(np.float32) H = 0.5 * (H + H.conj().T) evs_i, eVs_i = np.linalg.eigh(H) evs.extend(evs_i) Hbig = np.zeros((p_sz, p_sz), dtype=np.float32) Hbig[:k, :k] = H Hbig = pops.distribute(Hbig) V = np.random.randn(N, k).astype(np.float32) V, _ = np.linalg.qr(V, mode="reduced") eVs_i = np.dot(V, eVs_i) eVs[:, k_counter:k_counter + k] = eVs_i k_counter += k Vbig = np.zeros((N, p_sz), dtype=np.float32) Vbig[:N, :k] = V Vbig = pops.distribute(Vbig) Hlist.append(Hbig) Vlist.append(Vbig) return Hlist, Vlist, klist, evs, eVs @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) def test_finalize(N, p_sz, precision): """ Tests eigh.finalize (which converts the output from eigh._eigh_list into the final eigh results). 
""" largest_dimension = max(pops.NROWS, pops.NCOLS) if p_sz % largest_dimension != 0: p_sz += misc.distance_to_next_divisor(p_sz, largest_dimension) Hlist, Vlist, klist, evs, Vs = _initialize_finalize(N, p_sz) evs_out, Vs_out = eigh._finalize(Hlist, Vlist, klist, N, precision) evs_out = np.array(evs_out) Vs_out = pops.undistribute(Vs_out) eps = testutils.eps(precision) atol = 10 * N * eps np.testing.assert_allclose(evs, evs_out, atol=atol) np.testing.assert_allclose(np.abs(Vs), np.abs(Vs_out), atol=atol) @pytest.mark.parametrize("N", Ns) @pytest.mark.parametrize("p_sz", p_szs) @pytest.mark.parametrize("precision", precisions) @pytest.mark.parametrize("seed", seeds) @pytest.mark.parametrize("canonical", bools) @pytest.mark.parametrize("dtype", [np.float32, ]) @pytest.mark.parametrize("padding", padding) @pytest.mark.parametrize("minimum_rank", minimum_ranks) def test_matrix_function( N, p_sz, precision, seed, canonical, dtype, padding, minimum_rank ): """ Tests that matrix_function properly compules a * A ** 2. """ a = 3.0 np.random.seed(seed) if padding is None: unpadded_dim = N else: unpadded_dim = N - padding H =
np.random.randn(N, N)
numpy.random.randn
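A numpy-only sanity check of the `_subspace_angle` helper used throughout these tests; the one-dimensional subspaces are chosen so the expected principal angle is known in closed form (an illustrative check, not one of the parametrized cases):

import numpy as np

theta = 0.3
u = np.array([[1.0], [0.0], [0.0]])                       # span{e1}
v = np.array([[np.cos(theta)], [np.sin(theta)], [0.0]])   # e1 tilted by theta
w = np.array([[0.0], [0.0], [1.0]])                       # orthogonal to both
assert abs(_subspace_angle(u, v) - theta) < 1e-6          # known tilt recovered
assert abs(_subspace_angle(u, w) - np.pi / 2) < 1e-6      # orthogonal spans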
import numpy as np import high_dimensional_sampling.functions as func import pytest def test_testfunction_rastrigin(): function = func.Rastrigin() minima = [[0, 0]] value_minima = 0 # Validate the default configuration assert function.get_dimensionality() == 2 assert function.a == 10 # Validate function properties assert function.is_bounded() is True assert function.is_differentiable() is True # Validate minima y = function(np.array(minima)) assert np.sum(1.0*(np.around(y, 8) == value_minima)) == len(y) # Validate that these minima are indeed minima x = np.random.rand(10000, function.get_dimensionality()) x *= function.ranges[:, 1] - function.ranges[:, 0] x += function.ranges[:, 0] y = function(x) assert np.sum(1.0*(np.around(y, 8) == value_minima)) <= len(y) # Validate output shape assert y.shape == (10000, 1) assert function(x, True).shape == (10000, function.get_dimensionality()) def test_testfunction_rosenbrock(): function = func.Rosenbrock() minima = [[1]*function.get_dimensionality()] value_minima = 0 # Validate the default configuration assert function.get_dimensionality() == 2 # Validate function properties assert function.is_bounded() is False assert function.is_differentiable() is False # Validate minima y = function(np.array(minima)) assert np.sum(1.0*(np.around(y, 8) == value_minima)) == len(y) # Validate error is raised when dimensionality < 2 with pytest.raises(Exception): function = func.Rosenbrock(dimensionality=1) # Validate that these minima are indeed minima x = np.random.rand(10000, function.get_dimensionality()) y = function(x) assert np.sum(1.0*(np.around(y, 8) == value_minima)) <= len(y) # Validate output shape assert y.shape == (10000, 1) with pytest.raises(func.NoDerivativeError): function(x, True) def test_testfunction_beale(): function = func.Beale() minima = [[3.0, 0.5]] value_minima = 0 # Validate the default configuration assert function.get_dimensionality() == 2 # Validate function properties assert function.is_bounded() is True assert function.is_differentiable() is False # Validate minima y = function(np.array(minima)) assert np.sum(1.0*(np.around(y, 8) == value_minima)) == len(y) # Validate that these minima are indeed minima x = np.random.rand(10000, function.get_dimensionality()) x *= function.ranges[:, 1] - function.ranges[:, 0] x += function.ranges[:, 0] y = function(x) assert np.sum(1.0*(
np.around(y, 8)
numpy.around
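The tests above treat each test function as a callable that returns values of shape (n, 1), with known minima and, for bounded functions, a `ranges` attribute. For reference, here is a minimal sketch of the Rastrigin function those assertions assume; the real high_dimensional_sampling.functions.Rastrigin class also manages dimensionality, ranges and derivatives, so this is illustrative only.

import numpy as np

def rastrigin(x, a=10):
    # Vectorised Rastrigin over rows of x (shape (n, d)); global minimum
    # of 0 at the origin. The (n, 1) output shape mirrors the tests'
    # `y.shape == (10000, 1)` assertion.
    x = np.asarray(x, dtype=float)
    d = x.shape[1]
    y = a * d + np.sum(x**2 - a * np.cos(2 * np.pi * x), axis=1)
    return y.reshape(-1, 1)

y = rastrigin(np.array([[0.0, 0.0]]))
assert np.around(y, 8)[0, 0] == 0  # the minimum checked by the first test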
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups

This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps space
group numbers and space group names to the corresponding space group
objects.

.. moduleauthor:: <NAME> <<EMAIL>>

"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

import numpy as N


class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
            each consisting of a tuple of three integer arrays
            (rot, tn, td), where rot is the rotation matrix and tn/td
            are the numerator and denominator of the translation vector.
            The transformations are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        self.transposed_rotations = N.array([N.transpose(t[0])
                                             for t in transformations])
        self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
                                            for t in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to
                 the reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p


space_groups = {}

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg

transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den =
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(8, 'C 1 m 1', transformations) space_groups[8] = sg space_groups['C 1 m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(9, 'C 1 c 1', transformations) space_groups[9] = sg space_groups['C 1 c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(10, 'P 1 2/m 1', transformations) space_groups[10] = sg space_groups['P 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(11, 'P 1 21/m 1', transformations) space_groups[11] = sg space_groups['P 1 21/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(12, 'C 1 2/m 1', transformations) space_groups[12] = sg space_groups['C 1 2/m 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(13, 'P 1 2/c 1', transformations) space_groups[13] = sg space_groups['P 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(14, 'P 1 21/c 1', transformations) space_groups[14] = sg space_groups['P 1 21/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(15, 'C 1 2/c 1', transformations) space_groups[15] = sg space_groups['C 1 2/c 1'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(16, 'P 2 2 2', transformations) space_groups[16] = sg space_groups['P 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(17, 'P 2 2 21', transformations) space_groups[17] = sg space_groups['P 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(18, 'P 21 21 2', transformations) space_groups[18] = sg space_groups['P 21 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(19, 'P 21 21 21', transformations) space_groups[19] = sg space_groups['P 21 21 21'] = sg transformations 
= [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(20, 'C 2 2 21', transformations) space_groups[20] = sg space_groups['C 2 2 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(21, 'C 2 2 2', transformations) space_groups[21] = sg space_groups['C 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(22, 'F 2 2 2', transformations) space_groups[22] = sg space_groups['F 2 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(23, 'I 2 2 2', transformations) space_groups[23] = sg space_groups['I 2 2 2'] = sg 
transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(24, 'I 21 21 21', transformations) space_groups[24] = sg space_groups['I 21 21 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(25, 'P m m 2', transformations) space_groups[25] = sg space_groups['P m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(26, 'P m c 21', transformations) space_groups[26] = sg space_groups['P m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(27, 'P c c 2', transformations) space_groups[27] = sg space_groups['P c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(28, 'P m a 2', transformations) space_groups[28] = sg space_groups['P m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(29, 'P c a 21', transformations) space_groups[29] = sg space_groups['P c a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(30, 'P n c 2', transformations) space_groups[30] = sg space_groups['P n c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(31, 'P m n 21', transformations) space_groups[31] = sg space_groups['P m n 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(32, 'P b a 2', transformations) space_groups[32] = sg space_groups['P b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(33, 'P n a 21', transformations) space_groups[33] = sg space_groups['P n a 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(34, 'P n n 2', transformations) space_groups[34] = sg space_groups['P n n 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(35, 'C m m 2', 
transformations) space_groups[35] = sg space_groups['C m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(36, 'C m c 21', transformations) space_groups[36] = sg space_groups['C m c 21'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(37, 'C c c 2', transformations) space_groups[37] = sg space_groups['C c c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) 
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(38, 'A m m 2', transformations) space_groups[38] = sg space_groups['A m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(39, 'A b m 2', transformations) space_groups[39] = sg space_groups['A b m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = 
N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(40, 'A m a 2', transformations) space_groups[40] = sg space_groups['A m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(41, 'A b a 2', transformations) space_groups[41] = sg space_groups['A b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = 
N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(42, 'F m m 2', transformations) space_groups[42] = sg space_groups['F m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,1,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(43, 'F d d 2', transformations) space_groups[43] 
= sg space_groups['F d d 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(44, 'I m m 2', transformations) space_groups[44] = sg space_groups['I m m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(45, 'I b a 2', transformations) space_groups[45] = sg space_groups['I b a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(46, 'I m a 2', transformations) space_groups[46] = sg space_groups['I m a 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(47, 'P m m m', transformations) space_groups[47] = sg space_groups['P m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(48, 'P n n n :2', transformations) space_groups[48] = sg space_groups['P n n n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(49, 'P c c m', transformations) space_groups[49] = sg space_groups['P c c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(50, 'P b a n :2', transformations) space_groups[50] = sg space_groups['P b a n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
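# Note (added for readability, not generated data): the centred settings in
# this block repeat the full set of point operations once per centring
# translation instead of storing the centring separately. The C-centred
# groups above and below ('C m c m' through 'C c c a :2') therefore carry
# 16 operations each, and the face-centred 'F m m m' further on carries 32.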
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
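# Note (added for readability, not generated data): the tetragonal screw
# axes are distinguished purely by the fractional translation along c that
# accompanies the four-fold rotation: 'P 41' uses c/4, 'P 42' uses c/2 and
# 'P 43' uses 3c/4, as encoded in the trans_num/trans_den pairs of space
# groups 76-78 above. The body-centred variants ('I 4', 'I 41') add the
# (1/2, 1/2, 1/2) centring translation to every operation.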
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(80, 'I 41', transformations) space_groups[80] = sg space_groups['I 41'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(81, 'P -4', transformations) space_groups[81] = sg space_groups['P -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(82, 'I -4', transformations) space_groups[82] = sg space_groups['I -4'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(83, 'P 4/m', transformations) space_groups[83] = sg space_groups['P 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(84, 'P 42/m', transformations) space_groups[84] = sg space_groups['P 42/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(85, 'P 4/n :2', transformations) space_groups[85] = sg space_groups['P 4/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(86, 'P 42/n :2', transformations) space_groups[86] = sg space_groups['P 42/n :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(87, 'I 4/m', transformations) space_groups[87] = sg space_groups['I 4/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-3,-3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,5,5]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([3,3,3]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,-1,-1]) trans_den = 
N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([4,4,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(88, 'I 41/a :2', transformations) space_groups[88] = sg space_groups['I 41/a :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(89, 'P 4 2 2', transformations) space_groups[89] = sg space_groups['P 4 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(90, 'P 4 21 2', transformations) space_groups[90] = sg space_groups['P 4 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, 
trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(91, 'P 41 2 2', transformations) space_groups[91] = sg space_groups['P 41 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,3]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(92, 'P 41 21 2', transformations) space_groups[92] = sg space_groups['P 41 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(93, 'P 42 2 2', transformations) space_groups[93] = sg space_groups['P 42 2 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(94, 'P 42 21 2', transformations) space_groups[94] = sg space_groups['P 42 21 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,3]) trans_den = N.array([1,1,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(95, 'P 43 2 2', transformations) space_groups[95] = sg space_groups['P 43 2 2'] = sg 
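# Illustrative sketch, not part of the generated data: each transformation
# appended above is a (rot, trans_num, trans_den) triple, where rot is a
# 3x3 integer rotation matrix and the fractional translation is the
# element-wise quotient trans_num/trans_den.  Assuming `point` holds
# fractional coordinates and that SpaceGroup exposes the list it was given
# as `transformations` (an assumption about the class, not confirmed here),
# one operation could be applied like this:
#
#     rot, num, den = space_groups['P 43 2 2'].transformations[1]
#     image = N.dot(rot, point) + 1.*num/den   # 1.* forces float division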
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
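# Sketch of a consistency check (again assuming SpaceGroup keeps the list
# of operations it was constructed with): 'P 4 c c' is built above from the
# 8 operations of point group 4mm, while body-centred groups such as
# 'I 4/m' repeat every operation with the (1/2,1/2,1/2) centring
# translation added, doubling the count:
#
#     assert len(space_groups['P 4 c c'].transformations) == 8
#     assert len(space_groups['I 4/m'].transformations) == 16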
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num =
N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(119, 'I -4 m 2', transformations) space_groups[119] = sg space_groups['I -4 m 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(120, 'I -4 c 2', transformations) space_groups[120] = sg space_groups['I -4 c 2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(121, 'I -4 2 m', transformations) space_groups[121] = sg space_groups['I -4 2 m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot 
= N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,3]) trans_den = N.array([2,1,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,5]) trans_den = N.array([1,2,4]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(122, 'I -4 2 d', transformations) space_groups[122] = sg space_groups['I -4 2 d'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(123, 'P 4/m m m', transformations) space_groups[123] = sg space_groups['P 4/m m m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(124, 'P 4/m c c', transformations) space_groups[124] = sg space_groups['P 4/m c c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(125, 'P 4/n b m :2', transformations) space_groups[125] = sg space_groups['P 4/n b m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(126, 'P 4/n n c :2', transformations) space_groups[126] = sg space_groups['P 4/n n c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(127, 'P 4/m b m', transformations) space_groups[127] = sg space_groups['P 4/m b m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(128, 'P 4/m n c', transformations) space_groups[128] = sg space_groups['P 4/m n c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(129, 'P 4/n m m :2', transformations) space_groups[129] = sg space_groups['P 4/n m m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(130, 'P 4/n c c :2', transformations) space_groups[130] = sg 
space_groups['P 4/n c c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(131, 'P 42/m m c', transformations) space_groups[131] = sg space_groups['P 42/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(132, 'P 42/m c m', transformations) space_groups[132] = sg space_groups['P 42/m c m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(133, 'P 42/n b c :2', transformations) space_groups[133] = sg space_groups['P 42/n b c :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) 
trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(134, 'P 42/n n m :2', transformations) space_groups[134] = sg space_groups['P 42/n n m :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,-1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(135, 'P 42/m b c', transformations) space_groups[135] = sg space_groups['P 42/m b c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))

def _make_transformations(entries):
    # Build the transformation list of one space group. Each entry is
    # (rot, trans_num, trans_den): a 3x3 rotation matrix given row by row,
    # plus the numerator and denominator vectors of the fractional
    # translation. Same data as the expanded statements above, written once.
    transformations = []
    for rot, trans_num, trans_den in entries:
        rot = N.array(rot)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(trans_num), N.array(trans_den)))
    return transformations

transformations.extend(_make_transformations([
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
])
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
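# Illustration (not part of the generated tables): each (rot, trans_num,
# trans_den) entry maps a fractional coordinate x to rot*x plus the
# translation trans_num/trans_den, read modulo full lattice translations
# (so negative numerators such as [-1,-1,-1]/[2,2,2] are equivalent to
# +1/2 offsets). A minimal sketch, assuming N is the Numeric/numpy-style
# module imported at the top of this file:
#
#     rot, num, den = transformations[1]    # second op of I 4/m m m
#     x = N.array([0.1, 0.2, 0.3])
#     x_image = N.dot(rot, x) + 1.0 * num / den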
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
])
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
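# The two body-centered groups that follow (I 41/a m d and I 41/a c d,
# origin choice 2) contain 4_1 screw operations; these appear below as
# quarter-cell translations with denominator vectors of [4,4,4].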
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,1], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,5], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,-1], [4,4,4]),
])
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
])
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
])
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
])
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
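# The rhombohedral groups in the hexagonal setting (the ':H' symbols,
# starting with R 3 below) list every operation three times: once as such
# and once for each centering translation, (1/3,2/3,2/3) and (2/3,1/3,1/3).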
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
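# R -3 below combines the three R 3 rotations with their inversion-related
# counterparts and again repeats all six operations for the two
# rhombohedral centering translations, giving 18 entries.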
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
])
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
])
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
])
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
])
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg

transformations = _make_transformations([
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
])
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
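# R -3 m (and the following R -3 c) repeat their twelve point-group
# operations for both rhombohedral centering translations, so each group
# carries 36 entries.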
trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, 
trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(166, 'R -3 m :H', transformations) space_groups[166] = sg space_groups['R -3 m :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den 
= N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(167, 'R -3 c :H', transformations) space_groups[167] = sg space_groups['R -3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(168, 'P 6', transformations) space_groups[168] = sg space_groups['P 6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den =
N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(194, 'P 63/m m c', transformations) space_groups[194] = sg space_groups['P 63/m m c'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) 
rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(195, 'P 2 3', transformations) space_groups[195] = sg space_groups['P 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(196, 'F 2 3', transformations) space_groups[196] = sg space_groups['F 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(197, 'I 2 3', transformations) space_groups[197] = sg space_groups['I 2 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(198, 'P 21 3', transformations) space_groups[198] = sg space_groups['P 21 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,0]) trans_den = N.array([2,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,0]) trans_den = N.array([1,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = 
N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(199, 'I 21 3', transformations) space_groups[199] = sg space_groups['I 21 3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = 
N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(200, 'P m -3', transformations) space_groups[200] = sg space_groups['P m -3'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,0,1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,1,0]) trans_den = N.array([2,2,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,-1,-1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([-1,0,-1]) trans_den = N.array([2,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([-1,-1,0]) trans_den = N.array([2,2,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(201, 'P n -3 :2', transformations) space_groups[201] = sg space_groups['P n -3 :2'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,-1,0,0]) rot.shape = 
(3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,-1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,-1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,1,-1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,-1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,0,0,-1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,-1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,-1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,0,0,1,1,0,0]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,0,1,1,0,0,0,1,0]) rot.shape = (3, 3) trans_num = N.array([0,1,1]) trans_den = N.array([1,2,2]) transformations.append((rot, trans_num, trans_den)) rot =
N.array([0,1,0,0,0,1,1,0,0])
numpy.array
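# A minimal sketch of how one of the stored (rot, trans_num, trans_den) triples
# above could be applied to a fractional coordinate. The x' = R @ x + t (mod 1)
# convention and the helper name `apply_symmetry_op` are assumptions made for
# illustration; the table above only constructs the operators.
import numpy as np

def apply_symmetry_op(rot, trans_num, trans_den, point):
    # Translation is stored as a rational vector trans_num / trans_den.
    trans = trans_num.astype(float) / trans_den
    return (rot @ np.asarray(point, dtype=float) + trans) % 1.0

# Two-fold rotation about z combined with a half translation along z (a 2_1 screw axis):
rot = np.array([-1, 0, 0, 0, -1, 0, 0, 0, 1]).reshape(3, 3)
print(apply_symmetry_op(rot, np.array([0, 0, 1]), np.array([1, 1, 2]), [0.1, 0.2, 0.3]))
# -> [0.9, 0.8, 0.8]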
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import sys
import tarfile

import numpy as np
from six.moves import urllib
import tensorflow as tf

import resnet18_input

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir_train', '/home/oran/ILSVRC2012/ILSVRC2012_img_train/',
                           """Path to the ILSVRC2012 training data directory.""")
tf.app.flags.DEFINE_string('data_dir_eval', '/home/oran/ILSVRC2012/ILSVRC2012_img_val/',
                           """Path to the ILSVRC2012 validation data directory.""")
tf.app.flags.DEFINE_string('weights_dir', '/home/oran/workdir/resnet/full_precision/weights/',
                           """Path to the pre-trained weights.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")
tf.app.flags.DEFINE_float('wd', 1e-12,
                          """Weight decay for the reparametrization variables.""")
tf.app.flags.DEFINE_float('wd_weights', 0.00001,
                          """Weight decay for the full-precision weights.""")
tf.app.flags.DEFINE_float('dropout', 1.0,
                          """Dropout rate.""")
tf.app.flags.DEFINE_boolean('projection', True,
                            """Projection layer or zero padding.""")
tf.app.flags.DEFINE_boolean('use_pretrained', False,
                            """Whether to use the pre-trained weights or not.""")
tf.app.flags.DEFINE_boolean('hot_start', True,
                            """Whether this is a new run or not.""")
tf.app.flags.DEFINE_string('hot_start_dir', '/home/oran/logdir/resnet18_3/weights', "")
tf.app.flags.DEFINE_float('learning_rate', 0.01,
                          """Initial learning rate.""")
tf.app.flags.DEFINE_float('lr_decay_epochs', 30,
                          """Number of epochs between learning-rate decays.""")

# Global constants describing the ILSVRC2012 data set.
IMAGE_SIZE = resnet18_input.IMAGE_SIZE
NUM_CLASSES = resnet18_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = resnet18_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = resnet18_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

delta = 0.15

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'


def draw_ternary_weight(a, b):
    # Draw a ternary weight in {-1, 0, +1}: nonzero with probability a + b,
    # and equal to +1 (rather than -1) with probability a / (a + b) given that
    # it is nonzero, i.e. P(W=+1) = a, P(W=-1) = b, P(W=0) = 1 - a - b.
    var = (2 * (np.random.binomial(1, p=a / (a + b)) - 0.5)) * np.random.binomial(1, p=a + b)
    return var


def init_probs(prob):
    # Clip probabilities away from 0 and 1, then apply the logit function so
    # that sigmoid(init_val) recovers the original probability.
    init_val = np.array(prob, dtype=np.float32)
    init_val[init_val < 1e-4] = 1e-4
    init_val[init_val > 0.9999] = 0.9999
    init_val = np.log(init_val / (1 - init_val))
    return init_val


def init_values_hs(W):
    W = np.array(W)
    c_init = 0.55 * np.float32(W == 0) + 0.15  # Was 0.7, 0.1
    a_nz_init = np.float32(W == 1) + 0.5 * np.float32(W == 0)
    c_init = init_probs(c_init)
    a_nz_init = init_probs(a_nz_init)
    return c_init, a_nz_init


def init_values_hs_ver2(W):
    W = np.array(W)
    W[W > 1] = 1
    W[W < -1] = -1
    c_init = 0.95 - 0.9 * np.abs(W)
    a_nz_init = 0.5 * (1 + W / (1 - c_init))
    a_nz_init[a_nz_init > 0.95] = 0.95
    a_nz_init[a_nz_init < 0.05] = 0.05
    # a_nz_init = np.float32(W >= 0.33) + 0.5 * ((W < 0.33) & (W > -0.33))
    c_init = init_probs(c_init)
    a_nz_init = init_probs(a_nz_init)
    return c_init, a_nz_init


def initializer(scope, shape, prob):
    if FLAGS.hot_start:
        c_init, a_nz_init = init_values_hs_ver2(
            np.load(FLAGS.hot_start_dir + '/W_' + scope.name.replace('/', '_') + '.npy'))
    else:
        c_init = init_probs(np.random.uniform(0.45, 0.55, shape))
        a_nz_init = init_probs(np.random.binomial(1, p=0.5 * np.ones(shape)))
    if prob == 'c':
        return c_init
    else:
        return a_nz_init


def reparametrization(prev_layer, shape, scope, kernel, stride, conv=True, train=True, relu=True):
    c_ = tf.get_variable('c', initializer=initializer(scope, shape, 'c'), dtype=tf.float32)
    a_nz_ = tf.get_variable('a_nz', initializer=initializer(scope, shape, 'a_nz'), dtype=tf.float32)
    wd_c_ = tf.multiply(tf.nn.l2_loss(c_), FLAGS.wd, name='weight_loss_c')
    tf.add_to_collection('losses', wd_c_)
    wd_a_nz_ = tf.multiply(tf.nn.l2_loss(a_nz_), FLAGS.wd, name='weight_loss_a_nz')
    tf.add_to_collection('losses', wd_a_nz_)
    c = tf.nn.sigmoid(c_)
    a_nz = tf.nn.sigmoid(a_nz_)
    a = a_nz * (1 - c)
    b = (1 - a_nz) * (1 - c)
    if train:
        mu = a - b
        var = a + b - tf.square(mu)
        normal_dist = tf.contrib.distributions.Normal(loc=0., scale=1.)
        if conv:
            mu_bar = tf.nn.conv2d(prev_layer, mu, [1, stride, stride, 1], padding='SAME')
            sigma_bar = tf.sqrt(
                tf.nn.conv2d(tf.square(prev_layer), var, [1, stride, stride, 1], padding='SAME') + 0.001)
            # n = shape[0]*shape[1]*shape[2]
        else:
            mu_bar = tf.matmul(prev_layer, mu)
            sigma_bar = tf.sqrt(tf.matmul(tf.square(prev_layer), var) + 0.001)
            # n = shape[0]
        # res = normal_dist.sample(tf.shape(mu_bar))*sigma_bar/np.sqrt(n) + mu_bar
        res = normal_dist.sample(tf.shape(mu_bar)) * sigma_bar + mu_bar
        tf.summary.histogram('a', a)
        tf.summary.histogram('b', b)
        tf.summary.histogram('c', c)
    else:
        if conv:
            res = tf.nn.conv2d(prev_layer, kernel, [1, stride, stride, 1], padding='SAME')
        else:
            res = tf.matmul(prev_layer, kernel)
    res_normed = tf.contrib.layers.batch_norm(res, center=True, scale=True, is_training=train, scope=scope)
    if relu:
        output = tf.nn.relu(res_normed)
    else:
        output = res_normed
    return output


def get_probs():
    c_ = tf.get_variable('c')
    a_nz_ = tf.get_variable('a_nz')
    c = tf.nn.sigmoid(c_)
    a_nz = tf.nn.sigmoid(a_nz_)
    a = a_nz * (1 - c)
    b = (1 - a_nz) * (1 - c)
    return a, b


def conv_relu(scope, layer_name, prev_layer, conv_shape, stride, train):
    kernel = _variable_with_weight_decay('weights', shape=conv_shape, wd=FLAGS.wd, layer_name=layer_name)
    conv = tf.nn.conv2d(prev_layer, kernel, [1, stride, stride, 1], padding='SAME')
    conv_normed = tf.contrib.layers.batch_norm(conv, center=True, scale=True, is_training=train, scope=scope)
    output = tf.nn.relu(conv_normed)
    return output


def res_block(scope, prev_layer, conv_shapes, stride, train, W1, W2a, W2b):
    # branch2a
    with tf.variable_scope('branch2a') as scope_inner:
        branch2a = reparametrization(prev_layer, conv_shapes[0], scope_inner, W2a, stride, train=train)
    # branch2b
    with tf.variable_scope('branch2b') as scope_inner:
        branch2b = reparametrization(branch2a, conv_shapes[1], scope_inner, W2b, 1, train=train, relu=False)
    # Input projection and output
    input_depth = prev_layer.get_shape().as_list()[3]
    output_depth = conv_shapes[1][3]
    if (input_depth != output_depth) & (FLAGS.projection == True):
        with tf.variable_scope('branch1') as scope_inner:
            branch1 = reparametrization(prev_layer, [1, 1, input_depth, output_depth], scope_inner, W1,
                                        stride, train=train, relu=False)
    elif (input_depth != output_depth) & (FLAGS.projection == False):
        with tf.variable_scope('branch1') as scope_inner:
            prev_layer = tf.nn.max_pool(prev_layer, ksize=[1, 1, 1, 1], strides=[1, stride, stride, 1],
                                        padding='SAME')
            branch1 = tf.pad(prev_layer, [[0, 0], [0, 0], [0, 0], [0, output_depth - input_depth]])
    else:
        branch1 = prev_layer
    output = tf.nn.relu(tf.add(branch2b, branch1))
    return output


def _activation_summary(x):
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))


def _variable_on_cpu(name, shape, initializer, layer_name):
    with tf.device('/cpu:0'):
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        if (FLAGS.use_pretrained) & (layer_name != 'new'):
            init = np.load(FLAGS.weights_dir + layer_name + '.npy')
            var = tf.get_variable(name, initializer=init, dtype=dtype)
        else:
            init = initializer
            var = tf.get_variable(name, shape, initializer=init, dtype=dtype)
    return var


def _variable_with_weight_decay(name, shape, wd, layer_name):
    var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer(uniform=False), layer_name)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var


def distorted_inputs():
    if not FLAGS.data_dir_train:
        raise ValueError('Please supply a data_dir')
    images, labels = resnet18_input.distorted_inputs(data_dir=FLAGS.data_dir_train,
                                                     batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels


def inputs(eval_data):
    if not FLAGS.data_dir_eval:
        raise ValueError('Please supply a data_dir')
    if eval_data:
        data_dir = FLAGS.data_dir_eval
    else:
        data_dir = FLAGS.data_dir_train
    images, labels = resnet18_input.inputs(eval_data=eval_data, data_dir=data_dir,
                                           batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels


def inference(images, W1, W2_1_b2a, W2_1_b2b, W2_2_b2a, W2_2_b2b,
              W3_1_b1, W3_1_b2a, W3_1_b2b, W3_2_b2a, W3_2_b2b,
              W4_1_b1, W4_1_b2a, W4_1_b2b, W4_2_b2a, W4_2_b2b,
              W5_1_b1, W5_1_b2a, W5_1_b2b, W5_2_b2a, W5_2_b2b, train=False):
    tf.summary.image('images', images, max_outputs=1)

    ## conv1
    with tf.variable_scope('conv1') as scope:
        conv1 = reparametrization(images, [7, 7, 3, 64], scope, W1, 2, train=train)
        _activation_summary(conv1)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

    ## conv2
    # conv2_1
    with tf.variable_scope('conv2_1') as scope:
        conv2_1 = res_block(scope, pool1, [[3, 3, 64, 64], [3, 3, 64, 64]], 1, train, None, W2_1_b2a, W2_1_b2b)
        _activation_summary(conv2_1)
    # conv2_2
    with tf.variable_scope('conv2_2') as scope:
        conv2_2 = res_block(scope, conv2_1, [[3, 3, 64, 64], [3, 3, 64, 64]], 1, train, None, W2_2_b2a, W2_2_b2b)
        _activation_summary(conv2_2)

    ## conv3
    # conv3_1
    with tf.variable_scope('conv3_1') as scope:
        conv3_1 = res_block(scope, conv2_2, [[3, 3, 64, 128], [3, 3, 128, 128]], 2, train, W3_1_b1, W3_1_b2a, W3_1_b2b)
        _activation_summary(conv3_1)
    # conv3_2
    with tf.variable_scope('conv3_2') as scope:
        conv3_2 = res_block(scope, conv3_1, [[3, 3, 128, 128], [3, 3, 128, 128]], 1, train, None, W3_2_b2a, W3_2_b2b)
        _activation_summary(conv3_2)

    ## conv4
    # conv4_1
    with tf.variable_scope('conv4_1') as scope:
        conv4_1 = res_block(scope, conv3_2, [[3, 3, 128, 256], [3, 3, 256, 256]], 2, train, W4_1_b1, W4_1_b2a, W4_1_b2b)
        _activation_summary(conv4_1)
    # conv4_2
    with tf.variable_scope('conv4_2') as scope:
        conv4_2 = res_block(scope, conv4_1, [[3, 3, 256, 256], [3, 3, 256, 256]], 1, train, None, W4_2_b2a, W4_2_b2b)
        _activation_summary(conv4_2)

    ## conv5
    # conv5_1
    with tf.variable_scope('conv5_1') as scope:
        conv5_1 = res_block(scope, conv4_2, [[3, 3, 256, 512], [3, 3, 512, 512]], 2, train, W5_1_b1, W5_1_b2a, W5_1_b2b)
        _activation_summary(conv5_1)
    # conv5_2
    with tf.variable_scope('conv5_2') as scope:
        conv5_2 = res_block(scope, conv5_1, [[3, 3, 512, 512], [3, 3, 512, 512]], 1, train, None, W5_2_b2a, W5_2_b2b)
        _activation_summary(conv5_2)
    # pool5
    pool5 = tf.nn.avg_pool(conv5_2, ksize=[1, 7, 7, 1], strides=[1, 7, 7, 1], padding='SAME', name='pool5')

    # linear layer (WX + b)
    with tf.variable_scope('softmax_linear') as scope:
        pool5 = tf.reshape(pool5, [FLAGS.batch_size, -1])
        weights = _variable_with_weight_decay('weights', [512, NUM_CLASSES], wd=FLAGS.wd_weights,
                                              layer_name=scope.name + '_w')
        biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0),
                                  layer_name=scope.name + '_b')
        softmax_linear = tf.add(tf.matmul(pool5, weights), biases, name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear


def loss(logits, labels):
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the
    # weight decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')


def _add_loss_summaries(total_loss):
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op


def draw_weights(sess):
    with tf.variable_scope('conv1', reuse=True):
        a, b = get_probs()
        a_, b_ = sess.run([a, b])
        W = draw_ternary_weight(a_, b_)
        np.save(FLAGS.train_dir + '/weights/W_conv1.npy', W)
    with tf.variable_scope('conv2_1', reuse=True) as scope:
        with tf.variable_scope('branch2a', reuse=True):
            a, b = get_probs()
            a_, b_ = sess.run([a, b])
            W = draw_ternary_weight(a_, b_)
np.save(FLAGS.train_dir+'/weights/W_'+scope.name+'_branch2a.npy',W)
numpy.save
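# A quick Monte Carlo sketch checking the closed-form moments used in
# `reparametrization` against the sampler `draw_ternary_weight`: with
# P(W=+1) = a, P(W=-1) = b, P(W=0) = 1 - a - b, one gets E[W] = a - b and
# Var[W] = a + b - (a - b)**2 (mu and var in the code). The sampler is
# repeated below only so the check runs on its own.
import numpy as np

def draw_ternary_weight(a, b):
    # +1 vs -1 with odds a:b, conditioned on being nonzero with probability a + b.
    return (2 * (np.random.binomial(1, p=a / (a + b)) - 0.5)) * np.random.binomial(1, p=a + b)

a, b = 0.3, 0.2
samples = np.array([draw_ternary_weight(a, b) for _ in range(100000)])
print(samples.mean(), a - b)                # both close to 0.10
print(samples.var(), a + b - (a - b) ** 2)  # both close to 0.49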
""" Code for figure 2A. Visualize a 3 block SBM with fixed background fields. <NAME>, Dec 2019. """ import numpy as np import networkx as nx import random import matplotlib.pyplot as plt import three_block_sbm_class as ThreeBlock def plot_three_block(sbm_graph,three_block,fname="three_block_plot",color_on='background_field',label=None) : N1 = N2 = N3 = 400 pos_block_1 = [ [ np.random.uniform(0,1) , np.random.uniform(0,1.0)] for k in range(N1)] pos_block_2 = [ [ np.random.uniform(1.5,3.0) , np.random.uniform(0,1.0)] for k in range(N2)] pos_block_3 = [ [
np.random.uniform(4.0,5.0)
numpy.random.uniform
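# A small self-contained sketch (with a made-up Erdos-Renyi graph standing in
# for the SBM) of how the three uniform position bands above combine into a
# layout dict for networkx drawing; the block x-ranges (0, 1), (1.5, 3.0) and
# (4.0, 5.0) come from the snippet, everything else is illustrative.
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

N = 30
g = nx.erdos_renyi_graph(3 * N, 0.05)
pos = {}
for block, (lo, hi) in enumerate([(0.0, 1.0), (1.5, 3.0), (4.0, 5.0)]):
    for k in range(N):
        # One horizontal band per block, unit height, disjoint x-ranges.
        pos[block * N + k] = (np.random.uniform(lo, hi), np.random.uniform(0.0, 1.0))
nx.draw(g, pos=pos, node_size=20)
plt.savefig("three_block_layout_demo.png")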
# -*- coding: utf-8 -*-
"""
Created on Fri Jul  2 12:09:14 2021

@author: vohuynhq
"""
import numpy as np
import pandas as pd
from sympy import symbols, init_printing, pi, sqrt, diff, sin, cos, exp


def example(A):
    A_true = np.array([[3, 4, 5], [3, 4, 5]])
    np.testing.assert_equal(A, A_true)
    return None


def derivative(eval_1, eval_2, eval_3, eval_4, *args):
    init_printing(use_unicode=True)
    x = symbols('x')

    f_1 = x ** (3 / 2) + pi * x ** 2 + sqrt(2)
    diff_1 = diff(f_1, x)
    eval_1_true = diff_1.subs(x, 2)

    f_2 = sin(x) * exp(cos(x))
    diff_2 = diff(f_2, x)
    eval_2_true = diff_2.subs(x, pi)

    f_3 = exp((x + 1) ** 2)
    diff_3 = diff(f_3, x)
    eval_3_true = diff_3.subs(x, 1)

    f_4 = x ** 2 * (cos(x) ** 3)
    diff_4 = diff(f_4, x)
    eval_4_true = diff_4.subs(x, pi)

    assert eval_1 == eval_1_true, "Wrong answer!"
    assert eval_2 == eval_2_true, "Wrong answer!"
    assert eval_3 == eval_3_true, "Wrong answer!"
    assert eval_4 == eval_4_true, "Wrong answer!"
    return None


def jacobians_hessians(eval_1, eval_2, eval_3, eval_4):
    init_printing(use_unicode=True)
    x, y, z = symbols('x y z')

    f_1 = x ** 2 * cos(y) + exp(z) * sin(y)
    J_1 = [diff(f_1, x), diff(f_1, y), diff(f_1, z)]
    eval_1_true = [J_1[i].subs([(x, pi), (y, pi), (z, 1)]) for i in range(3)]

    u = x ** 2 * y - cos(x) * sin(y)
    v = exp(x + y)
    J_u = [diff(u, x), diff(u, y)]
    J_v = [diff(v, x), diff(v, y)]
    eval_u = [J_u[0].subs([(x, 0), (y, pi)]), J_u[1].subs([(x, 0), (y, pi)])]
    eval_v = [J_v[0].subs([(x, 0), (y, pi)]), J_v[1].subs([(x, 0), (y, pi)])]
    eval_2_true = [eval_u, eval_v]

    f_3 = x ** 3 * cos(y) - x * sin(y)
    H_3 = [[diff(diff(f_3, x), x), diff(diff(f_3, x), y)],
           [diff(diff(f_3, y), x), diff(diff(f_3, y), y)]]
    eval_3_true = [[H_3[i][j].subs([(x, pi), (y, pi)]) for j in range(2)] for i in range(2)]

    f_4 = x * y * cos(z) - sin(x) * exp(y) * z ** 3
    H_4 = [[diff(diff(f_4, r), c) for c in (x, y, z)] for r in (x, y, z)]
    eval_4_true = [[H_4[i][j].subs([(x, pi), (y, pi), (z, pi)]) for j in range(3)] for i in range(3)]

    # Compare nested lists directly; set() would raise a TypeError on the
    # unhashable list rows of the Jacobian/Hessian answers.
    assert eval_1 == eval_1_true, 'Wrong answer!'
    assert eval_2 == eval_2_true, 'Wrong answer!'
    assert eval_3 == eval_3_true, 'Wrong answer!'
    assert eval_4 == eval_4_true, 'Wrong answer!'
return None def question1(m,b,X,y,*args): ## # Load dataset: # data = pd.read_csv("./dataset/Xy_dataset.csv") X = np.array(data['X']) y = np.array(data['y']) X = X.reshape(X.shape[0],1) y = y.reshape(y.shape[0],1) ## # Step 1: Initialize m and b: # m_true = 0 b_true = 0 for i in range(50): ## # Step 2: Find y_pred = mx + b: # y_pred = m*X + b ## # Step 3: Update m and b using the Gradient Descent algorithm: # dm = np.mean((y_pred - y) * X) db = np.mean(y_pred - y) m_true = m_true - 0.1*dm b_true = b_true - 0.1*db np.testing.assert_equal(m,m_true) np.testing.assert_equal(b,b_true) return None def question2(m,b,X,y,*args): ## # Step 1: Initialize m and b: # m_true = 0 b_true = 0 for i in range(50): ## # Step 2: Find y_pred = mx + b: # y_pred = m*X + b ## # Step 3: Update m and b using the Gradient Descent algorithm: # dm = np.mean((y_pred - y) * X) db = np.mean(y_pred - y) m_true = m_true - 0.1*dm b_true = b_true - 0.1*db np.testing.assert_equal(m,m_true) np.testing.assert_equal(b,b_true) return None def question3(m,b,X,y,*args): ## # Step 1: Initialize m and b: # m_true = 0 b_true = 0 for i in range(50): ## # Step 2: Find y_pred = mx + b: # y_pred = m*X + b ## # Step 3: Update m and b using the Gradient Descent algorithm: # dm = np.mean((y_pred - y) * X) db = np.mean(y_pred - y) m_true = m_true - 0.2*dm b_true = b_true - 0.2*db np.testing.assert_equal(m,m_true) np.testing.assert_equal(b,b_true) return None def question4(m,b,X,y,*args): ## # Step 1: Initialize m and b: # m_true = 0 b_true = 0 for i in range(100): ## # Step 2: Find y_pred = mx + b: # y_pred = m*X + b ## # Step 3: Update m and b using the Gradient Descent algorithm: # dm = np.mean((y_pred - y) * X) db = np.mean(y_pred - y) m_true = m_true - 0.1*dm b_true = b_true - 0.1*db np.testing.assert_equal(m,m_true)
np.testing.assert_equal(b,b_true)
numpy.testing.assert_equal
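A quick illustration of the target API, numpy.testing.assert_equal, which the graders above rely on; it passes silently on equal inputs and raises AssertionError with a detailed diff otherwise:

import numpy as np

np.testing.assert_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))    # passes silently
# np.testing.assert_equal(np.array([1.0, 2.0]), np.array([1.0, 3.0]))  # would raise AssertionError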
from abc import abstractmethod
from collections import OrderedDict
import os
import pickle
import re
from typing import Tuple, Union

import pandas as pd
import numpy as np

import gym

from gridworld.log import logger
from gridworld import ComponentEnv
from gridworld.utils import to_scaled, to_raw, maybe_rescale_box_space
from gridworld.agents.buildings.obs_space import make_obs_space
from gridworld.agents.buildings import defaults
from gridworld.agents.buildings import five_zone_rom_dynamics as dyn

# Below are the control variables' boundaries.
MAX_FLOW_RATE = [2.2, 2.2, 2.2, 2.2, 3.2]  # Max flow rate for each individual zone
MIN_FLOW_RATE = [.22, .22, .22, .22, .32]  # Min flow rate for each individual zone
MAX_TOTAL_FLOW_RATE = 10.0                 # Total flow rate for all zones should be lower than 10 kg/sec.
MAX_DISCHARGE_TEMP = 16.0                  # Max temp of air leaving chiller
MIN_DISCHARGE_TEMP = 10.0                  # Min temp of air leaving chiller
DEFAULT_COMFORT_BOUNDS = (22., 28.)        # Temps between these values are considered "comfortable"


def load_data(start_time: str = None, end_time: str = None) -> Tuple[pd.DataFrame, dict]:
    """Returns exogenous data dataframe, and state space model (per-zone) dict."""

    THIS_DIR = os.path.dirname(os.path.abspath(__file__))

    df = pd.read_csv(os.path.join(THIS_DIR, "data/exogenous_data.csv"), index_col=0)
    df.index = pd.DatetimeIndex(df.index)

    start_time = pd.Timestamp(start_time) if start_time else df.index[0]
    end_time = pd.Timestamp(end_time) if end_time else df.index[-1]

    _df = df.loc[start_time:end_time]

    if _df is None or len(_df) == 0:
        raise ValueError(
            f"start and/or end times ({start_time}, {end_time}) " +
            "resulted in empty dataframe. First and last indices are " +
            f"({df.index[0]}, {df.index[-1]}), choose values in this range.")

    with open(os.path.join(THIS_DIR, "data/state_space_model.p"), "rb") as f:
        models = pickle.load(f)

    return _df, models


def get_col(df, pattern, index=None):
    """Returns a dataframe with columns matching regex pattern."""
    return df[[c for c in df.columns if re.match(pattern, c)]].values


class FiveZoneROMEnv(ComponentEnv):

    time: pd.Timestamp = None
    time_index: int = None
    raw_action: np.ndarray = None
    state: OrderedDict = None

    def __init__(
        self,
        name: str = None,
        obs_config: dict = None,
        start_time: Union[str, pd.Timestamp] = None,
        end_time: Union[str, pd.Timestamp] = None,
        comfort_bounds: Union[tuple, np.ndarray, pd.DataFrame] = None,
        zone_temp_init: np.ndarray = None,
        max_episode_steps: int = None,
        rescale_spaces: bool = True,
        **kwargs
    ):
        super().__init__(name=name)

        self.rescale_spaces = rescale_spaces
        self.num_zones = 5
        self.obs_config = obs_config if obs_config is not None else defaults.obs_config

        # Set the initial zone temperature profile.
        if zone_temp_init is not None:
            self.zone_temp_init = zone_temp_init.copy()
        else:
            self.zone_temp_init = 27. * np.ones(self.num_zones, dtype=np.float64)

        # Load exogenous and model data.
        self.df, self.models = load_data(start_time, end_time)

        # Configure max episode steps.
        max_steps = self.df.shape[0] - 3  # due to filter update
        if max_episode_steps is None:
            self.max_episode_steps = max_steps
        else:
            self.max_episode_steps = min(max_episode_steps, max_steps)

        # The default range on comfort bounds is (lowest of low, highest of high).
        self.comfort_bounds = comfort_bounds if comfort_bounds is not None \
            else DEFAULT_COMFORT_BOUNDS

        # Action space: [zone_flows] + [discharge temp]
        self.act_low = np.array(MIN_FLOW_RATE + [MIN_DISCHARGE_TEMP])
        self.act_high = np.array(MAX_FLOW_RATE + [MAX_DISCHARGE_TEMP])
        self._action_space = gym.spaces.Box(
            low=self.act_low,
            high=self.act_high,
            dtype=np.float64
        )
        self.action_space = maybe_rescale_box_space(
            self._action_space, rescale=self.rescale_spaces)

        # State space is configured via obs_config.
        self.comfort_bounds_df = self.make_comfort_bounds_df()
        self._observation_space, self._obs_labels = make_obs_space(
            self.num_zones, self.obs_config)
        self.observation_space = maybe_rescale_box_space(
            self._observation_space, rescale=self.rescale_spaces)

    def make_comfort_bounds_df(self) -> pd.DataFrame:
        """Returns a dataframe containing upper and lower comfort bounds on the
        zone temperatures."""
        data = np.zeros((self.df.shape[0], 2))
        if isinstance(self.comfort_bounds, tuple):
            data[:, 0], data[:, 1] = self.comfort_bounds[0], self.comfort_bounds[1]
        else:
            data[:, 0] = self.comfort_bounds[:data.shape[0], 0]
            data[:, 1] = self.comfort_bounds[:data.shape[0], 1]
        return pd.DataFrame(data, columns=["temp_lb", "temp_ub"], index=self.df.index)

    def _set_exogenous(self):
        self.temp_oa = get_col(self.df, "T_oa")[self.time_index][0]
        self.q_solar = get_col(self.df, "Q_solar")[self.time_index]
        self.q_cool = get_col(self.df, "Q_cool_")[self.time_index, :]
        self.q_int = get_col(self.df, "Q_int")[self.time_index]

    def reset(self, **obs_kwargs) -> np.ndarray:
        """Resets the environment to the initial state and returns this state."""

        self.time_index = 0
        self.time = self.df.index[self.time_index]
        self.state = None

        # Set initial state values and exogenous data.
        self.zone_temp = self.zone_temp_init.copy()
        self._set_exogenous()
        self.p_consumed = 0.

        # Build the u-vector given current state and exogenous data.
        self.u = dyn.build_u_vector(
            self.models,
            zone_temp=self.zone_temp,
            action=None,
            temp_oa=self.temp_oa,
            q_solar=self.q_solar,
            q_int=self.q_int,
            q_cool=self.q_cool
        )

        # Filter update x2.
        for _ in range(2):
            self.models = dyn.filter_update(
                self.models, self.zone_temp, self.u)

        # Update the zone temperatures based on the filter update.
        self.zone_temp = dyn.temp_dynamics(self.models)

        obs, _ = self.get_obs(**obs_kwargs)

        return obs

    def step(self, action: np.ndarray, **obs_kwargs) -> Tuple[np.ndarray, float, bool, dict]:
        if self.rescale_spaces:
            action = to_raw(action, self._action_space.low, self._action_space.high)
        return self.step_(action, **obs_kwargs)

    def step_(
        self,
        action: np.ndarray,
        **obs_kwargs
    ) -> Tuple[np.ndarray, float, bool, dict]:
        """Applies the action to the system and takes a time step. Returns the
        new state, stage reward, a boolean to indicate whether the state is
        terminal, and a dictionary of any desired metadata. In some settings,
        the p setpoint will be updated exogenously."""

        action = np.array(action).squeeze()
        self.raw_action = action

        # Advance the dynamics and update the models and state variables.
        self.models, self.zone_temp = dyn.dynamics(
            self.models,
            self.zone_temp,
            action,
            self.temp_oa,
            self.q_solar,
            self.q_int
        )

        self.p_consumed = dyn.get_p_consumed(action, self.temp_oa)

        # Get the reward.
        rew, _ = self.step_reward()

        # Step in time and update the exogenous data.
        self.time_index += 1
        self.time = self.df.index[self.time_index]
        self._set_exogenous()

        # Call get_obs before returning so the state dict is updated.
        obs, state = self.get_obs(**obs_kwargs)

        return np.array(obs), rew, self.is_terminal(), state

    def get_obs(
        self,
        **obs_kwargs
    ) -> Tuple[np.ndarray, dict]:
        """Returns the current state, clipping the values as specified by the
        gym observation space box constraints. Calling this method also updates
        the state dict attribute for convenience."""

        # Call the ROM model to get the new zone temps.
        # Compute the temperature violation per zone.
        temp_lb = self.comfort_bounds_df["temp_lb"][self.time].copy()
        temp_ub = self.comfort_bounds_df["temp_ub"][self.time].copy()
        zone_upper_temp_viol = np.zeros(self.num_zones, dtype=np.float64)
        zone_lower_temp_viol = np.zeros(self.num_zones, dtype=np.float64)
        for i, temp in enumerate(self.zone_temp):
            # zone_temp_viol[i] = max(max(0, temp_lb - temp), max(0, temp - temp_ub))
            # Positive violation is a true violation, while negative violation means margin.
            zone_upper_temp_viol[i] = temp - temp_ub
            zone_lower_temp_viol[i] = temp_lb - temp

        # Add nominal values for bus_voltage and p_setpoint if not provided in kwargs.
        bus_voltage = obs_kwargs.get("bus_voltage")
        p_setpoint = obs_kwargs.get("p_setpoint")

        # Create a dict to record all possible state values. We can then filter
        # them out using the obs_config when creating the obs array.
        # TODO: Automate making sure state keys have same order as DEFAULT_OBS_CONFIG.
        self.state = OrderedDict(
            {"zone_temp_{}".format(k): v for k, v in enumerate(self.zone_temp)})
        self.state.update(
            {"zone_upper_viol_{}".format(k): v for k, v in enumerate(zone_upper_temp_viol)})
        self.state.update(
            {"zone_lower_viol_{}".format(k): v for k, v in enumerate(zone_lower_temp_viol)})
        self.state.update({
            "comfort_lower": temp_lb,       # current comfort lower bound
            "comfort_upper": temp_ub,       # current comfort upper bound
            "outdoor_temp": self.temp_oa,   # current outdoor temp
            "p_consumed": self.p_consumed,  # current p consumed
            "time_of_day": 1. * self.time_index / self.max_episode_steps,  # time
            "bus_voltage": bus_voltage if bus_voltage is not None else 1.0,
            "min_voltage": bus_voltage if bus_voltage is not None else 1.0,
            "max_voltage": bus_voltage if bus_voltage is not None else 1.0,
            "p_setpoint": p_setpoint if p_setpoint is not None else np.inf
        })
        self.state.update(obs_kwargs)

        # Create the filtered observation array and clip values to low/high.
        obs = np.array(
            [v for k, v in self.state.items() if k in self.obs_labels],
            dtype=object  # otherwise a warning is raised about ragged seq
        ).astype(np.float64)
        obs = np.clip(
            obs, self._observation_space.low, self._observation_space.high).squeeze()

        if self.rescale_spaces:
            obs = to_scaled(obs, self._observation_space.low, self._observation_space.high)

        return obs.copy(), self.state.copy()

    def step_reward(self) -> Tuple[float, dict]:
        """Default reward is a soft constraint on comfort bounds."""
        viol_lower = [v for k, v in self.state.items() if k.startswith("zone_lower_viol_")]
        viol_upper = [v for k, v in self.state.items() if k.startswith("zone_upper_viol_")]
        rew = np.array(viol_lower)**2 +
np.array(viol_upper)
numpy.array
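The completed reward line squares the per-zone violation margins. A self-contained sketch with made-up violation values (positive means a comfort-bound violation, negative means margin):

import numpy as np

viol_lower = [-0.5, 0.2, -1.0, 0.0, 0.3]
viol_upper = [-2.0, -0.1, 0.4, 0.0, -0.6]
rew = np.array(viol_lower)**2 + np.array(viol_upper)**2
# rew is a per-zone penalty array; e.g. rew[2] == (-1.0)**2 + 0.4**2 == 1.16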
""" Varibles and functions used by the "flux" code. """ import numpy as np from datetime import datetime, timedelta import zfun # path provided by calling code import pandas as pd import Lfun import pickle from warnings import filterwarnings filterwarnings('ignore') # skip some warning messages # associated with lines like QQp[QQ<=0] = np.nan def get_fluxes(indir, sect_name, in_sign=1): # form time series of net 2-layer transports into (+) and out of (-) the volume bulk = pickle.load(open(indir + 'bulk/' + sect_name + '.p', 'rb')) QQ = bulk['QQ'] SS = bulk['SS'] ot = bulk['ot'] dt2 = [] for tt in ot: dt2.append(Lfun.modtime_to_datetime(tt)) # separate inflowing and outflowing transports QQp = QQ.copy() QQp[QQ<=0] = np.nan QQm = QQ.copy() QQm[QQ>=0] = np.nan # form two-layer versions of Q and S if in_sign == 1: Qin = np.nansum(QQp, axis=1) QSin = np.nansum(QQp*SS, axis=1) Qout = np.nansum(QQm, axis=1) QSout = np.nansum(QQm*SS, axis=1) elif in_sign == -1: Qin = -np.nansum(QQm, axis=1) QSin = -np.nansum(QQm*SS, axis=1) Qout = -np.nansum(QQp, axis=1) QSout = -np.nansum(QQp*SS, axis=1) Sin = QSin/Qin Sout = QSout/Qout fnet = bulk['fnet_lp'] # net tidal energy flux qabs = bulk['qabs_lp'] # low pass of absolute value of net transport tef_df = pd.DataFrame(index=dt2) tef_df['Qin']=Qin tef_df['Qout']=Qout tef_df['QSin']=QSin tef_df['QSout']=QSout tef_df['Sin']=Sin tef_df['Sout']=Sout tef_df['Ftide'] = fnet * in_sign tef_df['Qtide'] = qabs return tef_df def get_budgets(sv_lp_df, v_lp_df, riv_df, tef_df_dict, seg_list): # volume budget vol_df = pd.DataFrame(0, index=sv_lp_df.index, columns=['Qin','-Qout', 'Qtide']) for sect_name in tef_df_dict.keys(): df = tef_df_dict[sect_name] vol_df['Qin'] = vol_df['Qin'] + df['Qin'] vol_df['-Qout'] = vol_df['-Qout'] - df['Qout'] vol_df['Qtide'] = vol_df['Qtide'] + df['Qtide'] vol_df['Qr'] = riv_df.sum(axis=1) v = v_lp_df.sum(axis=1).to_numpy() vol_df.loc[:,'V'] = v vol_df.loc[1:-1, 'dV_dt'] = (v[2:] - v[:-2]) / (2*86400) vol_df['Error'] = vol_df['dV_dt'] - vol_df.loc[:,'Qin'] + vol_df.loc[:,'-Qout'] - vol_df.loc[:,'Qr'] vol_rel_err = vol_df['Error'].mean()/vol_df['Qr'].mean() # salt budget salt_df = pd.DataFrame(0, index=sv_lp_df.index, columns=['QSin','-QSout','Ftide']) for sect_name in tef_df_dict.keys(): df = tef_df_dict[sect_name] salt_df['QSin'] = salt_df['QSin'] + df['QSin'] salt_df['-QSout'] = salt_df['-QSout'] - df['QSout'] salt_df['Ftide'] = salt_df['Ftide'] + df['Ftide'] sn = sv_lp_df[seg_list].sum(axis=1).values salt_df.loc[1:-1, 'dSnet_dt'] = (sn[2:] - sn[:-2]) / (2*86400) salt_df['Smean'] = sv_lp_df.sum(axis=1)/vol_df['V'] salt_df['Error'] = salt_df['dSnet_dt'] - salt_df['QSin'] + salt_df['-QSout'] salt_rel_err = salt_df['Error'].mean()/salt_df['QSin'].mean() # add a few more columns to plot in a different way salt_df['Qe'] = (vol_df['Qin'] + vol_df['-Qout'])/2 salt_df['Qnet'] = (vol_df['-Qout'] - vol_df['Qin']) salt_df['Sin'] = salt_df['QSin']/vol_df['Qin'] salt_df['Sout'] = salt_df['-QSout']/vol_df['-Qout'] salt_df['DS'] = salt_df['Sin'] - salt_df['Sout'] salt_df['Sbar'] = (salt_df['QSin']/vol_df['Qin'] + salt_df['-QSout']/vol_df['-Qout'])/2 salt_df['QeDS'] = salt_df['Qe'] * salt_df['DS'] salt_df['-QrSbar'] = -salt_df['Qnet'] * salt_df['Sbar'] salt_rel_err_qe = salt_df['Error'].mean()/salt_df['QeDS'].mean() # make sure everything is numeric for cn in vol_df.columns: vol_df[cn] = pd.to_numeric(vol_df[cn]) for cn in salt_df.columns: salt_df[cn] = pd.to_numeric(salt_df[cn]) return vol_df, salt_df, vol_rel_err, salt_rel_err, salt_rel_err_qe # desired 
time ranges, the "seasons" def get_dtr(year): dtr = {} dtr['full'] = (datetime(year,1,1,12,0,0), datetime(year,12,31,12,0,0)) dtr['winter'] = (datetime(year,1,1,12,0,0), datetime(year,3,31,12,0,0)) # JFM dtr['spring'] = (datetime(year,4,1,12,0,0), datetime(year,6,30,12,0,0)) # AMJ dtr['summer'] = (datetime(year,7,1,12,0,0), datetime(year,9,30,12,0,0)) # JAS dtr['fall'] = (datetime(year,10,1,12,0,0), datetime(year,12,31,12,0,0)) # OMD return dtr # Lists of 2-layer segments to use for "initial condition" experiments in the flux_engine. # The keys should match up with "src" values in flux_engine.py. def get_seg_list(X,N): sl = [X+str(n)+'_s' for n in range(1,N+1)]+[X+str(n)+'_f' for n in range(1,N+1)] return sl # make a segment list ic_seg2_dict = {'IC_HoodCanalInner': ['H'+str(n)+'_s' for n in range(3,9)], 'IC_HoodCanal': get_seg_list('H',8), 'IC_SouthSound': get_seg_list('S',4), 'IC_Whidbey': get_seg_list('W',4), 'IC_PS': get_seg_list('A',3) + get_seg_list('M',6) + get_seg_list('T',2) + get_seg_list('S',4) + get_seg_list('H',8) + get_seg_list('W',4), 'IC_SoG': get_seg_list('G',6), 'IC_Salish': get_seg_list('A',3) + get_seg_list('M',6) + get_seg_list('T',2) + get_seg_list('S',4) + get_seg_list('H',8) + get_seg_list('W',4) + get_seg_list('G',6) + get_seg_list('J',4) } season_list = list(get_dtr(2017).keys()) # create Series of two-layer volumes # this is the one place to specify the ratio volume in the "salty" and "fresh" layers def get_V(v_df): V = pd.Series() for seg_name in v_df.index: V[seg_name+'_s'] = 0.8 * v_df.loc[seg_name,'volume m3'] V[seg_name+'_f'] = 0.2 * v_df.loc[seg_name,'volume m3'] return V # segment definitions, assembled by looking at the figures # created by plot_thalweg_mean.py segs = { 'J1':{'S':[], 'N':[], 'W':['jdf1'], 'E':['jdf2'], 'R':['sanjuan', 'hoko']}, 'J2':{'S':[], 'N':[], 'W':['jdf2'], 'E':['jdf3'], 'R':[]}, 'J3':{'S':[], 'N':[], 'W':['jdf3'], 'E':['jdf4'], 'R':['elwha']}, 'J4':{'S':[], 'N':['sji1'], 'W':['jdf4'], 'E':['ai1','dp'], 'R':['dungeness']}, 'G1':{'S':['sji1'], 'N':['sji2'], 'W':[], 'E':[], 'R':['samish']}, 'G2':{'S':['sji2'], 'N':['sog1'], 'W':[], 'E':[], 'R':['nooksack', 'cowichan']}, 'G3':{'S':['sog1'], 'N':['sog2'], 'W':[], 'E':[], 'R':['nanaimo', 'fraser']}, 'G4':{'S':['sog2'], 'N':[], 'W':['sog3'], 'E':[], 'R':['clowhom', 'squamish']}, 'G5':{'S':[], 'N':['sog4'], 'W':[], 'E':['sog3'], 'R':['englishman', 'tsolum', 'oyster']}, 'G6':{'S':['sog4'], 'N':['sog5'], 'W':[], 'E':[], 'R':[]}, 'A1':{'S':['ai2'], 'N':[], 'W':['ai1'], 'E':[], 'R':[]}, 'A2':{'S':['ai3'], 'N':['ai2'], 'W':[], 'E':[], 'R':[]}, 'A3':{'S':['hc1'], 'N':['ai3'], 'W':[], 'E':['ai4'], 'R':[]}, 'M1':{'S':['mb1'], 'N':['wb1'], 'W':['ai4'], 'E':[], 'R':[]}, 'M2':{'S':['mb2'], 'N':['mb1'], 'W':[], 'E':[], 'R':[]}, 'M3':{'S':['mb3'], 'N':['mb2'], 'W':[], 'E':[], 'R':['green', 'cedar']}, 'M4':{'S':['mb4'], 'N':['mb3'], 'W':[], 'E':[], 'R':[]}, 'M5':{'S':['mb5'], 'N':['mb4'], 'W':[], 'E':[], 'R':[]}, 'M6':{'S':['tn1'], 'N':['mb5'], 'W':[], 'E':[], 'R':['puyallup']}, 'T1':{'S':['tn2'], 'N':['tn1'], 'W':[], 'E':[], 'R':[]}, 'T2':{'S':['tn3'], 'N':['tn2'], 'W':[], 'E':[], 'R':[]}, 'S1':{'S':[], 'N':['tn3'], 'W':['ss1'], 'E':[], 'R':[]}, 'S2':{'S':[], 'N':[], 'W':['ss2'], 'E':['ss1'], 'R':['nisqually']}, 'S3':{'S':[], 'N':[], 'W':['ss3'], 'E':['ss2'], 'R':[]}, 'S4':{'S':[], 'N':[], 'W':[], 'E':['ss3'], 'R':['deschutes']}, 'W1':{'S':['wb1'], 'N':['wb2'], 'W':[], 'E':[], 'R':['snohomish']}, 'W2':{'S':['wb2'], 'N':['wb3'], 'W':[], 'E':[], 'R':['stillaguamish']}, 'W3':{'S':['wb3'], 'N':[], 'W':[], 
'E':['wb4'], 'R':[]}, 'W4':{'S':[], 'N':[], 'W':['wb4', 'dp'], 'E':[], 'R':['skagit']}, 'H1':{'S':['hc2'], 'N':['hc1'], 'W':[], 'E':[], 'R':[]}, 'H2':{'S':[], 'N':['hc2'], 'W':['hc3'], 'E':[], 'R':[]}, 'H3':{'S':['hc4'], 'N':[], 'W':[], 'E':['hc3'], 'R':['duckabush', 'dosewallips']}, 'H4':{'S':['hc5'], 'N':['hc4'], 'W':[], 'E':[], 'R':['hamma']}, 'H5':{'S':['hc6'], 'N':['hc5'], 'W':[], 'E':[], 'R':[]}, 'H6':{'S':[], 'N':['hc6'], 'W':[], 'E':['hc7'], 'R':['skokomish']}, 'H7':{'S':[], 'N':[], 'W':['hc7'], 'E':['hc8'], 'R':[]}, 'H8':{'S':[], 'N':[], 'W':['hc8'], 'E':[], 'R':[]}, #'##':{'S':[], 'N':[], 'W':[], 'E':[], 'R':[]}, } # make lists of the various segment sequences (used below) ssJ = ['J'+str(s) for s in range(1,5)] ssM = ['M'+str(s) for s in range(1,7)] ssA = ['A'+str(s) for s in range(1,4)] ssT = ['T'+str(s) for s in range(1,3)] ssS = ['S'+str(s) for s in range(1,5)] ssG = ['G'+str(s) for s in range(1,7)] ssW = ['W'+str(s) for s in range(1,5)] ssH = ['H'+str(s) for s in range(1,9)] # This list is the same as the keys for all the dicts below. # we make it to have a fixed order for processing things, since # the order of the keys of a dict may not be fixed. channel_list = ['Juan de Fuca to Strait of Georgia', 'Admiralty Inlet to South Sound', 'Hood Canal', 'Whidbey Basin'] # also cue up a line for the target salinities from the TEF sections channel_dict = {'Juan de Fuca to Strait of Georgia':['jdf1','jdf2','jdf3','jdf4', 'sji1', 'sji2', 'sog1','sog2','sog3','sog4','sog5'], 'Admiralty Inlet to South Sound': ['ai1', 'ai2', 'ai3','ai4', 'mb1','mb2','mb3','mb4','mb5', 'tn1','tn2','tn3', 'ss1','ss2','ss3'], 'Hood Canal':['hc1','hc2','hc3','hc4','hc5','hc6','hc7','hc8'], 'Whidbey Basin':['wb1','wb2','wb3','wb4','dp']} long_channel_dict = {'Juan de Fuca to Strait of Georgia':['jdf1','jdf2','jdf3','jdf4', 'sji1', 'sji2', 'sog1','sog2','sog3','sog4','sog5'], 'Admiralty Inlet to South Sound': ['jdf4', 'ai1', 'ai2', 'ai3','ai4', 'mb1','mb2','mb3','mb4','mb5', 'tn1','tn2','tn3', 'ss1','ss2','ss3'], 'Hood Canal':['ai3', 'hc1','hc2','hc3','hc4','hc5','hc6','hc7','hc8'], 'Whidbey Basin':['ai4', 'wb1','wb2','wb3','wb4','dp']} seg_dict = {'Juan de Fuca to Strait of Georgia': ssJ + ssG, 'Admiralty Inlet to South Sound': ['J4'] + ssA + ssM + ssT + ssS, 'Hood Canal': ['A3'] + ssH, 'Whidbey Basin': ['M1'] + ssW} # same as seg_dict, but without the connections to adjoining channels short_seg_dict = {'Juan de Fuca to Strait of Georgia': ssJ + ssG, 'Admiralty Inlet to South Sound': ssA + ssM + ssT + ssS, 'Hood Canal': ssH, 'Whidbey Basin': ssW} # colors to associate with each channel (the keys in channel_ and seg_dict) clist = ['blue', 'red', 'olive', 'orange'] c_dict = dict(zip(channel_list, clist)) def make_dist(x,y): NS = len(x) xs = np.zeros(NS) ys =
np.zeros(NS)
numpy.zeros
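The prompt truncates make_dist right after allocating xs and ys with numpy.zeros. One plausible completion, shown here as a hypothetical sketch and not the author's actual function body, computes cumulative along-path distance from the point coordinates:

import numpy as np

def make_dist(x, y):
    # Cumulative distance along the (x, y) points; dist[0] is 0 by construction.
    x, y = np.asarray(x), np.asarray(y)
    dist = np.zeros(len(x))
    dist[1:] = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
    return dist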
##########################################################
# lane_detector.py
#
# SPDX-FileCopyrightText: Copyright 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# Lane detection techniques
#
# ########################################################
#

# Import libraries
import cv2
import numpy as np
import math
from collections import deque


class LaneDetector:

    def __init__(self, is_video=False, width=1280, height=720, draw_area=True, queue_len=10):
        # Roi
        self.vertices = None
        # Video pipeline
        self.is_video = is_video
        # Frame dimension
        self.width = width
        self.height = height
        # Draw
        self.draw_area_err = True
        self.draw_area = draw_area
        self.road_color = (204, 255, 153)
        self.l_lane_color = (0, 0, 255)
        self.r_lane_color = (255, 0, 0)
        self.lane_thickness = 30
        # Lane search
        self.n_windows = 9
        self.margin = 100
        self.nb_margin = 100
        self.px_threshold = 50
        self.radii_threshold = 10
        self.min_lane_dis = 600
        # Current lanes and radii
        self.l_curr_fit = None
        self.r_curr_fit = None
        self.l_diff_fit = 0
        self.r_diff_fit = 0
        self.l_curr_cr = 0
        self.r_curr_cr = 0
        self.lost_track = 0
        self.lost_radii = 0
        self.poly_thr_a = 0.001
        self.poly_thr_b = 0.4
        self.poly_thr_c = 150
        # Convert px to meter
        self.px_to_m_y = 30/720   # meters per pixel in y dimension
        self.px_to_m_x = 3.7/700  # meters per pixel in x dimension
        # Averaging
        self.queue_len = queue_len
        self.l_fit_que = deque(maxlen=self.queue_len)
        self.r_fit_que = deque(maxlen=self.queue_len)
        self.l_rad_que = deque(maxlen=self.queue_len)
        self.r_rad_que = deque(maxlen=self.queue_len)
        self.weights = np.arange(1, self.queue_len + 1) / self.queue_len
        # No text on frame
        self.no_text = False

    """
    General methods for setting files and getting information
    """

    def set_vertices(self, vertices):
        self.vertices = vertices

    def reset_detector(self):
        self.empty_queue()
        self.vertices = None
        self.l_curr_fit = None
        self.r_curr_fit = None
        self.l_diff_fit = 0
        self.r_diff_fit = 0
        self.l_curr_cr = 0
        self.r_curr_cr = 0
        self.lost_track = 0
        self.lost_radii = 0

    def empty_queue(self):
        self.l_fit_que = deque(maxlen=self.queue_len)
        self.r_fit_que = deque(maxlen=self.queue_len)
        self.l_rad_que = deque(maxlen=self.queue_len)
        self.r_rad_que = deque(maxlen=self.queue_len)

    """
    Find lanes
    """

    def calculate_histogram(self, frame):
        return np.sum(frame, axis=0)

    def get_hist_peaks(self, histogram):
        center = int(histogram.shape[0]//2)
        left_peak = np.argmax(histogram[:center])
        right_peak = np.argmax(histogram[center:]) + center
        return left_peak, right_peak

    def cr_to_degree(self, cr, arc_length):
        dc = (180 * arc_length) / (math.pi * cr)
        return dc/2

    def find_lanes(self, frame):
        self.check_track()
        if self.l_curr_fit is None or self.r_curr_fit is None:
            self.empty_queue()
            histogram = self.calculate_histogram(frame)
            left_peak, right_peak = self.get_hist_peaks(histogram)
            leftx, lefty, rightx, righty = self.sliding_window(frame, left_peak, right_peak)
            left_fit, right_fit = self.fit_polynomial(leftx, lefty, rightx, righty)
            left_fit_cr, right_fit_cr = self.fit_polynomial(
                leftx * self.px_to_m_x, lefty * self.px_to_m_y,
                rightx * self.px_to_m_x, righty * self.px_to_m_y)

            # Get radii of lane curvature
            left_rad, right_rad = self.calculate_poly_radii(frame, left_fit_cr, right_fit_cr)
            self.l_curr_cr = left_rad
            self.r_curr_cr = right_rad

            self.r_curr_fit = right_fit
            self.l_curr_fit = left_fit

            self.l_fit_que.append(left_fit)
            self.r_fit_que.append(right_fit)
            self.l_rad_que.append(left_rad)
            self.r_rad_que.append(right_rad)
        else:
            left_fit, right_fit, left_fit_cr, right_fit_cr, _ = self.nearby_search(
                frame,
                np.average(self.l_fit_que, 0, self.weights[-len(self.l_fit_que):]),
                np.average(self.r_fit_que, 0, self.weights[-len(self.r_fit_que):]))
            self.l_fit_que.append(left_fit)
            self.r_fit_que.append(right_fit)

        avg_rad = round(np.mean([np.average(self.r_rad_que, 0, self.weights[-len(self.r_rad_que):]),
                                 np.average(self.l_rad_que, 0, self.weights[-len(self.l_rad_que):])]), 0)

        try:
            return (self.draw_lanes(frame,
                                    np.average(self.l_fit_que, 0, self.weights[-len(self.l_fit_que):]),
                                    np.average(self.r_fit_que, 0, self.weights[-len(self.r_fit_que):])),
                    avg_rad)
        except:
            return (np.zeros_like(cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)), None)

    def sliding_window(self, frame, left_peak, right_peak):
        # Set window height
        window_height = int(frame.shape[0]//self.n_windows)
        # Find non-zero values
        nonzero = frame.nonzero()
        nonzero_y = np.array(nonzero[0])
        nonzero_x = np.array(nonzero[1])
        # Current positions to be updated later for each window in n_windows
        leftx_current = left_peak
        rightx_current = right_peak
        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = []
        right_lane_inds = []

        # Step through the windows one by one
        for window in range(self.n_windows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = frame.shape[0] - (window + 1) * window_height
            win_y_high = frame.shape[0] - window * window_height
            # Find the four below boundaries of the window
            win_xleft_low = leftx_current - self.margin
            win_xleft_high = leftx_current + self.margin
            win_xright_low = rightx_current - self.margin
            win_xright_high = rightx_current + self.margin
            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                              (nonzero_x >= win_xleft_low) & (nonzero_x < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzero_y >= win_y_low) & (nonzero_y < win_y_high) &
                               (nonzero_x >= win_xright_low) & (nonzero_x < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > px_threshold pixels, recenter next window
            # (`rightx_current` or `leftx_current`) on their mean position
            if len(good_left_inds) > self.px_threshold:
                leftx_current = int(np.mean(nonzero_x[good_left_inds]))
            if len(good_right_inds) > self.px_threshold:
                rightx_current = int(np.mean(nonzero_x[good_right_inds]))

        # Concatenate the arrays of indices (previously was a list of lists of pixels)
        try:
            left_lane_inds = np.concatenate(left_lane_inds)
            right_lane_inds = np.concatenate(right_lane_inds)
        except ValueError:
            # Avoids an error if the above is not implemented fully
            pass

        # Extract left and right line pixel positions
        leftx = nonzero_x[left_lane_inds]
        lefty = nonzero_y[left_lane_inds]
        rightx = nonzero_x[right_lane_inds]
        righty = nonzero_y[right_lane_inds]

        return leftx, lefty, rightx, righty

    def calculate_poly_radii(self, frame, left_fit, right_fit):
        frame_height = np.linspace(0, frame.shape[0] - 1, frame.shape[0])
        max_px_window = np.max(frame_height)
        try:
            left_rad = ((1 + (2 * left_fit[0] * max_px_window * self.px_to_m_y + left_fit[1])**2)**1.5) / np.absolute(2 * left_fit[0])
            right_rad = ((1 + (2 * right_fit[0] * max_px_window * self.px_to_m_y + right_fit[1])**2)**1.5) / np.absolute(2 * right_fit[0])
            if math.isinf(left_rad) or math.isinf(right_rad):
                return self.l_curr_cr, self.r_curr_cr
        except:
            return self.l_curr_cr, self.r_curr_cr
        return int(left_rad), int(right_rad)

    def check_radii(self, left_rad, right_rad):
        avg_l = np.average(self.l_rad_que, 0, self.weights[-len(self.l_rad_que):])
        avg_r = np.average(self.r_rad_que, 0, self.weights[-len(self.r_rad_que):])
        abs_l_diff = np.absolute(avg_l - left_rad)
        abs_r_diff = np.absolute(avg_r - right_rad)
        if abs_l_diff > (avg_l / self.radii_threshold) and self.lost_radii < 5 and abs_r_diff > (avg_r / self.radii_threshold):
            self.lost_radii += 1
            return False
        else:
            self.lost_radii = 0
            return True

    def fit_polynomial(self, leftx, lefty, rightx, righty):
        try:
            left_fit =
np.polyfit(lefty, leftx, 2)
numpy.polyfit
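The target API here, numpy.polyfit, fits x as a quadratic in y (the lane is closer to vertical, so y serves as the independent variable). A minimal sketch with made-up pixel coordinates:

import numpy as np

lefty = np.array([0., 100., 200., 300.])    # row indices of lane pixels
leftx = np.array([210., 205., 202., 201.])  # column indices of lane pixels
left_fit = np.polyfit(lefty, leftx, 2)      # coefficients [a, b, c] of x = a*y**2 + b*y + c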
import torch
from model import NPT
import numpy as np
import pymesh

net_G = NPT()
net_G.cuda()
net_G.load_state_dict(torch.load('original_169.model'))


def face_reverse(faces):
    identity_faces = faces
    face_dict = {}
    for i in range(len(random_sample)):
        face_dict[random_sample[i]] = i
    new_f = []
    for i in range(len(identity_faces)):
        new_f.append([face_dict[identity_faces[i][0]],
                      face_dict[identity_faces[i][1]],
                      face_dict[identity_faces[i][2]]])
    new_face =
np.array(new_f)
numpy.array
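The completion materializes the remapped face list as a numpy array. A tiny self-contained sketch of the same re-indexing idea, with hypothetical vertex ids standing in for the snippet's undefined random_sample:

import numpy as np

random_sample = [10, 42, 7]   # vertex ids kept after sampling (hypothetical values)
face_dict = {v: i for i, v in enumerate(random_sample)}
faces = [[10, 42, 7]]         # faces indexed by original vertex ids
new_face = np.array([[face_dict[a], face_dict[b], face_dict[c]] for a, b, c in faces])
# new_face -> array([[0, 1, 2]])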