@property def stride(self): """Returns the stride.""" # Backwards compatibility with old stride format. return (1,) + self._stride + (1,) @property def rate(self): """Returns the dilation rate.""" return self._rate @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" self._ensure_is_connected() return self._w @property def b(self): """Returns the Variable containing the bias. Returns: Variable object containing the bias, from the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. AttributeError: If the module does not use bias. """ self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in Conv2D Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers @property def mask(self): """Returns the mask.""" return self._mask @property def data_format(self): """Returns the data format.""" return self._data_format def clone(self, name=None): """Returns a cloned `Conv2D` module. Args: name: Optional string assigning name of cloned module. The default name is constructed by appending "_clone" to `self.module_name`. Returns: `Conv2D` module. """ if name is None: name = self.module_name + "_clone" return Conv2D(output_channels=self.output_channels, kernel_shape=self.kernel_shape, stride=self.stride, rate=self.rate, padding=self.padding, use_bias=self.has_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, mask=self.mask, data_format=self.data_format, custom_getter=self._custom_getter, name=name) # Implements Transposable interface. @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape # Implements Transposable interface. def transpose(self, name=None): """Returns matching `Conv2DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1. """ if any(x > 1 for x in self._rate): raise base.NotSupportedError( "Cannot transpose a dilated convolution module.") if name is None: name = self.module_name + "_transpose" def output_shape(): if self._data_format != DATA_FORMAT_NCHW: return self.input_shape[1:3] else: return self.input_shape[2:4] return Conv2DTranspose(output_channels=lambda: self._input_channels, output_shape=output_shape, kernel_shape=self.kernel_shape, stride=self.stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name) class Conv2DTranspose(base.AbstractModule, base.Transposable): """Spatial transposed / reverse / up 2D convolution module, including bias. 
  This acts as a light wrapper around the TensorFlow op
  `tf.nn.conv2d_transpose`, abstracting away variable creation and sharing.
  """

  def __init__(self, output_channels, output_shape=None, kernel_shape=None,
               stride=1, padding=SAME, use_bias=True, initializers=None,
               partitioners=None, regularizers=None,
               data_format=DATA_FORMAT_NHWC, custom_getter=None,
               name="conv_2d_transpose"):
    """Constructs a `Conv2DTranspose` module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_guides/python/nn#Convolution

    Args:
      output_channels: Number of output channels. Can be either a number or a
        callable. In the latter case, since the function invocation is
        deferred to graph construction time, the user must only ensure
        `output_channels` can be called, returning an integer, when `build` is
        called.
      output_shape: Output shape of transpose convolution. Can be either an
        iterable of integers or a callable. In the latter case, since the
        function invocation is deferred to graph construction time, the user
        must only ensure that `output_shape` can be called, returning an
        iterable of format `(out_height, out_width)` when `build` is called.
        Note that `output_shape` defines the size of output signal domain, as
        opposed to the shape of the output `Tensor`. If a None value is given,
        a default shape is automatically calculated (see docstring of
        `_default_transpose_size` function for more details).
      kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 2), or integer that is used
        to define stride in all dimensions.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      data_format: A string. Specifies whether the channel dimension of the
        input and output is the last dimension (default, NHWC), or the second
        dimension ("NCHW").
      custom_getter: Callable or dictionary of callables to use as custom
        getters inside the module. If a dictionary, the keys correspond to
        regexes to match variable names. See the `tf.get_variable`
        documentation for information about the custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is neither an
        integer nor a sequence of two integers.
      base.IncompatibleShapeError: If the given stride is neither an integer
        nor a sequence of two or four integers.
      ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
      ValueError: If the given data_format is not a supported format (see
        SUPPORTED_DATA_FORMATS).
      ValueError: If the given kernel_shape is `None`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or
        regularizers are not callable.
""" super(Conv2DTranspose, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels if output_shape is None: self._output_shape = None self._use_default_output_shape = True else: self._use_default_output_shape = False if callable(output_shape): self._output_shape = output_shape else: self._output_shape = _fill_and_verify_parameter_shape(output_shape, 2, "output_shape") self._input_shape = None if data_format not in SUPPORTED_DATA_FORMATS: raise ValueError("Invalid data_format {:s}. Allowed formats " "{:s}".format(data_format, SUPPORTED_DATA_FORMATS)) self._data_format = data_format if kernel_shape is None: raise ValueError("`kernel_shape` cannot be None.") self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2, "kernel") # We want to support passing native strides akin to [1, m, n, 1]. if isinstance(stride, collections.Iterable) and len(stride) == 4: if not stride[0] == stride[3] == 1: raise base.IncompatibleShapeError( "Invalid stride: First and last element must be 1.") self._stride = tuple(stride) else: self._stride = _fill_and_one_pad_stride(stride, 2) self._padding = _verify_padding(padding) self._use_bias = use_bias self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias) self._initializers = util.check_initializers( initializers, self.possible_keys) self._partitioners = util.check_partitioners( partitioners, self.possible_keys) self._regularizers = util.check_regularizers( regularizers, self.possible_keys) @classmethod def get_possible_initializer_keys(cls, use_bias=True): return {"w", "b"} if use_bias else {"w"} def _build(self, inputs): """Connects the Conv2DTranspose module into the graph. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final 3 dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A 4D Tensor of shape [batch_size, input_height, input_width, input_channels]. Returns: A 4D Tensor of shape [batch_size, output_height, output_width, output_channels]. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred size of the input does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions; or if the input tensor has an unknown `input_channels`; or or if `output_shape` is an iterable and is not in the format `(out_height, out_width)`. TypeError: If input Tensor dtype is not compatible with `tf.float32`. """ # Handle input whose shape is unknown during graph creation. 
    self._input_shape = tuple(inputs.get_shape().as_list())

    if len(self._input_shape) != 4:
      raise base.IncompatibleShapeError(
          "Input Tensor must have shape (batch_size, input_height, "
          "input_width, input_channels)")

    if self._data_format == DATA_FORMAT_NCHW:
      input_channels = self._input_shape[1]
    else:
      input_channels = self._input_shape[3]

    if input_channels is None:
      raise base.IncompatibleShapeError(
          "Number of input channels must be known at module build time")

    if not tf.float32.is_compatible_with(inputs.dtype):
      raise TypeError("Input must have dtype tf.float32, but dtype was " +
                      inputs.dtype.name)

    if self._use_default_output_shape:
      self._output_shape = (
          lambda: _default_transpose_size(self._input_shape[1:-1],  # pylint: disable=g-long-lambda
                                          self.stride[1:-1],
                                          kernel_shape=self.kernel_shape,
                                          padding=self.padding))

    if len(self.output_shape) != 2:
      raise base.IncompatibleShapeError("Output shape must be specified as "
                                        "(output_height, output_width)")

    weight_shape = (self._kernel_shape[0], self._kernel_shape[1],
                    self.output_channels, input_channels)

    bias_shape = (self.output_channels,)

    if "w" not in self._initializers:
      fan_in_shape = weight_shape[:2] + (weight_shape[3],)
      self._initializers["w"] = create_weight_initializer(fan_in_shape)

    if "b" not in self._initializers and self._use_bias:
      self._initializers["b"] = create_bias_initializer(bias_shape)

    self._w = tf.get_variable("w",
                              shape=weight_shape,
                              initializer=self._initializers["w"],
                              partitioner=self._partitioners.get("w", None),
                              regularizer=self._regularizers.get("w", None))

    # Use the tensorflow shape op to manipulate the inputs shape, so that an
    # unknown batch size - which can happen when using input placeholders -
    # is handled correctly.
    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
    out_shape = tuple(self.output_shape)
    out_channels = (self.output_channels,)
    if self._data_format == DATA_FORMAT_NCHW:
      out_shape_tuple = out_channels + out_shape
    else:
      out_shape_tuple = out_shape + out_channels
    conv_output_shape = tf.convert_to_tensor(out_shape_tuple)
    output_shape = tf.concat([batch_size, conv_output_shape], 0)

    outputs = tf.nn.conv2d_transpose(inputs,
                                     self._w,
                                     output_shape,
                                     strides=self._stride,
                                     padding=self._padding,
                                     data_format=self._data_format)

    if self._use_bias:
      self._b = tf.get_variable("b",
                                shape=bias_shape,
                                initializer=self._initializers["b"],
                                partitioner=self._partitioners.get("b", None),
                                regularizer=self._regularizers.get("b", None))
      outputs = tf.nn.bias_add(outputs, self._b,
                               data_format=self._data_format)

    # Recover the output tensor shape value and pass it to set_shape in order
    # to enable shape inference.
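    # For example (illustrative), with NHWC data, output_shape=(8, 8) and
    # output_channels=16, the static shape set below is
    # (batch_size, 8, 8, 16), which tf.nn.conv2d_transpose alone cannot
    # infer because it receives the output shape as a dynamic tensor.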
batch_size_value = inputs.get_shape()[0] if self._data_format == DATA_FORMAT_NCHW: output_shape_value = ((batch_size_value,) + (self.output_channels,) + self.output_shape) else: output_shape_value = ((batch_size_value,) + self.output_shape + (self.output_channels,)) outputs.set_shape(output_shape_value) return outputs @property def output_channels(self): """Returns the number of output channels.""" if callable(self._output_channels): self._output_channels = self._output_channels() return self._output_channels @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def stride(self): """Returns the stride.""" return self._stride @property def output_shape(self): """Returns the output shape.""" if self._output_shape is None: self._ensure_is_connected() if callable(self._output_shape): self._output_shape = tuple(self._output_shape()) return self._output_shape @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" self._ensure_is_connected() return self._w @property def b(self): """Returns the Variable containing the bias. Returns: Variable object containing the bias, from the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. AttributeError: If the module does not use bias. """ self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in Conv2DTranspose Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers # Implements Transposable interface. @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape # Implements Transposable interface. def transpose(self, name=None): """Returns matching `Conv2D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2D` module. """ if name is None: name = self.module_name + "_transpose" return Conv2D(output_channels=lambda: self.input_shape[-1], kernel_shape=self.kernel_shape, stride=self.stride[1:-1], padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name) class Conv1D(base.AbstractModule, base.Transposable): """1D convolution module, including optional bias. This acts as a light wrapper around the TensorFlow op `tf.nn.convolution`, abstracting away variable creation and sharing. """ def __init__(self, output_channels, kernel_shape, stride=1, rate=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="conv_1d"): """Constructs a Conv1D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. 
      `output_channels` can be either a number or a callable. In the latter
        case, since the function invocation is deferred to graph construction
        time, the user must only ensure that `output_channels` can be called,
        returning an integer, when `build` is called.
      kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 1), or integer that is used
        to define stride in all dimensions.
      rate: Sequence of dilation rates (of size 1), or integer that is used to
        define dilation rate in all dimensions. 1 corresponds to standard
        convolution, `rate > 1` corresponds to dilated convolution. Cannot be
        > 1 if any of `stride` is also > 1.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b'). The default initializer for
        the weights is a truncated normal initializer, which is commonly used
        when the inputs are zero centered (see
        https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
        the bias is a zero initializer.
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      custom_getter: Callable or dictionary of callables to use as custom
        getters inside the module. If a dictionary, the keys correspond to
        regexes to match variable names. See the `tf.get_variable`
        documentation for information about the custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an
        integer; or if the given kernel shape is not a sequence of one
        integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or
        if the given stride is not a sequence of one or three integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of one integer.
      base.NotSupportedError: If rate in any dimension and the stride in any
        dimension are simultaneously > 1.
      ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or
        regularizers are not callable.
    """
    super(Conv1D, self).__init__(custom_getter=custom_getter, name=name)

    self._output_channels = output_channels
    self._input_shape = None
    self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 1,
                                                          "kernel")
    # The following is for backwards-compatibility from when we used to accept
    # 3-strides of the form [1, m, 1].
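    # For example (illustrative), stride=[1, 2, 1] is treated the same as
    # stride=2 or stride=(2,): the outer elements are dropped.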
    if isinstance(stride, collections.Iterable) and len(stride) == 3:
      self._stride = tuple(stride)[1:-1]
    else:
      self._stride = _fill_and_verify_parameter_shape(stride, 1, "stride")

    self._rate = _fill_and_verify_parameter_shape(rate, 1, "rate")

    if any(x > 1 for x in self._stride) and any(x > 1 for x in self._rate):
      raise base.NotSupportedError(
          "Cannot have stride > 1 with rate > 1")

    self._padding = _verify_padding(padding)
    self._use_bias = use_bias
    self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
    self._initializers = util.check_initializers(
        initializers, self.possible_keys)
    self._partitioners = util.check_partitioners(
        partitioners, self.possible_keys)
    self._regularizers = util.check_regularizers(
        regularizers, self.possible_keys)

  @classmethod
  def get_possible_initializer_keys(cls, use_bias=True):
    return {"w", "b"} if use_bias else {"w"}

  def _build(self, inputs):
    """Connects the Conv1D module into the graph, with input Tensor `inputs`.

    If this is not the first time the module has been connected to the graph,
    the input Tensor provided here must have the same final 2 dimensions, in
    order for the existing variables to be the correct size for the
    multiplication. The batch size may differ for each connection.

    Args:
      inputs: A 3D Tensor of shape [batch_size, input_length, input_channels].

    Returns:
      A 3D Tensor of shape [batch_size, output_length, output_channels].

    Raises:
      ValueError: If connecting the module into the graph any time after the
        first time and the inferred size of the input does not match previous
        invocations.
      base.IncompatibleShapeError: If the input tensor has the wrong number
        of dimensions.
      base.UnderspecifiedError: If the input tensor has an unknown
        `input_channels`.
      TypeError: If input Tensor dtype is not compatible with `tf.float32`.
    """
    # Handle input whose shape is unknown during graph creation.
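    # For example (illustrative), an input defined as
    #   tf.placeholder(tf.float32, shape=[None, 100, 16])
    # is accepted here, whereas shape=[None, 100, None] is rejected below,
    # since the channel count is needed to size the weight variable.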
self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 3: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_length, input_" "channels)") if self._input_shape[2] is None: raise base.UnderspecifiedError( "Number of input channels must be known at module build time") else: input_channels = self._input_shape[2] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError( "Input must have dtype tf.float32, but dtype was {}".format( inputs.dtype)) weight_shape = ( self._kernel_shape[0], input_channels, self.output_channels) bias_shape = (self.output_channels,) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:2]) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w = tf.get_variable("w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) outputs = tf.nn.convolution(inputs, self._w, strides=self._stride, padding=self._padding, dilation_rate=self._rate) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs @property def output_channels(self): """Returns the number of output channels.""" if callable(self._output_channels): self._output_channels = self._output_channels() return self._output_channels @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def stride(self): """Returns the stride.""" # Backwards compatibility with old stride format. return (1,) + self._stride + (1,) @property def rate(self): """Returns the dilation rate.""" return self._rate @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" return self._w @property def b(self): """Returns the Variable containing the bias.""" return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers # Implement Transposable interface def transpose(self, name=None): """Returns matching `Conv1DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1. 
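    For example (an illustrative sketch, assuming the module is connected
    before being transposed):

        conv = Conv1D(output_channels=8, kernel_shape=3)
        outputs = conv(inputs)      # inputs: [batch_size, length, channels]
        conv_t = conv.transpose()   # maps back to the input signal domain
        recovered = conv_t(outputs)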
""" if any(x > 1 for x in self._rate): raise base.NotSupportedError( "Cannot transpose a dilated convolution module.") if name is None: name = self.module_name + "_transpose" return Conv1DTranspose(output_channels=lambda: self.input_shape[-1], output_shape=lambda: self.input_shape[1:-1], kernel_shape=self.kernel_shape, stride=self.stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, custom_getter=self._custom_getter, name=name) class Conv1DTranspose(base.AbstractModule, base.Transposable): """1D transposed / reverse / up 1D convolution module, including bias. This performs a 1D transpose convolution by lightly wrapping the TensorFlow op `tf.nn.conv2d_transpose`, setting the size of the height dimension of the image to 1. """ def __init__(self, output_channels, output_shape=None, kernel_shape=None, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="conv_1d_transpose"): """Constructs a Conv1DTranspose module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. Can be either a number or a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure `output_channels` can be called, returning an integer, when build is called. output_shape: Output shape of transpose convolution. Can be either a number or a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure that `output_shape` can be called, returning an iterable of format `(out_length)` when build is called. If a None value is given, a default shape is automatically calculated (see docstring of _default_transpose_size function for more details). kernel_shape: Sequence of kernel sizes (of size 1), or integer that is used to define kernel size in all dimensions. stride: Sequence of kernel strides (of size 1), or integer that is used to define stride in all dimensions. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Whether to include bias parameters. Default `True`. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: base.IncompatibleShapeError: If the given kernel shape is not an integer; or if the given kernel shape is not a sequence of two integers. base.IncompatibleShapeError: If the given stride is not an integer; or if the given stride is not a sequence of two or four integers. ValueError: If the given padding is not `snt.VALID` or `snt.SAME`. 
      ValueError: If the given kernel_shape is `None`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or
        regularizers are not callable.
    """
    super(Conv1DTranspose, self).__init__(custom_getter=custom_getter,
                                          name=name)

    self._output_channels = output_channels

    if output_shape is None:
      self._output_shape = None
      self._use_default_output_shape = True
    else:
      self._use_default_output_shape = False
      if callable(output_shape):
        self._output_shape = output_shape
      elif isinstance(output_shape, numbers.Integral):
        self._output_shape = (output_shape,)
      elif isinstance(output_shape, collections.Iterable):
        self._output_shape = tuple(output_shape)

    self._input_shape = None

    if kernel_shape is None:
      raise ValueError("`kernel_shape` cannot be None.")
    self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 1,
                                                          "kernel")
    # We want to support passing 'native' strides akin to [1, m, 1].
    if isinstance(stride, collections.Iterable) and len(stride) == 3:
      if not stride[0] == stride[2] == 1:
        raise base.IncompatibleShapeError(
            "Invalid stride: First and last element must be 1.")
      # Need to make a 4D stride in order to use tf.nn.conv2d_transpose.
      self._stride = (1,) + tuple(stride)
    else:
      # Need to make a 4D stride in order to use tf.nn.conv2d_transpose.
      self._stride = (1,) + _fill_and_one_pad_stride(stride, 1)

    self._padding = _verify_padding(padding)
    self._use_bias = use_bias
    self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
    self._initializers = util.check_initializers(
        initializers, self.possible_keys)
    self._partitioners = util.check_partitioners(
        partitioners, self.possible_keys)
    self._regularizers = util.check_regularizers(
        regularizers, self.possible_keys)

  @classmethod
  def get_possible_initializer_keys(cls, use_bias=True):
    return {"w", "b"} if use_bias else {"w"}

  def _build(self, inputs):
    """Connects the Conv1DTranspose module into the graph.

    If this is not the first time the module has been connected to the graph,
    the input Tensor provided here must have the same final 2 dimensions, in
    order for the existing variables to be the correct size for the
    multiplication. The batch size may differ for each connection.

    Args:
      inputs: A 3D Tensor of shape `[batch_size, input_length,
        input_channels]`.

    Returns:
      A 3D Tensor of shape `[batch_size, output_length, output_channels]`.

    Raises:
      ValueError: If connecting the module into the graph any time after the
        first time and the inferred size of the input does not match previous
        invocations.
      base.IncompatibleShapeError: If the input tensor has the wrong number
        of dimensions.
      base.UnderspecifiedError: If the input tensor has an unknown
        `input_channels`.
      base.IncompatibleShapeError: If `output_shape` is not an integer or
        iterable of length 1.
      TypeError: If input Tensor dtype is not compatible with `tf.float32`.
    """
    # Handle input whose shape is unknown during graph creation.
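    # For example (illustrative), an input of shape [batch, length, channels]
    # is expanded to [batch, 1, length, channels] further down, so that the
    # 1D transpose convolution can be computed with tf.nn.conv2d_transpose;
    # the height-1 dimension is squeezed out again before returning.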
    self._input_shape = tuple(inputs.get_shape().as_list())

    if len(self._input_shape) != 3:
      raise base.IncompatibleShapeError(
          "Input Tensor must have shape (batch_size, input_length, "
          "input_channels)")

    if self._input_shape[2] is None:
      raise base.UnderspecifiedError(
          "Number of input channels must be known at module build time")

    input_channels = self._input_shape[2]

    if self._use_default_output_shape:
      self._output_shape = (
          lambda: _default_transpose_size(self._input_shape[1:-1],  # pylint: disable=g-long-lambda
                                          self.stride[2],
                                          kernel_shape=self.kernel_shape,
                                          padding=self.padding))

    if len(self.output_shape) != 1:
      raise base.IncompatibleShapeError(
          "Output shape must be specified as (output_length,)")

    if not tf.float32.is_compatible_with(inputs.dtype):
      raise TypeError("Input must have dtype tf.float32, but dtype was {}"
                      .format(inputs.dtype))

    weight_shape = (1, self._kernel_shape[0], self.output_channels,
                    input_channels)

    bias_shape = (self.output_channels,)

    if "w" not in self._initializers:
      fan_in_shape = (weight_shape[1], weight_shape[3])
      self._initializers["w"] = create_weight_initializer(fan_in_shape)

    if "b" not in self._initializers and self._use_bias:
      self._initializers["b"] = create_bias_initializer(bias_shape)

    self._w = tf.get_variable("w",
                              shape=weight_shape,
                              initializer=self._initializers["w"],
                              partitioner=self._partitioners.get("w", None),
                              regularizer=self._regularizers.get("w", None))

    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
    out_shape = (1, self.output_shape[0])
    out_channels = (self.output_channels,)
    out_shape_tuple = out_shape + out_channels
    conv_output_shape = tf.convert_to_tensor(out_shape_tuple)
    tf_out_shape = tf.concat([batch_size, conv_output_shape], 0)

    # Add an extra dimension to the input - a height of 1.
    inputs = tf.expand_dims(inputs, 1)

    outputs = tf.nn.conv2d_transpose(inputs,
                                     self._w,
                                     tf_out_shape,
                                     strides=self._stride,
                                     padding=self._padding)

    if self._use_bias:
      self._b = tf.get_variable("b",
                                shape=bias_shape,
                                initializer=self._initializers["b"],
                                partitioner=self._partitioners.get("b", None),
                                regularizer=self._regularizers.get("b", None))
      outputs = tf.nn.bias_add(outputs, self._b)

    # Remove the superfluous height dimension to return a 3D tensor.
    outputs = tf.squeeze(outputs, [1])

    # Set the static tensor shape to enable shape inference.
    batch_size_value = inputs.get_shape()[0]
    output_shape_value = ((batch_size_value,) + self.output_shape +
                          (self.output_channels,))
    outputs.set_shape(output_shape_value)
    return outputs

  @property
  def output_channels(self):
    """Returns the number of output channels."""
    if callable(self._output_channels):
      self._output_channels = self._output_channels()
    return self._output_channels

  @property
  def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

  @property
  def stride(self):
    """Returns the stride."""
    return self._stride

  @property
  def output_shape(self):
    """Returns the output shape."""
    if self._output_shape is None:
      self._ensure_is_connected()
    if callable(self._output_shape):
      self._output_shape = tuple(self._output_shape())
    return self._output_shape

  @property
  def input_shape(self):
    """Returns the input shape."""
    self._ensure_is_connected()
    return self._input_shape

  @property
  def padding(self):
    """Returns the padding algorithm."""
    return self._padding

  @property
  def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

  @property
  def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.
    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
      raise AttributeError(
          "No bias Variable in Conv1DTranspose Module when `use_bias=False`.")
    return self._b

  @property
  def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

  @property
  def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

  @property
  def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

  @property
  def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers

  # Implements Transposable interface.
  def transpose(self, name=None):
    """Returns matching `Conv1D` module.

    Args:
      name: Optional string assigning name of transpose module. The default
        name is constructed by appending "_transpose" to `self.name`.

    Returns:
      `Conv1D` module.
    """
    if name is None:
      name = self.module_name + "_transpose"
    return Conv1D(output_channels=lambda: self.input_shape[-1],
                  kernel_shape=self.kernel_shape,
                  stride=(self._stride[2],),
                  padding=self.padding,
                  use_bias=self._use_bias,
                  initializers=self.initializers,
                  partitioners=self.partitioners,
                  regularizers=self.regularizers,
                  custom_getter=self._custom_getter,
                  name=name)


class CausalConv1D(Conv1D):
  """1D convolution module, including optional bias.

  This acts as a light wrapper around Conv1D ensuring that the outputs at
  index `i` only depend on indices no greater than `i` (also known as a
  causal convolution). For further details on the theoretical background,
  refer to:

  https://arxiv.org/abs/1610.10099
  """

  def __init__(self, output_channels, kernel_shape,
               stride=1, rate=1, use_bias=True, initializers=None,
               partitioners=None, regularizers=None, custom_getter=None,
               name="causal_conv_1d"):
    """Constructs a CausalConv1D module.

    Args:
      output_channels: Number of output channels. `output_channels` can be
        either a number or a callable. In the latter case, since the function
        invocation is deferred to graph construction time, the user must only
        ensure that `output_channels` can be called, returning an integer,
        when `build` is called.
      kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 1), or integer that is used
        to define stride in all dimensions.
      rate: Sequence of dilation rates (of size 1), or integer that is used to
        define dilation rate in all dimensions. 1 corresponds to standard
        convolution, `rate > 1` corresponds to dilated convolution. Cannot be
        > 1 if any of `stride` is also > 1.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b'). The default initializer for
        the weights is a truncated normal initializer, which is commonly used
        when the inputs are zero centered (see
        https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
        the bias is a zero initializer.
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used.
        A regularizer should be a function that takes a single `Tensor` as an
        input and returns a scalar `Tensor` output, e.g. the L1 and L2
        regularizers in `tf.contrib.layers`.
      custom_getter: Callable or dictionary of callables to use as custom
        getters inside the module. If a dictionary, the keys correspond to
        regexes to match variable names. See the `tf.get_variable`
        documentation for information about the custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an
        integer; or if the given kernel shape is not a sequence of one
        integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or
        if the given stride is not a sequence of one or three integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of one integer.
      base.NotSupportedError: If rate in any dimension and the stride in any
        dimension are simultaneously > 1.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or
        regularizers are not callable.
    """
    super(CausalConv1D, self).__init__(
        output_channels=output_channels,
        kernel_shape=kernel_shape,
        stride=stride,
        rate=rate,
        padding=VALID,  # Can't be configured by the user.
        use_bias=use_bias,
        initializers=initializers,
        partitioners=partitioners,
        regularizers=regularizers,
        custom_getter=custom_getter,
        name=name)

  def _build(self, inputs):
    """Connects the CausalConv1D module into the graph, with `inputs` as input.

    If this is not the first time the module has been connected to the graph,
    the input Tensor provided here must have the same final 2 dimensions, in
    order for the existing variables to be the correct size for the
    multiplication. The batch size may differ for each connection.

    Args:
      inputs: A 3D Tensor of shape [batch_size, input_length, input_channels].

    Returns:
      A 3D Tensor of shape [batch_size, output_length, output_channels].

    Raises:
      ValueError: If connecting the module into the graph any time after the
        first time and the inferred size of the input does not match previous
        invocations.
      base.IncompatibleShapeError: If the input tensor has the wrong number
        of dimensions.
      base.UnderspecifiedError: If the input tensor has an unknown
        `input_channels`.
      TypeError: If input Tensor dtype is not compatible with `tf.float32`.
    """
    # Handle input whose shape is unknown during graph creation.
    self._input_shape = tuple(inputs.get_shape().as_list())

    if len(self._input_shape) != 3:
      raise base.IncompatibleShapeError(
          "Input Tensor must have shape (batch_size, input_length, input_"
          "channels)")

    if self._input_shape[2] is None:
      raise base.UnderspecifiedError(
          "Number of input channels must be known at module build time")
    else:
      input_channels = self._input_shape[2]

    if not tf.float32.is_compatible_with(inputs.dtype):
      raise TypeError("Input must have dtype tf.float32, but dtype was {}".
format(inputs.dtype)) weight_shape = (self._kernel_shape[0], input_channels, self.output_channels) bias_shape = (self.output_channels,) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:2]) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w = tf.get_variable( "w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) pad_amount = int((self._kernel_shape[0] - 1) * self._rate[0]) padded_inputs = tf.pad(inputs, paddings=[[0, 0], [pad_amount, 0], [0, 0]]) outputs = tf.nn.convolution( padded_inputs, self._w, strides=self._stride, padding=VALID, dilation_rate=self._rate) if self._use_bias: self._b = tf.get_variable( "b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs class InPlaneConv2D(base.AbstractModule): """Applies an in-plane convolution to each channel with tied filter weights. This acts as a light wrapper around the TensorFlow op `tf.nn.depthwise_conv2d`; it differs from the DepthWiseConv2D module in that it has tied weights (i.e. the same filter) for all the in-out channel pairs. """ def __init__(self, kernel_shape, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="in_plane_conv2d"): """Constructs an InPlaneConv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: kernel_shape: Iterable with 2 elements in the layout [filter_height, filter_width]; or integer that is used to define the list in all dimensions. stride: Iterable with 2 or 4 elements of kernel strides, or integer that is used to define stride in all dimensions. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Whether to include bias parameters. Default `True`. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). partitioners: Optional dict containing partitioners to partition the filters (with key 'w') or biases (with key 'b'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: TypeError: If `kernel_shape` is not an integer or a sequence of 2 integers. ValueError: If `stride` is neither an integer nor a sequence of 2 or 4 integers. ValueError: If stride is a sequence of 4 integers, the first and last dimensions are not equal to 1. ValueError: If `padding` is not `snt.VALID` or `snt.SAME`. KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than 'w' or 'b'. TypeError: If any of the given initializers, partitioners or regularizers are not callable. 
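    For example (an illustrative sketch):

        module = InPlaneConv2D(kernel_shape=3)
        outputs = module(inputs)  # output channel count equals the input's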
""" super(InPlaneConv2D, self).__init__(custom_getter=custom_getter, name=name) self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2, "kernel") # We want to support passing native strides akin to [1, m, n, 1]. if isinstance(stride, collections.Iterable) and len(stride) == 4: if not stride[0] == stride[3] == 1: raise ValueError("Invalid stride: First and last element must be 1.") self._stride = tuple(stride) else: self._stride = _fill_and_one_pad_stride(stride, 2) self._padding = _verify_padding(padding) self._use_bias = use_bias self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias) self._initializers = util.check_initializers( initializers, self.possible_keys) self._partitioners = util.check_partitioners( partitioners, self.possible_keys) self._regularizers = util.check_regularizers( regularizers, self.possible_keys) self._input_shape = None # Determined in build() from the input. self._input_channels = None # Determined in build() from the input. @classmethod def get_possible_initializer_keys(cls, use_bias=True): return {"w", "b"} if use_bias else {"w"} def _build(self, inputs): """Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels]. Returns: A 4D Tensor of shape: [batch_size, output_height, output_width, input_channels]. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred input size does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions; or if the input tensor has an unknown `input_channels`. TypeError: If input Tensor dtype is not compatible with `tf.float32`. """ # Handle input whose shape is unknown during graph creation. self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 4: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_height, " "input_width, input_channels)") if self._input_shape[3] is None: raise base.IncompatibleShapeError( "Number of input channels must be known at module build time") self._input_channels = self._input_shape[3] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError("Input must have dtype tf.float32, but dtype was " + inputs.dtype.name) weight_shape = ( self._kernel_shape[0], self._kernel_shape[1], 1, 1) bias_shape = (self._input_channels,) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:2]) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w = tf.get_variable("w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) tiled_weights = tf.tile(self._w, [1, 1, self._input_channels, 1]) outputs = tf.nn.depthwise_conv2d(inputs, tiled_weights, strides=self._stride, padding=self._padding) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs @property def input_channels(self): """Returns the number of input channels.""" self._ensure_is_connected() return self._input_channels @property def output_channels(self): """Returns the number of output channels i.e. 
    the number of input channels."""
    self._ensure_is_connected()
    return self._input_channels

  @property
  def input_shape(self):
    """Returns the input shape."""
    self._ensure_is_connected()
    return self._input_shape

  @property
  def kernel_shape(self):
    """Returns the kernel shape."""
    return self._kernel_shape

  @property
  def stride(self):
    """Returns the stride."""
    return self._stride

  @property
  def padding(self):
    """Returns the padding algorithm."""
    return self._padding

  @property
  def w(self):
    """Returns the Variable containing the weight matrix."""
    self._ensure_is_connected()
    return self._w

  @property
  def b(self):
    """Returns the Variable containing the bias.

    Returns:
      Variable object containing the bias, from the most recent __call__.

    Raises:
      base.NotConnectedError: If the module has not been connected to the
        graph yet, meaning the variables do not exist.
      AttributeError: If the module does not use bias.
    """
    self._ensure_is_connected()
    if not self._use_bias:
      raise AttributeError(
          "No bias Variable in InPlaneConv2D Module when `use_bias=False`.")
    return self._b

  @property
  def has_bias(self):
    """Returns `True` if bias Variable is present in the module."""
    return self._use_bias

  @property
  def initializers(self):
    """Returns the initializers dictionary."""
    return self._initializers

  @property
  def partitioners(self):
    """Returns the partitioners dictionary."""
    return self._partitioners

  @property
  def regularizers(self):
    """Returns the regularizers dictionary."""
    return self._regularizers


class DepthwiseConv2D(base.AbstractModule):
  """Spatial depthwise 2D convolution module, including bias.

  This acts as a light wrapper around the TensorFlow op
  `tf.nn.depthwise_conv2d`, abstracting away variable creation and sharing.
  """

  def __init__(self,
               channel_multiplier,
               kernel_shape,
               stride=1,
               padding=SAME,
               use_bias=True,
               initializers=None,
               partitioners=None,
               regularizers=None,
               custom_getter=None,
               name="conv_2d_depthwise"):
    """Constructs a DepthwiseConv2D module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_guides/python/nn#Convolution

    Args:
      channel_multiplier: Number of channels to expand convolution to. Must
        be an integer. Must be > 0. When `channel_multiplier` is set to 1,
        applies a different filter to each input channel, producing one
        output channel per input channel. Numbers larger than 1 cause
        multiple different filters to be applied to each input channel, with
        their outputs being concatenated together, producing
        `channel_multiplier` * `input_channels` output channels.
      kernel_shape: Iterable with 2 elements in the following layout:
        [filter_height, filter_width] or integer that is used to define the
        list in all dimensions.
      stride: Iterable with 2 or 4 elements of kernel strides, or integer that
        is used to define stride in all dimensions. Layout of list:
        In case of 4 elements: `[1, stride_height, stride_width, 1]`
        In case of 2 elements: `[stride_height, stride_width]`.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used.
        A regularizer should be a function that takes a single `Tensor` as an
        input and returns a scalar `Tensor` output, e.g. the L1 and L2
        regularizers in `tf.contrib.layers`.
      custom_getter: Callable or dictionary of callables to use as custom
        getters inside the module. If a dictionary, the keys correspond to
        regexes to match variable names. See the `tf.get_variable`
        documentation for information about the custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If `kernel_shape` is not an integer or a
        sequence of 2 integers.
      base.IncompatibleShapeError: If `stride` is neither an integer nor a
        sequence of 2 or 4 integers.
      base.IncompatibleShapeError: If `stride` is a sequence of 4 integers
        and its first and last elements are not equal to 1.
      ValueError: If `channel_multiplier` is not an integer >= 1.
      ValueError: If `padding` is not `snt.VALID` or `snt.SAME`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain
        any keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or
        regularizers are not callable.
    """
    super(DepthwiseConv2D, self).__init__(custom_getter=custom_getter,
                                          name=name)

    if (not isinstance(channel_multiplier, numbers.Integral) or
        channel_multiplier < 1):
      raise ValueError("channel_multiplier (=%d), must be integer >= 1" %
                       channel_multiplier)
    self._channel_multiplier = channel_multiplier

    self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2,
                                                          "kernel")
    # We want to support passing native strides akin to [1, m, n, 1].
    if isinstance(stride, collections.Iterable) and len(stride) == 4:
      if not stride[0] == stride[3] == 1:
        raise base.IncompatibleShapeError(
            "Invalid stride: First and last element must be 1.")
      self._stride = tuple(stride)
    else:
      self._stride = _fill_and_one_pad_stride(stride, 2)

    self._padding = _verify_padding(padding)
    self._use_bias = use_bias
    self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
    self._initializers = util.check_initializers(
        initializers, self.possible_keys)
    self._partitioners = util.check_partitioners(
        partitioners, self.possible_keys)
    self._regularizers = util.check_regularizers(
        regularizers, self.possible_keys)

    self._input_shape = None  # Determined in build() from the input.
    self._input_channels = None  # Determined in build() from the input.
    self._output_channels = None  # Ditto, determined from input and kernel.

  @classmethod
  def get_possible_initializer_keys(cls, use_bias=True):
    return {"w", "b"} if use_bias else {"w"}

  def _build(self, inputs):
    """Connects the module into the graph, with input Tensor `inputs`.

    If this is not the first time the module has been connected to the graph,
    the input Tensor provided here must have the same final 3 dimensions, in
    order for the existing variables to be the correct size for the
    multiplication. The batch size may differ for each connection.

    Args:
      inputs: A 4D Tensor of shape:
        `[batch_size, input_height, input_width, input_channels]`.

    Returns:
      A 4D Tensor of shape:
        `[batch_size, output_height, output_width, output_channels]`, where
        `output_channels = input_channels * channel_multiplier`; see
        `kernel_shape`.

    Raises:
      ValueError: If connecting the module into the graph any time after the
        first time and the inferred size of the input does not match previous
        invocations.
      base.IncompatibleShapeError: If the input tensor has the wrong number
        of dimensions; or if the input tensor has an unknown `input_channels`.
      TypeError: If input Tensor dtype is not compatible with `tf.float32`.
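    For example (illustrative), with `channel_multiplier=2` an input of shape
    `[batch_size, height, width, 3]` produces an output with 6 channels.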
""" # Handle input whose shape is unknown during graph creation. self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 4: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_height, " "input_width, input_channels)") if self._input_shape[3] is None: raise base.IncompatibleShapeError( "Number of input channels must be known at module build time") self._input_channels = self._input_shape[3] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError("Input must have dtype tf.float32, but dtype was " + inputs.dtype.name) # For depthwise conv, output_channels = in_channels * channel_multiplier. # By default, depthwise conv applies a different filter to every input # channel. If channel_multiplier > 1, one input channel is used to produce # `channel_multiplier` outputs, which are then concatenated together. # This results in: self._output_channels = self._input_channels * self._channel_multiplier weight_shape = (self._kernel_shape[0], self._kernel_shape[1], self._input_channels, self._channel_multiplier) bias_shape = (self._output_channels,) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:3]) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w = tf.get_variable("w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) outputs = tf.nn.depthwise_conv2d(inputs, self._w, strides=self._stride, padding=self._padding) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs @property def input_channels(self): """Returns the number of input channels.""" self._ensure_is_connected() return self._input_channels @property def output_channels(self): """Returns the number of output channels.""" self._ensure_is_connected() return self._output_channels @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def channel_multiplier(self): """Returns the channel multiplier.""" return self._channel_multiplier @property def stride(self): """Returns the stride.""" return self._stride @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" self._ensure_is_connected() return self._w @property def b(self): """Returns the Variable containing the bias. Returns: Variable object containing the bias, from the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. AttributeError: If the module does not use bias. 
""" self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in DepthwiseConv2D Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers class SeparableConv2D(base.AbstractModule): """Performs an in-plane convolution to each channel independently. This acts as a light wrapper around the TensorFlow op `tf.nn.separable_conv2d`, abstracting away variable creation and sharing. """ def __init__(self, output_channels, channel_multiplier, kernel_shape, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="Separable_conv2d"): """Constructs a SeparableConv2D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. Must be an integer. channel_multiplier: Number of channels to expand pointwise (depthwise) convolution to. Must be an integer. Must be > 0. When `channel_multiplier` is set to 1, applies a different filter to each input channel. Numbers larger than 1 cause the filter to be applied to `channel_multiplier` input channels. Outputs are concatenated together. kernel_shape: List with 2 elements in the following layout: [filter_height, filter_width] or integer that is used to define the list in all dimensions. stride: List with 4 elements of kernel strides, or integer that is used to define stride in all dimensions. Layout of list: [1, stride_y, stride_x, 1]. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Whether to include bias parameters. Default `True`. initializers: Optional dict containing ops to initialize the filters (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) or biases (with key 'b'). partitioners: Optional dict containing partitioners to partition the filters (with key 'w') or biases (with key 'b'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: ValueError: If either `output_channels` or `channel_multiplier` is not an integer or less than 1. base.IncompatibleShapeError: If `kernel_shape` is not an integer or a list of 3 integers. base.IncompatibleShapeError: If `stride` is neither an integer nor a list of 2 or 4 integers. ValueError: If `padding` is not `snt.VALID` or `snt.SAME`; KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than 'w_dw', 'w_pw' or 'b'. 
TypeError: If any of the given initializers, partitioners or regularizers are not callable. """ super(SeparableConv2D, self).__init__(custom_getter=custom_getter, name=name) if not isinstance(output_channels, numbers.Integral) or output_channels < 1: raise ValueError("output_channels (={}), must be integer >= 1".format( output_channels)) self._output_channels = output_channels if (not isinstance(channel_multiplier, numbers.Integral) or channel_multiplier < 1): raise ValueError("channel_multiplier (={}), must be integer >= 1".format( channel_multiplier)) self._channel_multiplier = channel_multiplier self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2, "kernel") # We want to support passing native strides akin to [1, m, n, 1]. if isinstance(stride, collections.Sequence) and len(stride) == 4: if not stride[0] == stride[3] == 1: raise base.IncompatibleShapeError( "Invalid stride: First and last element must be 1.") if not (isinstance(stride[1], numbers.Integral) and isinstance(stride[2], numbers.Integral)): raise base.IncompatibleShapeError( "Invalid stride: stride[1] and stride[2] must be integers.") self._stride = tuple(stride) else: self._stride = _fill_and_one_pad_stride(stride, 2) self._padding = _verify_padding(padding) self._use_bias = use_bias self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias) self._initializers = util.check_initializers( initializers, self.possible_keys) self._partitioners = util.check_partitioners( partitioners, self.possible_keys) self._regularizers = util.check_regularizers( regularizers, self.possible_keys) self._input_shape = None # Determined in build() from the input. self._input_channels = None # Determined in build() from the input. @classmethod def get_possible_initializer_keys(cls, use_bias=True): return {"w_dw", "w_pw", "b"} if use_bias else {"w_dw", "w_pw"} def _build(self, inputs): """Connects the module into the graph, with input Tensor `inputs`. Args: inputs: A 4D Tensor of shape: [batch_size, input_height, input_width, input_channels]. Returns: A 4D Tensor of shape: [batch_size, output_height, output_width, output_channels]. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred input size does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions; or if the input tensor has an unknown `input_channels`. TypeError: If input Tensor dtype is not compatible with `tf.float32`. """ # Handle input whose shape is unknown during graph creation.
self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 4: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_height, " "input_width, input_channels)") if self._input_shape[3] is None: raise base.IncompatibleShapeError( "Number of input channels must be known at module build time") self._input_channels = self._input_shape[3] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError("Input must have dtype tf.float32, but dtype was " + inputs.dtype.name) depthwise_weight_shape = (self._kernel_shape[0], self._kernel_shape[1], self._input_channels, self._channel_multiplier) pointwise_input_size = self._channel_multiplier * self._input_channels pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels) bias_shape = (self._output_channels,) if "w_dw" not in self._initializers: fan_in_shape = depthwise_weight_shape[:3] self._initializers["w_dw"] = create_weight_initializer(fan_in_shape) if "w_pw" not in self._initializers: fan_in_shape = pointwise_weight_shape[:3] self._initializers["w_pw"] = create_weight_initializer(fan_in_shape) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w_dw = tf.get_variable( "w_dw", shape=depthwise_weight_shape, initializer=self._initializers["w_dw"], partitioner=self._partitioners.get("w_dw", None), regularizer=self._regularizers.get("w_dw", None)) self._w_pw = tf.get_variable( "w_pw", shape=pointwise_weight_shape, initializer=self._initializers["w_pw"], partitioner=self._partitioners.get("w_pw", None), regularizer=self._regularizers.get("w_pw", None)) outputs = tf.nn.separable_conv2d(inputs, self._w_dw, self._w_pw, strides=self._stride, padding=self._padding) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs @property def input_channels(self): """Returns the number of input channels.""" self._ensure_is_connected() return self._input_channels @property def output_channels(self): """Returns the number of output channels.""" return self._output_channels @property def channel_multiplier(self): """Returns the channel multiplier.""" return self._channel_multiplier @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def stride(self): """Returns the stride.""" return self._stride @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w_dw(self): """Returns the Variable containing the depthwise weight matrix.""" self._ensure_is_connected() return self._w_dw @property def w_pw(self): """Returns the Variable containing the pointwise weight matrix.""" self._ensure_is_connected() return self._w_pw @property def b(self): """Returns the Variable containing the bias. Returns: Variable object containing the bias, from the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. AttributeError: If the module does not use bias. 
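Illustrative sketch (assumed usage; the bias only exists once the module
has been connected to the graph):

    sep = SeparableConv2D(output_channels=16, channel_multiplier=1,
                          kernel_shape=3)
    outputs = sep(tf.placeholder(tf.float32, [None, 32, 32, 8]))
    bias = sep.b  # shape (16,), added after the pointwise convolution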
""" self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in SeparableConv2D Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers class Conv3D(base.AbstractModule): """Volumetric convolution module, including optional bias. This acts as a light wrapper around the TensorFlow op `tf.nn.conv3d`, abstracting away variable creation and sharing. """ def __init__(self, output_channels, kernel_shape, stride=1, rate=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="conv_3d"): """Constructs a Conv3D module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. `output_channels` can be either a number or a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure that output_channels can be called, returning an integer, when `build` is called. kernel_shape: Sequence of kernel sizes (of size 3), or integer that is used to define kernel size in all dimensions. stride: Sequence of kernel strides (of size 3), or integer that is used to define stride in all dimensions. rate: Sequence of dilation rates (of size 3), or integer that is used to define dilation rate in all dimensions. 1 corresponds to standard 2D convolution, `rate > 1` corresponds to dilated convolution. Cannot be > 1 if any of `stride` is also > 1. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Whether to include bias parameters. Default `True`. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). The default initializer for the weights is a truncated normal initializer, which is commonly used when the inputs are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the bias is a zero initializer. partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: base.IncompatibleShapeError: If the given kernel shape is not an integer; or if the given kernel shape is not a sequence of two integers. base.IncompatibleShapeError: If the given stride is not an integer; or if the given stride is not a sequence of two or four integers. 
base.IncompatibleShapeError: If the given rate is not an integer; or if the given rate is not a sequence of three integers. base.NotSupportedError: If rate in any dimension and the stride in any dimension are simultaneously > 1. ValueError: If the given padding is not `snt.VALID` or `snt.SAME`. KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than 'w' or 'b'. TypeError: If any of the given initializers, partitioners or regularizers are not callable. """ super(Conv3D, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels self._input_shape = None self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 3, "kernel") # The following is for backwards-compatibility from when we used to accept # 5-strides of the form [1, m, n, o, 1]. if isinstance(stride, collections.Iterable) and len(stride) == 5: self._stride = tuple(stride)[1:-1] else: self._stride = _fill_and_verify_parameter_shape(stride, 3, "stride") self._rate = _fill_and_verify_parameter_shape(rate, 3, "rate") if any(x > 1 for x in self._stride) and any(x > 1 for x in self._rate): raise base.NotSupportedError( "Cannot have stride > 1 with rate > 1") self._padding = _verify_padding(padding) self._use_bias = use_bias self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias) self._initializers = util.check_initializers( initializers, self.possible_keys) self._partitioners = util.check_partitioners( partitioners, self.possible_keys) self._regularizers = util.check_regularizers( regularizers, self.possible_keys) @classmethod def get_possible_initializer_keys(cls, use_bias=True): return {"w", "b"} if use_bias else {"w"} def _build(self, inputs): """Connects the Conv3D module into the graph, with input Tensor `inputs`. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final dimension (i.e. `input_channels`), in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A 5D Tensor of shape `[batch_size, input_depth, input_height, input_width, input_channels]`. Returns: A 5D Tensor of shape `[batch_size, output_depth, output_height, output_width, output_channels]`. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred size of the input does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions. base.UnderspecifiedError: If the input tensor has an unknown `input_channels`. TypeError: If input Tensor dtype is not compatible with `tf.float32`. """ # Handle input whose shape is unknown during graph creation.
self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 5: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_depth, " "input_height, input_width, input_channels)") if self._input_shape[4] is None: raise base.UnderspecifiedError( "Number of input channels must be known at module build time") else: input_channels = self._input_shape[4] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError( "Input must have dtype tf.float32, but dtype was {}".format( inputs.dtype)) weight_shape = ( self._kernel_shape[0], self._kernel_shape[1], self._kernel_shape[2], input_channels, self.output_channels) bias_shape = (self.output_channels,) if "w" not in self._initializers: self._initializers["w"] = create_weight_initializer(weight_shape[:4]) if "b" not in self._initializers and self._use_bias: self._initializers["b"] = create_bias_initializer(bias_shape) self._w = tf.get_variable("w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) outputs = tf.nn.convolution(inputs, self._w, strides=self._stride, padding=self._padding, dilation_rate=self._rate) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) return outputs @property def output_channels(self): """Returns the number of output channels.""" if callable(self._output_channels): self._output_channels = self._output_channels() return self._output_channels @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def stride(self): """Returns the stride.""" # Backwards compatibility with old stride format. return (1,) + self._stride + (1,) @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" self._ensure_is_connected() return self._w @property def b(self): """Returns the Variable containing the bias.""" self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in Conv3D Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers # Implements Transposable interface. def transpose(self, name=None): """Returns matching `Conv3DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv3DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1.
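A minimal sketch of the round trip (illustrative; the placeholder shape
is an assumption):

    conv = Conv3D(output_channels=4, kernel_shape=3)
    outputs = conv(tf.placeholder(tf.float32, [None, 8, 16, 16, 1]))
    # transpose() returns a Conv3DTranspose whose output signal domain
    # and channel count match this module's input.
    recovered = conv.transpose()(outputs)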
""" if any(x > 1 for x in self._rate): raise base.NotSupportedError( "Cannot transpose a dilated convolution module.") if name is None: name = self.module_name + "_transpose" return Conv3DTranspose(output_channels=lambda: self.input_shape[-1], output_shape=lambda: self.input_shape[1:-1], kernel_shape=self.kernel_shape, stride=self.stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, custom_getter=self._custom_getter, name=name) class Conv3DTranspose(base.AbstractModule, base.Transposable): """Volumetric transposed / reverse / up 3D convolution module, including bias. This acts as a light wrapper around the TensorFlow op `tf.nn.conv3d_transpose` abstracting away variable creation and sharing. """ def __init__(self, output_channels, output_shape=None, kernel_shape=None, stride=1, padding=SAME, use_bias=True, initializers=None, partitioners=None, regularizers=None, custom_getter=None, name="conv_3d_transpose"): """Constructs a `Conv3DTranspose` module. See the following documentation for an explanation of VALID versus SAME padding modes: https://www.tensorflow.org/api_guides/python/nn#Convolution Args: output_channels: Number of output channels. `output_channels` can be either a number or a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure `output_channels` can be called, returning an integer, when `build` is called. output_shape: Output shape of transpose convolution. Can be either an iterable of integers or a callable. In the latter case, since the function invocation is deferred to graph construction time, the user must only ensure that `output_shape` can be called, returning an iterable of format `(out_depth, out_height, out_width)` when `build` is called. Note that `output_shape` defines the size of output signal domain, as opposed to the shape of the output `Tensor`. If a None value is given, a default shape is automatically calculated (see docstring of _default_transpose_size function for more details). kernel_shape: Sequence of kernel sizes (of size 3), or integer that is used to define kernel size in all dimensions. stride: Sequence of kernel strides (of size 3), or integer that is used to define stride in all dimensions. padding: Padding algorithm, either `snt.SAME` or `snt.VALID`. use_bias: Whether to include bias parameters. Default `True`. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: Name of the module. Raises: module.IncompatibleShapeError: If the given kernel shape is neither an integer nor a sequence of three integers. 
base.IncompatibleShapeError: If the given stride is neither an integer nor a sequence of three or five integers. ValueError: If the given padding is not `snt.VALID` or `snt.SAME`. ValueError: If the given kernel_shape is `None`. KeyError: If `initializers`, `partitioners` or `regularizers` contain any keys other than 'w' or 'b'. TypeError: If any of the given initializers, partitioners or regularizers are not callable. """ super(Conv3DTranspose, self).__init__(custom_getter=custom_getter, name=name) self._output_channels = output_channels if output_shape is None: self._output_shape = None self._use_default_output_shape = True else: self._use_default_output_shape = False if callable(output_shape): self._output_shape = output_shape else: self._output_shape = _fill_and_verify_parameter_shape(output_shape, 3, "output_shape") self._input_shape = None if kernel_shape is None: raise ValueError("`kernel_shape` cannot be None.") self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 3, "kernel") # We want to support passing native strides akin to [1, m, n, o, 1]. if isinstance(stride, collections.Iterable) and len(stride) == 5: if not stride[0] == stride[4] == 1: raise base.IncompatibleShapeError( "Invalid stride: First and last element must be 1.") self._stride = tuple(stride) else: self._stride = _fill_and_one_pad_stride(stride, 3) self._padding = _verify_padding(padding) self._use_bias = use_bias self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias) self._initializers = util.check_initializers( initializers, self.possible_keys) self._partitioners = util.check_partitioners( partitioners, self.possible_keys) self._regularizers = util.check_regularizers( regularizers, self.possible_keys) @classmethod def get_possible_initializer_keys(cls, use_bias=True): return {"w", "b"} if use_bias else {"w"} def _build(self, inputs): """Connects the Conv3DTranspose module into the graph. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final dimension (i.e. `input_channels`), in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A 5D Tensor of shape [batch_size, input_depth, input_height, input_width, input_channels]. Returns: A 5D Tensor of shape [batch_size, output_depth, output_height, output_width, output_channels]. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred size of the input does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions; or if the input tensor has an unknown `input_channels`; or if `output_shape` is an iterable and is not in the format `(out_depth, out_height, out_width)`. TypeError: If input Tensor dtype is not compatible with `tf.float32`. """ # Handle input whose shape is unknown during graph creation.
self._input_shape = tuple(inputs.get_shape().as_list()) if len(self._input_shape) != 5: raise base.IncompatibleShapeError( "Input Tensor must have shape (batch_size, input_depth, " "input_height, input_width, input_channels)") if self._input_shape[4] is None: raise base.IncompatibleShapeError( "Number of input channels must be known at module build time") input_channels = self._input_shape[4] if not tf.float32.is_compatible_with(inputs.dtype): raise TypeError("Input must have dtype tf.float32, but dtype was " + inputs.dtype.name) if self._use_default_output_shape: self._output_shape = ( lambda: _default_transpose_size(self._input_shape[1:-1], # pylint: disable=g-long-lambda self.stride[1:-1], kernel_shape=self.kernel_shape, padding=self.padding)) if len(self.output_shape) != 3: raise base.IncompatibleShapeError("Output shape must be specified as " "(output_depth, output_height, " "output_width)") weight_shape = (self._kernel_shape[0], self._kernel_shape[1], self._kernel_shape[2], self.output_channels, input_channels) bias_shape = (self.output_channels,) if "w" not in self._initializers: fan_in = weight_shape[:3] + (weight_shape[4],) stddev = 1 / math.sqrt(np.prod(fan_in)) self._initializers["w"] = tf.truncated_normal_initializer(stddev=stddev) if "b" not in self._initializers and self._use_bias: stddev = 1 / math.sqrt(np.prod(bias_shape)) self._initializers["b"] = tf.truncated_normal_initializer(stddev=stddev) self._w = tf.get_variable("w", shape=weight_shape, initializer=self._initializers["w"], partitioner=self._partitioners.get("w", None), regularizer=self._regularizers.get("w", None)) # Use tensorflow shape op to manipulate inputs shape, so that unknown batch # size - which can happen when using input placeholders - is handled # correctly. batch_size = tf.expand_dims(tf.shape(inputs)[0], 0) conv_output_shape = tf.convert_to_tensor( tuple(self.output_shape) + (self.output_channels,)) output_shape = tf.concat([batch_size, conv_output_shape], 0) outputs = tf.nn.conv3d_transpose(inputs, self._w, output_shape, strides=self._stride, padding=self._padding) if self._use_bias: self._b = tf.get_variable("b", shape=bias_shape, initializer=self._initializers["b"], partitioner=self._partitioners.get("b", None), regularizer=self._regularizers.get("b", None)) outputs = tf.nn.bias_add(outputs, self._b) # Recover output tensor shape value and pass it to set_shape in order to # enable shape inference. batch_size_value = inputs.get_shape()[0] output_shape_value = ((batch_size_value,) + self.output_shape + (self.output_channels,)) outputs.set_shape(output_shape_value) return outputs @property def output_channels(self): """Returns the number of output channels.""" if callable(self._output_channels): self._output_channels = self._output_channels() return self._output_channels @property def kernel_shape(self): """Returns the kernel shape.""" return self._kernel_shape @property def stride(self): """Returns the stride.""" return self._stride @property def output_shape(self): """Returns the output shape.""" if self._output_shape is None: self._ensure_is_connected() if callable(self._output_shape): self._output_shape = tuple(self._output_shape()) return self._output_shape @property def padding(self): """Returns the padding algorithm.""" return self._padding @property def w(self): """Returns the Variable containing the weight matrix.""" self._ensure_is_connected() return self._w @property def b(self): """Returns the Variable containing the bias.
Returns: Variable object containing the bias, from the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. AttributeError: If the module does not use bias. """ self._ensure_is_connected() if not self._use_bias: raise AttributeError( "No bias Variable in Conv3DTranspose Module when `use_bias=False`.") return self._b @property def has_bias(self): """Returns `True` if bias Variable is present in the module.""" return self._use_bias @property def initializers(self): """Returns the initializers dictionary.""" return self._initializers @property def partitioners(self): """Returns the partitioners dictionary.""" return self._partitioners @property def regularizers(self): """Returns the regularizers dictionary.""" return self._regularizers @property def input_shape(self): """Returns the input shape.""" self._ensure_is_connected() return self._input_shape # Implements Transposable interface. def transpose(self, name=None): """Returns transposed Conv3DTranspose module, i.e. a Conv3D module.""" if name is None: name = self.module_name + "_transpose" return Conv3D(output_channels=lambda: self.input_shape[-1], kernel_shape=self.kernel_shape, stride=self.stride[1:-1], padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, custom_getter=self._custom_getter, name=name) """Forms to render HTML input & validate request data.""" from wtforms import Form, BooleanField, DateTimeField, PasswordField from wtforms import TextAreaField, TextField from wtforms.validators import Length, required class AppointmentForm(Form): """Render HTML input for Appointment model & validate submissions. This matches the models.Appointment class very closely. Where models.Appointment represents the domain and its persistence, this class represents how to display a form in HTML & accept/reject the results. """ title = TextField('Title', [Length(max=255)]) start = DateTimeField('Start', [required()]) end = DateTimeField('End') allday = BooleanField('All Day') location = TextField('Location', [Length(max=255)]) description = TextAreaField('Description') class LoginForm(Form): """Render HTML input for user login form. Authentication (i.e. password verification) happens in the view function.
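A sketch of typical use in a view function (illustrative; the framework
`request` object and `verify_password` helper are assumptions, not
provided by this module):

    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        verify_password(form.username.data, form.password.data)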
""" username = TextField('Username', [required()]) password = PasswordField('Password', [required()]) """Test script for ftplib module.""" # Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS # environment import ftplib import asyncore import asynchat import socket import io import errno import os import time try: import ssl except ImportError: ssl = None from unittest import TestCase, skipUnless from test import support from test.support import HOST, HOSTv6 threading = support.import_module('threading') TIMEOUT = 3 # the dummy data returned by server over the data channel when # RETR, LIST, NLST, MLSD commands are issued RETR_DATA = 'abcde12345\r\n' * 1000 LIST_DATA = 'foo\r\nbar\r\n' NLST_DATA = 'foo\r\nbar\r\n' MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n" "type=pdir;perm=e;unique==keVO1+d?3; ..\r\n" "type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n" "type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n" "type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n" "type=file;perm=awr;unique==keVO1+8G4; writable\r\n" "type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n" "type=dir;perm=;unique==keVO1+1t2; no-exec\r\n" "type=file;perm=r;unique==keVO1+EG4; two words\r\n" "type=file;perm=r;unique==keVO1+IH4; leading space\r\n" "type=file;perm=r;unique==keVO1+1G4; file1\r\n" "type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n" "type=file;perm=r;unique==keVO1+1G4; file2\r\n" "type=file;perm=r;unique==keVO1+1G4; file3\r\n" "type=file;perm=r;unique==keVO1+1G4; file4\r\n") class DummyDTPHandler(asynchat.async_chat): dtp_conn_closed = False def __init__(self, conn, baseclass): asynchat.async_chat.__init__(self, conn) self.baseclass = baseclass self.baseclass.last_received_data = '' def handle_read(self): self.baseclass.last_received_data += self.recv(1024).decode('ascii') def handle_close(self): # XXX: this method can be called many times in a row for a single # connection, including in clear-text (non-TLS) mode. # (behaviour witnessed with test_data_connection) if not self.dtp_conn_closed: self.baseclass.push('226 transfer complete') self.close() self.dtp_conn_closed = True def push(self, what): if self.baseclass.next_data is not None: what = self.baseclass.next_data self.baseclass.next_data = None if not what: return self.close_when_done() super(DummyDTPHandler, self).push(what.encode('ascii')) def handle_error(self): raise Exception class DummyFTPHandler(asynchat.async_chat): dtp_handler = DummyDTPHandler def __init__(self, conn): asynchat.async_chat.__init__(self, conn) # tells the socket to handle urgent data inline (ABOR command) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1) self.set_terminator(b"\r\n") self.in_buffer = [] self.dtp = None self.last_received_cmd = None self.last_received_data = '' self.next_response = '' self.next_data = None self.rest = None self.next_retr_data = RETR_DATA self.push('220 welcome') def collect_incoming_data(self, data): self.in_buffer.append(data) def found_terminator(self): line = b''.join(self.in_buffer).decode('ascii') self.in_buffer = [] if self.next_response: self.push(self.next_response) self.next_response = '' cmd = line.split(' ')[0].lower() self.last_received_cmd = cmd space = line.find(' ') if space != -1: arg = line[space + 1:] else: arg = "" if hasattr(self, 'cmd_' + cmd): method = getattr(self, 'cmd_' + cmd) method(arg) else: self.push('550 command "%s" not understood.' 
%cmd) def handle_error(self): raise Exception def push(self, data): asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n') def cmd_port(self, arg): addr = list(map(int, arg.split(','))) ip = '%d.%d.%d.%d' %tuple(addr[:4]) port = (addr[4] * 256) + addr[5] s = socket.create_connection((ip, port), timeout=TIMEOUT) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_pasv(self, arg): with socket.socket() as sock: sock.bind((self.socket.getsockname()[0], 0)) sock.listen() sock.settimeout(TIMEOUT) ip, port = sock.getsockname()[:2] ip = ip.replace('.', ','); p1, p2 = divmod(port, 256) self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2)) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_eprt(self, arg): af, ip, port = arg.split(arg[0])[1:-1] port = int(port) s = socket.create_connection((ip, port), timeout=TIMEOUT) self.dtp = self.dtp_handler(s, baseclass=self) self.push('200 active data connection established') def cmd_epsv(self, arg): with socket.socket(socket.AF_INET6) as sock: sock.bind((self.socket.getsockname()[0], 0)) sock.listen() sock.settimeout(TIMEOUT) port = sock.getsockname()[1] self.push('229 entering extended passive mode (|||%d|)' %port) conn, addr = sock.accept() self.dtp = self.dtp_handler(conn, baseclass=self) def cmd_echo(self, arg): # sends back the received string (used by the test suite) self.push(arg) def cmd_noop(self, arg): self.push('200 noop ok') def cmd_user(self, arg): self.push('331 username ok') def cmd_pass(self, arg): self.push('230 password ok') def cmd_acct(self, arg): self.push('230 acct ok') def cmd_rnfr(self, arg): self.push('350 rnfr ok') def cmd_rnto(self, arg): self.push('250 rnto ok') def cmd_dele(self, arg): self.push('250 dele ok') def cmd_cwd(self, arg): self.push('250 cwd ok') def cmd_size(self, arg): self.push('250 1000') def cmd_mkd(self, arg): self.push('257 "%s"' %arg) def cmd_rmd(self, arg): self.push('250 rmd ok') def cmd_pwd(self, arg): self.push('257 "pwd ok"') def cmd_type(self, arg): self.push('200 type ok') def cmd_quit(self, arg): self.push('221 quit ok') self.close() def cmd_abor(self, arg): self.push('226 abor ok') def cmd_stor(self, arg): self.push('125 stor ok') def cmd_rest(self, arg): self.rest = arg self.push('350 rest ok') def cmd_retr(self, arg): self.push('125 retr ok') if self.rest is not None: offset = int(self.rest) else: offset = 0 self.dtp.push(self.next_retr_data[offset:]) self.dtp.close_when_done() self.rest = None def cmd_list(self, arg): self.push('125 list ok') self.dtp.push(LIST_DATA) self.dtp.close_when_done() def cmd_nlst(self, arg): self.push('125 nlst ok') self.dtp.push(NLST_DATA) self.dtp.close_when_done() def cmd_opts(self, arg): self.push('200 opts ok') def cmd_mlsd(self, arg): self.push('125 mlsd ok') self.dtp.push(MLSD_DATA) self.dtp.close_when_done() def cmd_setlongretr(self, arg): # For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg) self.push('125 setlongretr ok') class DummyFTPServer(asyncore.dispatcher, threading.Thread): handler = DummyFTPHandler def __init__(self, address, af=socket.AF_INET): threading.Thread.__init__(self) asyncore.dispatcher.__init__(self) self.create_socket(af, socket.SOCK_STREAM) self.bind(address) self.listen(5) self.active = False self.active_lock = threading.Lock() self.host, self.port = self.socket.getsockname()[:2] self.handler_instance = None def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: self.active_lock.acquire() asyncore.loop(timeout=0.1, count=1) self.active_lock.release() asyncore.close_all(ignore_all=True) def stop(self): assert self.active self.active = False self.join() def handle_accepted(self, conn, addr): self.handler_instance = self.handler(conn) def handle_connect(self): self.close() handle_read = handle_connect def writable(self): return 0 def handle_error(self): raise Exception if ssl is not None: CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem") CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem") class SSLConnection(asyncore.dispatcher): """An asyncore.dispatcher subclass supporting TLS/SSL.""" _ssl_accepting = False _ssl_closing = False def secure_connection(self): context = ssl.SSLContext() context.load_cert_chain(CERTFILE) socket = context.wrap_socket(self.socket, suppress_ragged_eofs=False, server_side=True, do_handshake_on_connect=False) self.del_channel() self.set_socket(socket) self._ssl_accepting = True def _do_ssl_handshake(self): try: self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def _do_ssl_shutdown(self): self._ssl_closing = True try: self.socket = self.socket.unwrap() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return except OSError as err: # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return # from OpenSSL's SSL_shutdown(), corresponding to a # closed socket condition. 
See also: # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html pass self._ssl_closing = False if getattr(self, '_ccc', False) is False: super(SSLConnection, self).close() else: pass def handle_read_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_read_event() def handle_write_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_write_event() def send(self, data): try: return super(SSLConnection, self).send(data) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return 0 raise def recv(self, buffer_size): try: return super(SSLConnection, self).recv(buffer_size) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return b'' if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): self.handle_close() return b'' raise def handle_error(self): raise Exception def close(self): if (isinstance(self.socket, ssl.SSLSocket) and self.socket._sslobj is not None): self._do_ssl_shutdown() else: super(SSLConnection, self).close() class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler): """A DummyDTPHandler subclass supporting TLS/SSL.""" def __init__(self, conn, baseclass): DummyDTPHandler.__init__(self, conn, baseclass) if self.baseclass.secure_data_channel: self.secure_connection() class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler): """A DummyFTPHandler subclass supporting TLS/SSL.""" dtp_handler = DummyTLS_DTPHandler def __init__(self, conn): DummyFTPHandler.__init__(self, conn) self.secure_data_channel = False self._ccc = False def cmd_auth(self, line): """Set up secure control channel.""" self.push('234 AUTH TLS successful') self.secure_connection() def cmd_ccc(self, line): self.push('220 Reverting back to clear-text') self._ccc = True self._do_ssl_shutdown() def cmd_pbsz(self, line): """Negotiate size of buffer for secure data transfer. For TLS/SSL the only valid value for the parameter is '0'. Any other value is accepted but ignored. 
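The client-side sequence this handler supports looks roughly like the
following sketch (illustrative; `host` and `port` stand for the test
server's address):

    ftps = ftplib.FTP_TLS(timeout=TIMEOUT)
    ftps.connect(host, port)
    ftps.auth()    # AUTH TLS: secures the control channel
    ftps.prot_p()  # issues PBSZ 0 followed by PROT P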
""" self.push('200 PBSZ=0 successful.') def cmd_prot(self, line): """Setup un/secure data channel.""" arg = line.upper() if arg == 'C': self.push('200 Protection set to Clear') self.secure_data_channel = False elif arg == 'P': self.push('200 Protection set to Private') self.secure_data_channel = True else: self.push("502 Unrecognized PROT type (use C or P).") class DummyTLS_FTPServer(DummyFTPServer): handler = DummyTLS_FTPHandler class TestFTPClass(TestCase): def setUp(self): self.server = DummyFTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP(timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def check_data(self, received, expected): self.assertEqual(len(received), len(expected)) self.assertEqual(received, expected) def test_getwelcome(self): self.assertEqual(self.client.getwelcome(), '220 welcome') def test_sanitize(self): self.assertEqual(self.client.sanitize('foo'), repr('foo')) self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****')) self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****')) def test_exceptions(self): self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400') self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500') self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599') self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999') def test_all_errors(self): exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm, ftplib.error_proto, ftplib.Error, OSError, EOFError) for x in exceptions: try: raise x('exception not included in all_errors set') except ftplib.all_errors: pass def test_set_pasv(self): # passive mode is supposed to be enabled by default self.assertTrue(self.client.passiveserver) self.client.set_pasv(True) self.assertTrue(self.client.passiveserver) self.client.set_pasv(False) self.assertFalse(self.client.passiveserver) def test_voidcmd(self): self.client.voidcmd('echo 200') self.client.voidcmd('echo 299') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199') self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300') def test_login(self): self.client.login() def test_acct(self): self.client.acct('passwd') def test_rename(self): self.client.rename('a', 'b') self.server.handler_instance.next_response = '200' self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b') def test_delete(self): self.client.delete('foo') self.server.handler_instance.next_response = '199' self.assertRaises(ftplib.error_reply, self.client.delete, 'foo') def test_size(self): self.client.size('foo') def test_mkd(self): dir = self.client.mkd('/foo') self.assertEqual(dir, '/foo') def test_rmd(self): self.client.rmd('foo') def test_cwd(self): dir = self.client.cwd('/foo') self.assertEqual(dir, '250 cwd ok') def test_pwd(self): dir = self.client.pwd() self.assertEqual(dir, 'pwd ok') def test_quit(self): self.assertEqual(self.client.quit(), '221 quit ok') # Ensure the connection gets closed; sock attribute should be None self.assertEqual(self.client.sock, None) def test_abort(self): self.client.abort() def test_retrbinary(self): def callback(data): received.append(data.decode('ascii')) received = [] self.client.retrbinary('retr', callback) self.check_data(''.join(received), RETR_DATA) def test_retrbinary_rest(self): def callback(data): received.append(data.decode('ascii')) for rest in (0, 10, 20): 
received = [] self.client.retrbinary('retr', callback, rest=rest) self.check_data(''.join(received), RETR_DATA[rest:]) def test_retrlines(self): received = [] self.client.retrlines('retr', received.append) self.check_data(''.join(received), RETR_DATA.replace('\r\n', '')) def test_storbinary(self): f = io.BytesIO(RETR_DATA.encode('ascii')) self.client.storbinary('stor', f) self.check_data(self.server.handler_instance.last_received_data, RETR_DATA) # test new callback arg flag = [] f.seek(0) self.client.storbinary('stor', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) def test_storbinary_rest(self): f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii')) for r in (30, '30'): f.seek(0) self.client.storbinary('stor', f, rest=r) self.assertEqual(self.server.handler_instance.rest, str(r)) def test_storlines(self): f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii')) self.client.storlines('stor', f) self.check_data(self.server.handler_instance.last_received_data, RETR_DATA) # test new callback arg flag = [] f.seek(0) self.client.storlines('stor foo', f, callback=lambda x: flag.append(None)) self.assertTrue(flag) f = io.StringIO(RETR_DATA.replace('\r\n', '\n')) # storlines() expects a binary file, not a text file with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises(TypeError, self.client.storlines, 'stor foo', f) def test_nlst(self): self.client.nlst() self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1]) def test_dir(self): l = [] self.client.dir(lambda x: l.append(x)) self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', '')) def test_mlsd(self): list(self.client.mlsd()) list(self.client.mlsd(path='/')) list(self.client.mlsd(path='/', facts=['size', 'type'])) ls = list(self.client.mlsd()) for name, facts in ls: self.assertIsInstance(name, str) self.assertIsInstance(facts, dict) self.assertTrue(name) self.assertIn('type', facts) self.assertIn('perm', facts) self.assertIn('unique', facts) def set_data(data): self.server.handler_instance.next_data = data def test_entry(line, type=None, perm=None, unique=None, name=None): type = 'type' if type is None else type perm = 'perm' if perm is None else perm unique = 'unique' if unique is None else unique name = 'name' if name is None else name set_data(line) _name, facts = next(self.client.mlsd()) self.assertEqual(_name, name) self.assertEqual(facts['type'], type) self.assertEqual(facts['perm'], perm) self.assertEqual(facts['unique'], unique) # plain test_entry('type=type;perm=perm;unique=unique; name\r\n') # "=" in fact value test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe") test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type") test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe") test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====") # spaces in name test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me") test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ") test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name") test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e") # ";" in name test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me") test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name") test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;") test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;") # case sensitiveness set_data('Type=type;TyPe=perm;UNIQUE=unique; 
name\r\n') _name, facts = next(self.client.mlsd()) for x in facts: self.assertTrue(x.islower()) # no data (directory empty) set_data('') self.assertRaises(StopIteration, next, self.client.mlsd()) set_data('') for x in self.client.mlsd(): self.fail("unexpected data %s" % x) def test_makeport(self): with self.client.makeport(): # IPv4 is in use, just make sure send_eprt has not been used self.assertEqual(self.server.handler_instance.last_received_cmd, 'port') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), timeout=TIMEOUT) conn.close() # IPv4 is in use, just make sure send_epsv has not been used self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv') def test_with_statement(self): self.client.quit() def is_client_connected(): if self.client.sock is None: return False try: self.client.sendcmd('noop') except (OSError, EOFError): return False return True # base test with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.assertTrue(is_client_connected()) self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) # QUIT sent inside the with block with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.client.quit() self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) # force a wrong response code to be sent on QUIT: error_perm # is expected and the connection is supposed to be closed try: with ftplib.FTP(timeout=TIMEOUT) as self.client: self.client.connect(self.server.host, self.server.port) self.client.sendcmd('noop') self.server.handler_instance.next_response = '550 error on quit' except ftplib.error_perm as err: self.assertEqual(str(err), '550 error on quit') else: self.fail('Exception not raised') # needed to give the threaded server some time to set the attribute # which otherwise would still be == 'noop' time.sleep(0.1) self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit') self.assertFalse(is_client_connected()) def test_source_address(self): self.client.quit() port = support.find_unused_port() try: self.client.connect(self.server.host, self.server.port, source_address=(HOST, port)) self.assertEqual(self.client.sock.getsockname()[1], port) self.client.quit() except OSError as e: if e.errno == errno.EADDRINUSE: self.skipTest("couldn't bind to port %d" % port) raise def test_source_address_passive_connection(self): port = support.find_unused_port() self.client.source_address = (HOST, port) try: with self.client.transfercmd('list') as sock: self.assertEqual(sock.getsockname()[1], port) except OSError as e: if e.errno == errno.EADDRINUSE: self.skipTest("couldn't bind to port %d" % port) raise def test_parse257(self): self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar') self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar') self.assertEqual(ftplib.parse257('257 ""'), '') self.assertEqual(ftplib.parse257('257 "" created'), '') self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"') # The 257 response is supposed to include the directory # name and in case it contains embedded double-quotes # they must be doubled (see RFC-959, chapter 7, appendix 2). 
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar') self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar') def test_line_too_long(self): self.assertRaises(ftplib.Error, self.client.sendcmd, 'x' * self.client.maxline * 2) def test_retrlines_too_long(self): self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2)) received = [] self.assertRaises(ftplib.Error, self.client.retrlines, 'retr', received.append) def test_storlines_too_long(self): f = io.BytesIO(b'x' * self.client.maxline * 2) self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f) @skipUnless(support.IPV6_ENABLED, "IPv6 not enabled") class TestIPv6Environment(TestCase): def setUp(self): self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6) self.server.start() self.client = ftplib.FTP(timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def test_af(self): self.assertEqual(self.client.af, socket.AF_INET6) def test_makeport(self): with self.client.makeport(): self.assertEqual(self.server.handler_instance.last_received_cmd, 'eprt') def test_makepasv(self): host, port = self.client.makepasv() conn = socket.create_connection((host, port), timeout=TIMEOUT) conn.close() self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv') def test_transfer(self): def retr(): def callback(data): received.append(data.decode('ascii')) received = [] self.client.retrbinary('retr', callback) self.assertEqual(len(''.join(received)), len(RETR_DATA)) self.assertEqual(''.join(received), RETR_DATA) self.client.set_pasv(True) retr() self.client.set_pasv(False) retr() @skipUnless(ssl, "SSL not available") class TestTLS_FTPClassMixin(TestFTPClass): """Repeat TestFTPClass tests starting the TLS layer for both control and data connections first. 
""" def setUp(self): self.server = DummyTLS_FTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP_TLS(timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) # enable TLS self.client.auth() self.client.prot_p() @skipUnless(ssl, "SSL not available") class TestTLS_FTPClass(TestCase): """Specific TLS_FTP class tests.""" def setUp(self): self.server = DummyTLS_FTPServer((HOST, 0)) self.server.start() self.client = ftplib.FTP_TLS(timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) def tearDown(self): self.client.close() self.server.stop() def test_control_connection(self): self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.auth() self.assertIsInstance(self.client.sock, ssl.SSLSocket) def test_data_connection(self): # clear text with self.client.transfercmd('list') as sock: self.assertNotIsInstance(sock, ssl.SSLSocket) self.assertEqual(self.client.voidresp(), "226 transfer complete") # secured, after PROT P self.client.prot_p() with self.client.transfercmd('list') as sock: self.assertIsInstance(sock, ssl.SSLSocket) self.assertEqual(self.client.voidresp(), "226 transfer complete") # PROT C is issued, the connection must be in cleartext again self.client.prot_c() with self.client.transfercmd('list') as sock: self.assertNotIsInstance(sock, ssl.SSLSocket) self.assertEqual(self.client.voidresp(), "226 transfer complete") def test_login(self): # login() is supposed to implicitly secure the control connection self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.login() self.assertIsInstance(self.client.sock, ssl.SSLSocket) # make sure that AUTH TLS doesn't get issued again self.client.login() def test_auth_issued_twice(self): self.client.auth() self.assertRaises(ValueError, self.client.auth) def test_auth_ssl(self): try: self.client.ssl_version = ssl.PROTOCOL_SSLv23 self.client.auth() self.assertRaises(ValueError, self.client.auth) finally: self.client.ssl_version = ssl.PROTOCOL_TLSv1 def test_context(self): self.client.quit() ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE, context=ctx) self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE, context=ctx) self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE, keyfile=CERTFILE, context=ctx) self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT) self.client.connect(self.server.host, self.server.port) self.assertNotIsInstance(self.client.sock, ssl.SSLSocket) self.client.auth() self.assertIs(self.client.sock.context, ctx) self.assertIsInstance(self.client.sock, ssl.SSLSocket) self.client.prot_p() with self.client.transfercmd('list') as sock: self.assertIs(sock.context, ctx) self.assertIsInstance(sock, ssl.SSLSocket) def test_ccc(self): self.assertRaises(ValueError, self.client.ccc) self.client.login(secure=True) self.assertIsInstance(self.client.sock, ssl.SSLSocket) self.client.ccc() self.assertRaises(ValueError, self.client.sock.unwrap) def test_check_hostname(self): self.client.quit() ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.verify_mode = ssl.CERT_REQUIRED ctx.check_hostname = True ctx.load_verify_locations(CAFILE) self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT) # 127.0.0.1 doesn't match SAN self.client.connect(self.server.host, self.server.port) with self.assertRaises(ssl.CertificateError): self.client.auth() # exception quits connection self.client.connect(self.server.host, self.server.port) self.client.prot_p() with self.assertRaises(ssl.CertificateError): with 
self.client.transfercmd("list") as sock: pass self.client.quit() self.client.connect("localhost", self.server.port) self.client.auth() self.client.quit() self.client.connect("localhost", self.server.port) self.client.prot_p() with self.client.transfercmd("list") as sock: pass class TestTimeouts(TestCase): def setUp(self): self.evt = threading.Event() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(20) self.port = support.bind_port(self.sock) self.server_thread = threading.Thread(target=self.server) self.server_thread.start() # Wait for the server to be ready. self.evt.wait() self.evt.clear() self.old_port = ftplib.FTP.port ftplib.FTP.port = self.port def tearDown(self): ftplib.FTP.port = self.old_port self.server_thread.join() def server(self): # This method sets the evt 3 times: # 1) when the connection is ready to be accepted. # 2) when it is safe for the caller to close the connection # 3) when we have closed the socket self.sock.listen() # (1) Signal the caller that we are ready to accept the connection. self.evt.set() try: conn, addr = self.sock.accept() except socket.timeout: pass else: conn.sendall(b"1 Hola mundo\n") conn.shutdown(socket.SHUT_WR) # (2) Signal the caller that it is safe to close the socket. self.evt.set() conn.close() finally: self.sock.close() def testTimeoutDefault(self): # default -- use global socket timeout self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP(HOST) finally: socket.setdefaulttimeout(None) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutNone(self): # no timeout -- do not use global socket timeout self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: ftp = ftplib.FTP(HOST, timeout=None) finally: socket.setdefaulttimeout(None) self.assertIsNone(ftp.sock.gettimeout()) self.evt.wait() ftp.close() def testTimeoutValue(self): # a value ftp = ftplib.FTP(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutConnect(self): ftp = ftplib.FTP() ftp.connect(HOST, timeout=30) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDifferentOrder(self): ftp = ftplib.FTP(timeout=30) ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() def testTimeoutDirectAccess(self): ftp = ftplib.FTP() ftp.timeout = 30 ftp.connect(HOST) self.assertEqual(ftp.sock.gettimeout(), 30) self.evt.wait() ftp.close() class MiscTestCase(TestCase): def test__all__(self): blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF', 'Error', 'parse150', 'parse227', 'parse229', 'parse257', 'print_line', 'ftpcp', 'test'} support.check__all__(self, ftplib, blacklist=blacklist) def test_main(): tests = [TestFTPClass, TestTimeouts, TestIPv6Environment, TestTLS_FTPClassMixin, TestTLS_FTPClass, MiscTestCase] thread_info = support.threading_setup() try: support.run_unittest(*tests) finally: support.threading_cleanup(*thread_info) if __name__ == '__main__': test_main() # -*- coding: utf-8 -*- # Copyright 2014 Pierre de Buyl # # This file is part of pyh5md # # pyh5md is free software and is licensed under the modified BSD license (see # LICENSE file). 
import numpy as np
import h5py
from pyh5md.base import VL_STR, TimeData, FixedData, is_h5md
from pyh5md.utils import create_compact_dataset


class Box(h5py.Group):
    """Represents a simulation box."""
    def __init__(self, parent, dimension=None, boundary=None, edges=None,
                 time=False, unit=None, time_unit=None):
        """Initializes a simulation box."""
        assert len(boundary) == dimension
        for b in boundary:
            assert b in ['none', 'periodic']
        if edges is None:
            if not all(b == 'none' for b in boundary):
                raise ValueError("edges must be given when some boundary "
                                 "elements are not 'none'.")
        else:
            assert len(edges) == dimension
        if 'box' in parent.keys():
            # Re-opening an existing box group is not implemented.
            raise NotImplementedError
        self._id = h5py.h5g.create(parent.id, 'box')
        self.attrs['dimension'] = dimension
        self.attrs.create('boundary', data=boundary)
        if time:
            if edges is not None:
                self.edges = TimeData(self, 'edges', data=np.asarray(edges),
                                      unit=unit, time_unit=time_unit)
        else:
            if edges is not None:
                ds = create_compact_dataset(self, 'edges', data=edges)
                if unit is not None:
                    assert isinstance(unit, str)
                    ds.attrs.create('unit', data=unit)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.utils.encoding import force_str, python_2_unicode_compatible

# Levels
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50


@python_2_unicode_compatible
class CheckMessage(object):

    def __init__(self, level, msg, hint=None, obj=None, id=None):
        assert isinstance(level, int), "The first argument should be level."
        self.level = level
        self.msg = msg
        self.hint = hint
        self.obj = obj
        self.id = id

    def __eq__(self, other):
        return all(getattr(self, attr) == getattr(other, attr)
                   for attr in ['level', 'msg', 'hint', 'obj', 'id'])

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        from django.db import models

        if self.obj is None:
            obj = "?"
        elif isinstance(self.obj, models.base.ModelBase):
            # We need to hardcode the ModelBase case because its __str__
            # method doesn't return "applabel.modellabel" and cannot be changed.
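            # (_meta.label, used below, does render as "app_label.ModelName".)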
obj = self.obj._meta.label else: obj = force_str(self.obj) id = "(%s) " % self.id if self.id else "" hint = "\n\tHINT: %s" % self.hint if self.hint else '' return "%s: %s%s%s" % (obj, id, self.msg, hint) def __repr__(self): return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \ (self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id) def is_serious(self): return self.level >= ERROR def is_silenced(self): from django.conf import settings return self.id in settings.SILENCED_SYSTEM_CHECKS class Debug(CheckMessage): def __init__(self, *args, **kwargs): super(Debug, self).__init__(DEBUG, *args, **kwargs) class Info(CheckMessage): def __init__(self, *args, **kwargs): super(Info, self).__init__(INFO, *args, **kwargs) class Warning(CheckMessage): def __init__(self, *args, **kwargs): super(Warning, self).__init__(WARNING, *args, **kwargs) class Error(CheckMessage): def __init__(self, *args, **kwargs): super(Error, self).__init__(ERROR, *args, **kwargs) class Critical(CheckMessage): def __init__(self, *args, **kwargs): super(Critical, self).__init__(CRITICAL, *args, **kwargs) # -*- mode: python -*- # Copyright: (c) 2012, Seth Vidal (@skvidal) # Copyright: Ansible Team # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: add_host short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory description: - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. - Takes variables so you can define the new hosts more fully. - This module is also supported for Windows targets. version_added: "0.9" options: name: description: - The hostname/ip of the host to add to the inventory, can include a colon and a port number. type: str required: true aliases: [ host, hostname ] groups: description: - The groups to add the hostname to. type: list aliases: [ group, groupname ] notes: - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it to iterate use a with-loop construct. - The alias C(host) of the parameter C(name) is only available on Ansible 2.4 and newer. - Since Ansible 2.4, the C(inventory_dir) variable is now set to C(None) instead of the 'global inventory source', because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour. - Windows targets are supported by this module. 
seealso:
- module: group_by
author:
- Ansible Core Team
- Seth Vidal (@skvidal)
'''

EXAMPLES = r'''
- name: Add host to group 'just_created' with variable foo=42
  add_host:
    name: '{{ ip_from_ec2 }}'
    groups: just_created
    foo: 42

- name: Add host to multiple groups
  add_host:
    hostname: '{{ new_ip }}'
    groups:
    - group1
    - group2

- name: Add a host with a non-standard port local to your machines
  add_host:
    name: '{{ new_ip }}:{{ new_port }}'

- name: Add a host alias that we reach through a tunnel (Ansible 1.9 and older)
  add_host:
    hostname: '{{ new_ip }}'
    ansible_ssh_host: '{{ inventory_hostname }}'
    ansible_ssh_port: '{{ new_port }}'

- name: Add a host alias that we reach through a tunnel (Ansible 2.0 and newer)
  add_host:
    hostname: '{{ new_ip }}'
    ansible_host: '{{ inventory_hostname }}'
    ansible_port: '{{ new_port }}'

- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre Ansible 2.4 behaviour)
  add_host:
    hostname: charlie
    inventory_dir: '{{ inventory_dir }}'
'''
# Copyright (c) 2004-2011 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#

import logging
import string


class CookieCleaner:
    '''This class cleans cookies we haven't seen before.  The basic idea is
    to kill sessions, which isn't entirely straightforward.  Since we want
    this to be generalized, there's no way for us to know exactly what cookie
    we're trying to kill, which also means we don't know what domain or path
    it has been set for.

    The rule with cookies is that specific overrides general.  So cookies
    that are set for mail.foo.com override cookies with the same name that
    are set for .foo.com, just as cookies that are set for foo.com/mail
    override cookies with the same name that are set for foo.com/

    The best we can do is guess, so we just try to cover our bases by
    expiring cookies in a few different ways.  The most obvious thing to do
    is look for individual cookies and nail the ones we haven't seen coming
    from the server, but the problem is that cookies are often set by
    Javascript instead of a Set-Cookie header, and if we block those the site
    will think cookies are disabled in the browser.  So we do the expirations
    and whitelisting based on client,server tuples.  The first time a client
    hits a server, we kill whatever cookies we see then.  After that, we just
    let them through.  Not perfect, but pretty effective.
    '''

    _instance = None

    def getInstance():
        if CookieCleaner._instance is None:
            CookieCleaner._instance = CookieCleaner()
        return CookieCleaner._instance
    getInstance = staticmethod(getInstance)

    def __init__(self):
        self.cleanedCookies = set()
        self.enabled = False

    def setEnabled(self, enabled):
        self.enabled = enabled

    def isClean(self, method, client, host, headers):
        if method == "POST":
            return True
        if not self.enabled:
            return True
        if not self.hasCookies(headers):
            return True
        return (client, self.getDomainFor(host)) in self.cleanedCookies

    def getExpireHeaders(self, method, client, host, headers, path):
        domain = self.getDomainFor(host)
        self.cleanedCookies.add((client, domain))

        expireHeaders = []
        for cookie in headers['cookie'].split(";"):
            cookie = cookie.split("=")[0].strip()
            expireHeadersForCookie = self.getExpireCookieStringFor(
                cookie, host, domain, path)
            expireHeaders.extend(expireHeadersForCookie)

        return expireHeaders

    def hasCookies(self, headers):
        return 'cookie' in headers

    def getDomainFor(self, host):
        hostParts = host.split(".")
        return "." + hostParts[-2] + "." + hostParts[-1]

    def getExpireCookieStringFor(self, cookie, host, domain, path):
        pathList = path.split("/")
        expireStrings = list()
        expireStrings.append(cookie + "=" + "EXPIRED;Path=/;Domain=" + domain +
                             ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
        expireStrings.append(cookie + "=" + "EXPIRED;Path=/;Domain=" + host +
                             ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
        if len(pathList) > 2:
            expireStrings.append(cookie + "=" + "EXPIRED;Path=/" +
                                 pathList[1] + ";Domain=" + domain +
                                 ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
            expireStrings.append(cookie + "=" + "EXPIRED;Path=/" +
                                 pathList[1] + ";Domain=" + host +
                                 ";Expires=Mon, 01-Jan-1990 00:00:00 GMT\r\n")
        return expireStrings
#!/usr/bin/env python

import os
import platform
import subprocess
import sys

from lib.config import get_target_arch, PLATFORM
from lib.util import get_host_arch

SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))


def main():
  os.chdir(SOURCE_ROOT)

  if PLATFORM != 'win32' and platform.architecture()[0] != '64bit':
    print 'Electron is required to be built on a 64bit machine'
    return 1

  update_external_binaries()
  return update_gyp()


def update_external_binaries():
  uf = os.path.join('script', 'update-external-binaries.py')
  subprocess.check_call([sys.executable, uf])


def update_gyp():
  # Since gyp doesn't support specifying link_settings for each
  # configuration, we are not able to link to different libraries in "Debug"
  # and "Release" configurations.
  # To work around this, we generate the configuration twice: once for the
  # "Debug" config and once for the "Release" config. The settings are
  # controlled by the variable "libchromiumcontent_component", which is
  # defined before running gyp.
  target_arch = get_target_arch()
  return (run_gyp(target_arch, 0) or run_gyp(target_arch, 1))


def run_gyp(target_arch, component):
  env = os.environ.copy()
  if PLATFORM == 'linux' and target_arch != get_host_arch():
    env['GYP_CROSSCOMPILE'] = '1'
  elif PLATFORM == 'win32':
    env['GYP_MSVS_VERSION'] = '2013'
  python = sys.executable
  if sys.platform == 'cygwin':
    # Force using win32 python on cygwin.
    python = os.path.join('vendor', 'python_26', 'python.exe')
  gyp = os.path.join('vendor', 'brightray', 'vendor', 'gyp', 'gyp_main.py')
  gyp_pylib = os.path.join(os.path.dirname(gyp), 'pylib')
  # Avoid using the old gyp lib in system.
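  # Prepending the bundled pylib makes the vendored gyp shadow any system copy.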
env['PYTHONPATH'] = os.path.pathsep.join([gyp_pylib, env.get('PYTHONPATH', '')]) defines = [ '-Dlibchromiumcontent_component={0}'.format(component), '-Dtarget_arch={0}'.format(target_arch), '-Dhost_arch={0}'.format(get_host_arch()), '-Dlibrary=static_library', ] return subprocess.call([python, gyp, '-f', 'ninja', '--depth', '.', 'atom.gyp', '-Icommon.gypi'] + defines, env=env) if __name__ == '__main__': sys.exit(main()) import unittest from biicode.common.model.server_info import ServerInfo, ClientVersion class ServerInfoTest(unittest.TestCase): def test_invalid_msg(self): server_info = ServerInfo(message=u'\xa0') self.assertNotEquals(server_info.messages, '\xa0') def test_serialize(self): sut = ServerInfo() sut.download_url = 'https://www.biicode.com/downloads' serial = sut.serialize() deserialized = ServerInfo.deserialize(serial) self.assertEquals(sut, deserialized) class ClientVersionTest(unittest.TestCase): def test_compare(self): self.assertEqual(ClientVersion('develop'), ClientVersion('develop')) self.assertFalse(ClientVersion('1.2') > ClientVersion('develop')) self.assertGreater(ClientVersion('develop'), ClientVersion('1.2')) self.assertLess(ClientVersion('1.2'), ClientVersion('develop')) self.assertGreater(ClientVersion('1.3'), ClientVersion('1.2')) self.assertLess(ClientVersion('1.2'), ClientVersion('1.3')) self.assertGreater(ClientVersion('1.3.5'), ClientVersion('1.3')) self.assertLess(ClientVersion('1.3'), ClientVersion('1.3.5')) self.assertFalse(ClientVersion('0.1.13.2') > ClientVersion('0.2')) self.assertFalse(ClientVersion('0.2') < ClientVersion('0.1.13.2')) # Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import logging import optparse import os import shlex import socket import sys from telemetry.core import browser_finder from telemetry.core import browser_finder_exceptions from telemetry.core import device_finder from telemetry.core import platform from telemetry.core import profile_types from telemetry.core import util from telemetry.core import wpr_modes from telemetry.core.platform.profiler import profiler_finder util.AddDirToPythonPath( util.GetChromiumSrcDir(), 'third_party', 'webpagereplay') import net_configs # pylint: disable=F0401 class BrowserFinderOptions(optparse.Values): """Options to be used for discovering a browser.""" def __init__(self, browser_type=None): optparse.Values.__init__(self) self.browser_type = browser_type self.browser_executable = None self.chrome_root = None self.device = None self.cros_ssh_identity = None self.extensions_to_load = [] # If set, copy the generated profile to this path on exit. self.output_profile_path = None self.cros_remote = None self.profiler = None self.verbosity = 0 self.browser_options = BrowserOptions() self.output_file = None self.android_rndis = False self.no_performance_mode = False def __repr__(self): return str(sorted(self.__dict__.items())) def Copy(self): return copy.deepcopy(self) def CreateParser(self, *args, **kwargs): parser = optparse.OptionParser(*args, **kwargs) # Selection group group = optparse.OptionGroup(parser, 'Which browser to use') group.add_option('--browser', dest='browser_type', default=None, help='Browser type to run, ' 'in order of priority. 
Supported values: list,%s' %
                     ','.join(browser_finder.FindAllBrowserTypes(self)))
    group.add_option('--browser-executable', dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root', dest='chrome_root',
        help='Where to look for chrome builds. '
             'Defaults to searching parent dirs.')
    group.add_option('--device', dest='device',
        help='The device ID to use. '
             'If not specified, only 0 or 1 connected devices are supported. '
             'If specified as "android", all available Android devices are '
             'used.')
    group.add_option('--target-arch', dest='target_arch',
        help='The target architecture of the browser. Options available are: '
             'x64, x86_64, arm, arm64 and mips. '
             'Defaults to the default architecture of the platform if '
             'omitted.')
    group.add_option('--remote', dest='cros_remote',
        help='The hostname of a remote ChromeOS device to use.')
    group.add_option('--remote-ssh-port', type=int,
        default=socket.getservbyname('ssh'), dest='cros_remote_ssh_port',
        help='The SSH port of the remote ChromeOS device (requires --remote).')
    identity = None
    testing_rsa = os.path.join(
        util.GetChromiumSrcDir(), 'third_party', 'chromite', 'ssh_keys',
        'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity', dest='cros_ssh_identity', default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')
    parser.add_option_group(group)

    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers()
    group.add_option('--profiler', default=None, type='choice',
        choices=profiler_choices,
        help='Record profiling data using this tool. Supported values: ' +
             ', '.join(profiler_choices))
    group.add_option('--interactive', dest='interactive', action='store_true',
        help='Let the user interact with the page; the actions specified for '
             'the page are not run.')
    group.add_option('-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps', action='store_true',
        help='Output bootstrap deps list.')
    parser.add_option_group(group)

    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
             'test is executed at maximum CPU speed in order to minimize '
             'noise (especially important for dashboards / continuous '
             'builds). '
             'This option prevents Telemetry from tweaking such platform '
             'settings.')
    group.add_option('--android-rndis', dest='android_rndis', default=False,
        action='store_true', help='Use RNDIS forwarding on Android.')
    group.add_option('--no-android-rndis', dest='android_rndis',
        action='store_false',
        help='Do not use RNDIS forwarding on Android. [default]')
    parser.add_option_group(group)

    # Browser options.
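    # BrowserOptions registers its own flags (profile, WPR, netsim, ...) on
    # this same parser; see AddCommandLineArgs below.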
self.browser_options.AddCommandLineArgs(parser) real_parse = parser.parse_args def ParseArgs(args=None): defaults = parser.get_default_values() for k, v in defaults.__dict__.items(): if k in self.__dict__ and self.__dict__[k] != None: continue self.__dict__[k] = v ret = real_parse(args, self) # pylint: disable=E1121 if self.verbosity >= 2: logging.getLogger().setLevel(logging.DEBUG) elif self.verbosity: logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if self.device == 'list': devices = device_finder.GetDevicesMatchingOptions(self) print 'Available devices:' for device in devices: print ' ', device.name sys.exit(0) if self.browser_executable and not self.browser_type: self.browser_type = 'exact' if self.browser_type == 'list': devices = device_finder.GetDevicesMatchingOptions(self) if not devices: sys.exit(0) browser_types = {} for device in devices: try: possible_browsers = browser_finder.GetAllAvailableBrowsers(self, device) browser_types[device.name] = sorted( [browser.browser_type for browser in possible_browsers]) except browser_finder_exceptions.BrowserFinderException as ex: print >> sys.stderr, 'ERROR: ', ex sys.exit(1) print 'Available browsers:' if len(browser_types) == 0: print ' No devices were found.' for device_name in sorted(browser_types.keys()): print ' ', device_name for browser_type in browser_types[device_name]: print ' ', browser_type sys.exit(0) # Parse browser options. self.browser_options.UpdateFromParseResults(self) return ret parser.parse_args = ParseArgs return parser def AppendExtraBrowserArgs(self, args): self.browser_options.AppendExtraBrowserArgs(args) def MergeDefaultValues(self, defaults): for k, v in defaults.__dict__.items(): self.ensure_value(k, v) class BrowserOptions(object): """Options to be used for launching a browser.""" def __init__(self): self.browser_type = None self.show_stdout = False # When set to True, the browser will use the default profile. Telemetry # will not provide an alternate profile directory. self.dont_override_profile = False self.profile_dir = None self.profile_type = None self._extra_browser_args = set() self.extra_wpr_args = [] self.wpr_mode = wpr_modes.WPR_OFF self.netsim = None self.disable_background_networking = True self.no_proxy_server = False self.browser_user_agent_type = None self.clear_sytem_cache_for_browser_and_profile_on_start = False self.startup_url = 'about:blank' # Background pages of built-in component extensions can interfere with # performance measurements. self.disable_component_extensions_with_background_pages = True # Disable default apps. self.disable_default_apps = True # Whether to use the new code path for choosing an ephemeral port for # DevTools. The bots set this to true. When Chrome 37 reaches stable, # remove this setting and the old code path. http://crbug.com/379980 self.use_devtools_active_port = False def __repr__(self): return str(sorted(self.__dict__.items())) def IsCrosBrowserOptions(self): return False @classmethod def AddCommandLineArgs(cls, parser): ############################################################################ # Please do not add any more options here without first discussing with # # a telemetry owner. This is not the right place for platform-specific # # options. 
  # ############################################################################
  group = optparse.OptionGroup(parser, 'Browser options')
  profile_choices = profile_types.GetProfileTypes()
  group.add_option('--profile-type', dest='profile_type', type='choice',
      default='clean', choices=profile_choices,
      help=('The user profile to use. A clean profile is used by default. '
            'Supported values: ' + ', '.join(profile_choices)))
  group.add_option('--profile-dir', dest='profile_dir',
      help='Profile directory to launch the browser with. '
           'A clean profile is used by default')
  group.add_option('--extra-browser-args', dest='extra_browser_args_as_string',
      help='Additional arguments to pass to the browser when it starts')
  group.add_option('--extra-wpr-args', dest='extra_wpr_args_as_string',
      help=('Additional arguments to pass to Web Page Replay. '
            'See third_party/webpagereplay/replay.py for usage.'))
  group.add_option('--netsim', default=None, type='choice',
      choices=net_configs.NET_CONFIG_NAMES,
      help=('Run benchmark under simulated network conditions. '
            'Will prompt for sudo. Supported values: ' +
            ', '.join(net_configs.NET_CONFIG_NAMES)))
  group.add_option('--show-stdout', action='store_true',
      help='When possible, will display the stdout of the process')
  # This hidden option is to be removed, and the older code path deleted,
  # once Chrome 37 reaches Stable. http://crbug.com/379980
  group.add_option('--use-devtools-active-port', action='store_true',
      help=optparse.SUPPRESS_HELP)
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Compatibility options')
  group.add_option('--gtest_output',
      help='Ignored argument for compatibility with runtest.py harness')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Synthetic gesture options')
  synthetic_gesture_source_type_choices = ['default', 'mouse', 'touch']
  group.add_option('--synthetic-gesture-source-type',
      dest='synthetic_gesture_source_type', default='default',
      type='choice', choices=synthetic_gesture_source_type_choices,
      help='Specify the source type for synthetic gestures. Note that some ' +
           'actions only support a specific source type.
' + 'Supported values: ' + ', '.join(synthetic_gesture_source_type_choices)) parser.add_option_group(group) def UpdateFromParseResults(self, finder_options): """Copies our options from finder_options""" browser_options_list = [ 'extra_browser_args_as_string', 'extra_wpr_args_as_string', 'netsim', 'profile_dir', 'profile_type', 'show_stdout', 'synthetic_gesture_source_type', 'use_devtools_active_port', ] for o in browser_options_list: a = getattr(finder_options, o, None) if a is not None: setattr(self, o, a) delattr(finder_options, o) self.browser_type = finder_options.browser_type if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101 tmp = shlex.split( self.extra_browser_args_as_string) # pylint: disable=E1101 self.AppendExtraBrowserArgs(tmp) delattr(self, 'extra_browser_args_as_string') if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101 tmp = shlex.split( self.extra_wpr_args_as_string) # pylint: disable=E1101 self.extra_wpr_args.extend(tmp) delattr(self, 'extra_wpr_args_as_string') if self.profile_type == 'default': self.dont_override_profile = True if self.profile_dir and self.profile_type != 'clean': logging.critical( "It's illegal to specify both --profile-type and --profile-dir.\n" "For more information see: http://goo.gl/ngdGD5") sys.exit(1) if self.profile_dir and not os.path.isdir(self.profile_dir): logging.critical( "Directory specified by --profile-dir (%s) doesn't exist " "or isn't a directory.\n" "For more information see: http://goo.gl/ngdGD5" % self.profile_dir) sys.exit(1) if not self.profile_dir: self.profile_dir = profile_types.GetProfileDir(self.profile_type) # This deferred import is necessary because browser_options is imported in # telemetry/telemetry/__init__.py. finder_options.browser_options = CreateChromeBrowserOptions(self) @property def extra_browser_args(self): return self._extra_browser_args def AppendExtraBrowserArgs(self, args): if isinstance(args, list): self._extra_browser_args.update(args) else: self._extra_browser_args.add(args) def CreateChromeBrowserOptions(br_options): browser_type = br_options.browser_type if (platform.GetHostPlatform().GetOSName() == 'chromeos' or (browser_type and browser_type.startswith('cros'))): return CrosBrowserOptions(br_options) return br_options class ChromeBrowserOptions(BrowserOptions): """Chrome-specific browser options.""" def __init__(self, br_options): super(ChromeBrowserOptions, self).__init__() # Copy to self. self.__dict__.update(br_options.__dict__) class CrosBrowserOptions(ChromeBrowserOptions): """ChromeOS-specific browser options.""" def __init__(self, br_options): super(CrosBrowserOptions, self).__init__(br_options) # Create a browser with oobe property. self.create_browser_with_oobe = False # Clear enterprise policy before logging in. self.clear_enterprise_policy = True # Disable GAIA/enterprise services. self.disable_gaia_services = True self.auto_login = True self.gaia_login = False self.username = 'test@test.test' self.password = '' def IsCrosBrowserOptions(self): return True # Copyright (c) 2013 Institute of the Czech National Corpus # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # dated June, 1991. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. from manatee import Corpus class EmptyCorpus: """ EmptyCorpus serves as kind of a fake corpus to keep KonText operational in some special cases (= cases where we do not need any corpus to be instantiated which is a situation original Bonito code probably never count with). """ def __init__(self, corpname: str = ''): self._corpname = corpname self._conf = { 'ENCODING': 'UTF-8', 'NAME': self.corpname, 'ATTRLIST': '', 'STRUCTLIST': '' } @property def corpname(self): return self._corpname @property def spath(self): return None @property def subcname(self): return None @property def subchash(self): return None @property def created(self): return None @property def is_published(self): return False @property def orig_spath(self): return None @property def orig_subcname(self): return None @property def author(self): return None @property def author_id(self): return -1 @property def description(self): return None def get_conf(self, param): return self._conf.get(param, '') def get_confpath(self, *args, **kwargs): return None def get_conffile(self, *args, **kwargs): return None def set_default_attr(self, *args, **kwargs): pass @property def size(self): return 0 @property def search_size(self): return 0 def get_struct(self, *args, **kwargs): pass def get_attr(self, *args, **kwargs): pass def get_info(self, *args, **kwargs): pass def unwrap(self) -> Corpus: return None def freq_dist(self, rs, crit, limit, words, freqs, norms): pass def filter_query(self, *args, **kwargs): pass def is_subcorpus(self): return False def save_subc_description(self, desc: str): pass def freq_precalc_file(self, attrname: str): return None @property def corp_mtime(self): return -1 class ErrorCorpus(EmptyCorpus): """ This type is used in case we encounter a corpus-initialization error and yet we still need proper template/etc. variables initialized (e.g. user visits URL containing non-existing sub-corpus) """ def __init__(self, err): """ arguments: err -- an error which caused that the original corpus failed to initialize """ super(ErrorCorpus, self).__init__() self._error = err def get_error(self): """ returns original error """ return self._error def is_subcorpus(self): return False # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## from openerp.osv import fields, osv class account_partner_balance(osv.osv_memory): """ This wizard will provide the partner balance report by periods, between any two dates. 
""" _inherit = 'account.common.partner.report' _name = 'account.partner.balance' _description = 'Print Account Partner Balance' _columns = { 'display_partner': fields.selection([('non-zero_balance', 'With balance is not equal to 0'), ('all', 'All Partners')] ,'Display Partners'), 'journal_ids': fields.many2many('account.journal', 'account_partner_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True), } _defaults = { # 'initial_balance': True, 'display_partner': 'non-zero_balance', } def _print_report(self, cr, uid, ids, data, context=None): if context is None: context = {} data = self.pre_print_report(cr, uid, ids, data, context=context) data['form'].update(self.read(cr, uid, ids, ['display_partner'])[0]) return { 'type': 'ir.actions.report.xml', 'report_name': 'account.partner.balance', 'datas': data, } account_partner_balance() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # -*- coding: utf-8 -*- ############################################################################## # # Author: Nicolas Bessi. Copyright Camptocamp SA # Financial contributors: Hasa SA, Open Net SA, # Prisme Solutions Informatique SA, Quod SA # # Translation contributors: brain-tec AG, Agile Business Group # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## from openerp.osv.orm import TransientModel class WizardMultiChartsAccounts(TransientModel): _inherit ='wizard.multi.charts.accounts' def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None): if context is None: context = {} res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(cursor, uid, ids, chart_template_id=chart_template_id, context=context) # 0 is evaluated as False in python so we have to do this # because original wizard test code_digits value on a float widget if chart_template_id: sterchi_template = self.pool.get('ir.model.data').get_object(cursor, uid, 'l10n_ch', 'l10nch_chart_template') if sterchi_template.id == chart_template_id: res['value']['code_digits'] = 0 return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: #!/usr/bin/python # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: os_user_group short_description: Associate OpenStack Identity users and groups extends_documentation_fragment: openstack version_added: "2.0" author: "Monty Taylor (@emonty)" description: - Add and remove users from groups options: user: description: - Name or id for the user required: true group: description: - Name or id for the group. 
required: true state: description: - Should the user be present or absent in the group choices: [present, absent] default: present availability_zone: description: - Ignored. Present for backwards compatibility required: false requirements: - "python >= 2.7" - "openstacksdk" ''' EXAMPLES = ''' # Add the demo user to the demo group - os_user_group: cloud: mycloud user: demo group: demo ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module def _system_state_change(state, in_group): if state == 'present' and not in_group: return True if state == 'absent' and in_group: return True return False def main(): argument_spec = openstack_full_argument_spec( user=dict(required=True), group=dict(required=True), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) user = module.params['user'] group = module.params['group'] state = module.params['state'] sdk, cloud = openstack_cloud_from_module(module) try: in_group = cloud.is_user_in_group(user, group) if module.check_mode: module.exit_json(changed=_system_state_change(state, in_group)) changed = False if state == 'present': if not in_group: cloud.add_user_to_group(user, group) changed = True elif state == 'absent': if in_group: cloud.remove_user_from_group(user, group) changed = True module.exit_json(changed=changed) except sdk.exceptions.OpenStackCloudException as e: module.fail_json(msg=str(e), extra_data=e.extra_data) if __name__ == '__main__': main() # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## import time from openerp.osv import osv from openerp.report import report_sxw class account_statement(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(account_statement, self).__init__(cr, uid, name, context=context) self.total = 0.0 self.localcontext.update({ 'time': time, 'get_total': self._get_total, 'get_data': self._get_data, }) def _get_data(self, statement): lines = [] for line in statement.line_ids: lines.append(line) return lines def _get_total(self, statement_line_ids): total = 0.0 for line in statement_line_ids: total += line.amount return total class report_account_statement(osv.AbstractModel): _name = 'report.point_of_sale.report_statement' _inherit = 'report.abstract_report' _template = 'point_of_sale.report_statement' _wrapped_report_class = account_statement # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2016 Eugene Frolov # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from restalchemy.api import routes from restalchemy.tests.functional.restapi.ra_based.microservice import ( controllers) class PortRoute(routes.Route): __controller__ = controllers.PortController __allow_methods__ = [routes.CREATE, routes.FILTER, routes.GET, routes.DELETE] class VMPowerOnAction(routes.Action): __controller__ = controllers.VMController class VMPowerOffAction(routes.Action): __controller__ = controllers.VMController class VMRoute(routes.Route): __controller__ = controllers.VMController __allow_methods__ = [routes.CREATE, routes.GET, routes.DELETE, routes.FILTER, routes.UPDATE] poweron = routes.action(VMPowerOnAction, invoke=True) poweroff = routes.action(VMPowerOffAction, invoke=True) ports = routes.route(PortRoute, resource_route=True) class V1Route(routes.Route): __controller__ = controllers.V1Controller __allow_methods__ = [routes.FILTER] vms = routes.route(VMRoute) class Root(routes.Route): __controller__ = controllers.RootController __allow_methods__ = [routes.FILTER] v1 = routes.route(V1Route) import logging import time from autotest.client.shared import error from virttest import virsh from provider import libvirt_version def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages and guest status """ if not virsh.has_help_command('send-key'): raise error.TestNAError("This version of libvirt does not support " "the send-key test") vm_name = params.get("main_vm", "virt-tests-vm1") status_error = ("yes" == params.get("status_error", "no")) options = params.get("sendkey_options", "") sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) sleep_time = int(params.get("sendkey_sleeptime", 2)) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = 'testacl' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) vm = env.get_vm(vm_name) session = vm.wait_for_login() if sysrq_test: # Is 'rsyslog' installed on guest? 
It'll be what writes out
        # to /var/log/messages.
        rpm_stat = session.cmd_status("rpm -q rsyslog")
        if rpm_stat != 0:
            logging.debug("rsyslog not found in guest, installing it")
            stat_install = session.cmd_status("yum install -y rsyslog", 300)
            if stat_install != 0:
                raise error.TestFail("Failed to install rsyslog; make "
                                     "sure that you have a usable repo "
                                     "in the guest")

        # Clear messages, restart rsyslog, and make sure it's running.
        session.cmd("echo '' > /var/log/messages")
        session.cmd("service rsyslog restart")
        ps_stat = session.cmd_status("ps aux |grep rsyslog")
        if ps_stat != 0:
            raise error.TestFail("rsyslog is not running in guest")

        # Enable sysrq.
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # Make sure the environment is clean.
    if create_file is not None:
        session.cmd("rm -rf %s" % create_file)

    try:
        # Wait for tty1 to start.
        tty1_stat = "ps aux|grep tty[1]"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty1_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            raise error.TestFail("tty1 did not start within 60s")

        # Send username and password to the guest to log in.
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        output = virsh.sendkey(vm_name, options, readonly=readonly,
                               unprivileged_user=unprivileged_user,
                               uri=uri)
        time.sleep(sleep_time)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to sendkey to guest as expected, "
                             "Error: %s.", output.stderr)
                return
            else:
                raise error.TestFail("Failed to send key to guest, "
                                     "Error: %s." % output.stderr)
        elif status_error:
            raise error.TestFail("Expected sendkey to fail, but it "
                                 "succeeded.")

        if create_file is not None:
            # Check whether the created file exists.
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeeded in creating file with send-key")
            else:
                raise error.TestFail("Failed to create file with send-key, "
                                     "Error: %s" % sec_output)
        elif sysrq_test:
            # Check /var/log/messages according to the key that was sent.
            # Since there's no guarantee when messages will be written,
            # we'll do a check-and-wait loop for up to 60 seconds.
            timeout = 60
            while timeout >= 0:
                if "KEY_H" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*HELP'")
                elif "KEY_M" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show Memory'")
                elif "KEY_T" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show State'")
                elif "KEY_B" in options:
                    client_session = vm.wait_for_login()
                    result = virsh.domstate(vm_name, '--reason',
                                            ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                    client_session.close()
                if get_status == 0:
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" "
                                ">> /var/log/messages")
                    time.sleep(1)
                    timeout = timeout - 1
            if get_status != 0:
                raise error.TestFail("SysRq did not take effect in guest, "
                                     "options: %s" % options)
            else:
                logging.info("Succeeded in sending SysRq command")
        else:
            raise error.TestFail("Test cfg file invalid: either sysrq_params "
                                 "or create_file_name must be defined")
    finally:
        if create_file is not None:
            session.cmd("rm -rf %s" % create_file)
        session.close()
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from os_windows.utils import rdpconsoleutilsv2 class RDPConsoleUtilsV2TestCase(base.BaseTestCase): _FAKE_RDP_PORT = 1000 def setUp(self): self._rdpconsoleutils = rdpconsoleutilsv2.RDPConsoleUtilsV2() self._rdpconsoleutils._conn = mock.MagicMock() super(RDPConsoleUtilsV2TestCase, self).setUp() def test_get_rdp_console_port(self): conn = self._rdpconsoleutils._conn mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0] mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT listener_port = self._rdpconsoleutils.get_rdp_console_port() self.assertEqual(self._FAKE_RDP_PORT, listener_port) # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
# ############################################################################## import render import rml2pdf import rml2html as htmlizer import rml2txt as txtizer import odt2odt as odt import html2html as html import makohtml2html as makohtml class rml(render.render): def __init__(self, rml, localcontext = None, datas=None, path='.', title=None): render.render.__init__(self, datas, path) self.localcontext = localcontext self.rml = rml self.output_type = 'pdf' self.title=title def _render(self): return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title) class rml2html(render.render): def __init__(self, rml,localcontext = None, datas=None): super(rml2html, self).__init__(datas) self.rml = rml self.localcontext = localcontext self.output_type = 'html' def _render(self): return htmlizer.parseString(self.rml,self.localcontext) class rml2txt(render.render): def __init__(self, rml, localcontext= None, datas=None): super(rml2txt, self).__init__(datas) self.rml = rml self.localcontext = localcontext self.output_type = 'txt' def _render(self): return txtizer.parseString(self.rml, self.localcontext) class odt2odt(render.render): def __init__(self, rml, localcontext=None, datas=None): render.render.__init__(self, datas) self.rml_dom = rml self.localcontext = localcontext self.output_type = 'odt' def _render(self): return odt.parseNode(self.rml_dom,self.localcontext) class html2html(render.render): def __init__(self, rml, localcontext=None, datas=None): render.render.__init__(self, datas) self.rml_dom = rml self.localcontext = localcontext self.output_type = 'html' def _render(self): return html.parseString(self.rml_dom,self.localcontext) class makohtml2html(render.render): def __init__(self, html, localcontext = None): render.render.__init__(self) self.html = html self.localcontext = localcontext self.output_type = 'html' def _render(self): return makohtml.parseNode(self.html,self.localcontext) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: from __future__ import unicode_literals import fnmatch import glob import io import os import re import sys from functools import total_ordering from itertools import dropwhile import django from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files.temp import NamedTemporaryFile from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import ( find_command, handle_extensions, popen_wrapper, ) from django.utils._os import upath from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str from django.utils.functional import cached_property from django.utils.jslex import prepare_js_for_gettext from django.utils.text import get_text_list from django.utils.translation import templatize plural_forms_re = re.compile(r'^(?P"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL) STATUS_OK = 0 NO_LOCALE_DIR = object() def check_programs(*programs): for program in programs: if find_command(program) is None: raise CommandError( "Can't find %s. Make sure you have GNU gettext tools 0.15 or " "newer installed." 
% program ) @total_ordering class TranslatableFile(object): def __init__(self, dirpath, file_name, locale_dir): self.file = file_name self.dirpath = dirpath self.locale_dir = locale_dir def __repr__(self): return "<%s: %s>" % ( self.__class__.__name__, os.sep.join([self.dirpath, self.file]), ) def __eq__(self, other): return self.path == other.path def __lt__(self, other): return self.path < other.path @property def path(self): return os.path.join(self.dirpath, self.file) class BuildFile(object): """ Represents the state of a translatable file during the build process. """ def __init__(self, command, domain, translatable): self.command = command self.domain = domain self.translatable = translatable @cached_property def is_templatized(self): if self.domain == 'djangojs': return self.command.gettext_version < (0, 18, 3) elif self.domain == 'django': file_ext = os.path.splitext(self.translatable.file)[1] return file_ext != '.py' return False @cached_property def path(self): return self.translatable.path @cached_property def work_path(self): """ Path to a file which is being fed into GNU gettext pipeline. This may be either a translatable or its preprocessed version. """ if not self.is_templatized: return self.path extension = { 'djangojs': 'c', 'django': 'py', }.get(self.domain) filename = '%s.%s' % (self.translatable.file, extension) return os.path.join(self.translatable.dirpath, filename) def preprocess(self): """ Preprocess (if necessary) a translatable file before passing it to xgettext GNU gettext utility. """ if not self.is_templatized: return encoding = settings.FILE_CHARSET if self.command.settings_available else 'utf-8' with io.open(self.path, 'r', encoding=encoding) as fp: src_data = fp.read() if self.domain == 'djangojs': content = prepare_js_for_gettext(src_data) elif self.domain == 'django': content = templatize(src_data, origin=self.path[2:], charset=encoding) with io.open(self.work_path, 'w', encoding='utf-8') as fp: fp.write(content) def postprocess_messages(self, msgs): """ Postprocess messages generated by xgettext GNU gettext utility. Transform paths as if these messages were generated from original translatable files rather than from preprocessed versions. """ if not self.is_templatized: return msgs # Remove '.py' suffix if os.name == 'nt': # Preserve '.\' prefix on Windows to respect gettext behavior old_path = self.work_path new_path = self.path else: old_path = self.work_path[2:] new_path = self.path[2:] return re.sub( r'^(#: .*)(' + re.escape(old_path) + r')', lambda match: match.group().replace(old_path, new_path), msgs, flags=re.MULTILINE ) def cleanup(self): """ Remove a preprocessed copy of a translatable file (if any). """ if self.is_templatized: # This check is needed for the case of a symlinked file and its # source being processed inside a single group (locale dir); # removing either of those two removes both. if os.path.exists(self.work_path): os.unlink(self.work_path) def normalize_eols(raw_contents): """ Take a block of raw text that will be passed through str.splitlines() to get universal newlines treatment. Return the resulting block of text with normalized `\n` EOL sequences ready to be written to disk using current platform's native EOLs. """ lines_list = raw_contents.splitlines() # Ensure last line has its EOL if lines_list and lines_list[-1]: lines_list.append('') return '\n'.join(lines_list) def write_pot_file(potfile, msgs): """ Write the :param potfile: POT file with the :param msgs: contents, previously making sure its format is valid. 
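    If the target file already exists, the header of msgs is stripped before
    appending; otherwise the header's charset placeholder is set to UTF-8.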
""" pot_lines = msgs.splitlines() if os.path.exists(potfile): # Strip the header lines = dropwhile(len, pot_lines) else: lines = [] found, header_read = False, False for line in pot_lines: if not found and not header_read: found = True line = line.replace('charset=CHARSET', 'charset=UTF-8') if not line and not found: header_read = True lines.append(line) msgs = '\n'.join(lines) with io.open(potfile, 'a', encoding='utf-8') as fp: fp.write(msgs) class Command(BaseCommand): help = ( "Runs over the entire source tree of the current directory and " "pulls out all strings marked for translation. It creates (or updates) a message " "file in the conf/locale (in the django tree) or locale (for projects and " "applications) directory.\n\nYou must run this command with one of either the " "--locale, --exclude, or --all options." ) translatable_file_class = TranslatableFile build_file_class = BuildFile requires_system_checks = False leave_locale_alone = True msgmerge_options = ['-q', '--previous'] msguniq_options = ['--to-code=utf-8'] msgattrib_options = ['--no-obsolete'] xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators'] def add_arguments(self, parser): parser.add_argument( '--locale', '-l', default=[], dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.', ) parser.add_argument( '--exclude', '-x', default=[], dest='exclude', action='append', help='Locales to exclude. Default is none. Can be used multiple times.', ) parser.add_argument( '--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").', ) parser.add_argument( '--all', '-a', action='store_true', dest='all', default=False, help='Updates the message files for all existing locales.', ) parser.add_argument( '--extension', '-e', dest='extensions', action='append', help='The file extension(s) to examine (default: "html,txt,py", or "js" ' 'if the domain is "djangojs"). Separate multiple extensions with ' 'commas, or use -e multiple times.', ) parser.add_argument( '--symlinks', '-s', action='store_true', dest='symlinks', default=False, help='Follows symlinks to directories when examining source code ' 'and templates for translation strings.', ) parser.add_argument( '--ignore', '-i', action='append', dest='ignore_patterns', default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. ' 'Use multiple times to ignore more.', ) parser.add_argument( '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.", ) parser.add_argument( '--no-wrap', action='store_true', dest='no_wrap', default=False, help="Don't break long message lines into several lines.", ) parser.add_argument( '--no-location', action='store_true', dest='no_location', default=False, help="Don't write '#: filename:line' lines.", ) parser.add_argument( '--no-obsolete', action='store_true', dest='no_obsolete', default=False, help="Remove obsolete message strings.", ) parser.add_argument( '--keep-pot', action='store_true', dest='keep_pot', default=False, help="Keep .pot file after making messages. 
    def handle(self, *args, **options):
        locale = options['locale']
        exclude = options['exclude']
        self.domain = options['domain']
        self.verbosity = options['verbosity']
        process_all = options['all']
        extensions = options['extensions']
        self.symlinks = options['symlinks']

        ignore_patterns = options['ignore_patterns']
        if options['use_default_ignore_patterns']:
            ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
        self.ignore_patterns = list(set(ignore_patterns))

        # Avoid messing with mutable class variables
        if options['no_wrap']:
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
            self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
            self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
        if options['no_location']:
            self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
            self.msguniq_options = self.msguniq_options[:] + ['--no-location']
            self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
            self.xgettext_options = self.xgettext_options[:] + ['--no-location']

        self.no_obsolete = options['no_obsolete']
        self.keep_pot = options['keep_pot']

        if self.domain not in ('django', 'djangojs'):
            raise CommandError("currently makemessages only supports domains "
                               "'django' and 'djangojs'")
        if self.domain == 'djangojs':
            exts = extensions if extensions else ['js']
        else:
            exts = extensions if extensions else ['html', 'txt', 'py']
        self.extensions = handle_extensions(exts)

        if (locale is None and not exclude and not process_all) or self.domain is None:
            raise CommandError(
                "Type '%s help %s' for usage information."
                % (os.path.basename(sys.argv[0]), sys.argv[1])
            )

        if self.verbosity > 1:
            self.stdout.write(
                'examining files with the extensions: %s\n'
                % get_text_list(list(self.extensions), 'and')
            )

        self.invoked_for_django = False
        self.locale_paths = []
        self.default_locale_path = None
        if os.path.isdir(os.path.join('conf', 'locale')):
            self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
            self.default_locale_path = self.locale_paths[0]
            self.invoked_for_django = True
        else:
            if self.settings_available:
                self.locale_paths.extend(settings.LOCALE_PATHS)
            # Allow to run makemessages inside an app dir
            if os.path.isdir('locale'):
                self.locale_paths.append(os.path.abspath('locale'))
            if self.locale_paths:
                self.default_locale_path = self.locale_paths[0]
                if not os.path.exists(self.default_locale_path):
                    os.makedirs(self.default_locale_path)

        # Build locale list
        locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
        all_locales = map(os.path.basename, locale_dirs)

        # Account for excluded locales
        if process_all:
            locales = all_locales
        else:
            locales = locale or all_locales
        locales = set(locales) - set(exclude)

        if locales:
            check_programs('msguniq', 'msgmerge', 'msgattrib')

        check_programs('xgettext')

        try:
            potfiles = self.build_potfiles()

            # Build po files for each selected locale
            for locale in locales:
                if self.verbosity > 0:
                    self.stdout.write("processing locale %s\n" % locale)
                for potfile in potfiles:
                    self.write_po_file(potfile, locale)
        finally:
            if not self.keep_pot:
                self.remove_potfiles()

    @cached_property
    def gettext_version(self):
        # Gettext tools will output system-encoded bytestrings instead of UTF-8,
        # when looking up the version. It's especially a problem on Windows.
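        # For reference: `xgettext --version` typically begins with a line like
        # "xgettext (GNU gettext-tools) 0.19.8.1". The regex below extracts the
        # first two or three numeric components, yielding a tuple such as
        # (0, 19, 8) that supports comparisons against thresholds like the
        # (0, 18, 3) check used by BuildFile.is_templatized.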
out, err, status = popen_wrapper( ['xgettext', '--version'], stdout_encoding=DEFAULT_LOCALE_ENCODING, ) m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out) if m: return tuple(int(d) for d in m.groups() if d is not None) else: raise CommandError("Unable to get gettext version. Is it installed?") @cached_property def settings_available(self): try: settings.LOCALE_PATHS except ImproperlyConfigured: if self.verbosity > 1: self.stderr.write("Running without configured settings.") return False return True def build_potfiles(self): """ Build pot files and apply msguniq to them. """ file_list = self.find_files(".") self.remove_potfiles() self.process_files(file_list) potfiles = [] for path in self.locale_paths: potfile = os.path.join(path, '%s.pot' % str(self.domain)) if not os.path.exists(potfile): continue args = ['msguniq'] + self.msguniq_options + [potfile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msguniq\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) msgs = normalize_eols(msgs) with io.open(potfile, 'w', encoding='utf-8') as fp: fp.write(msgs) potfiles.append(potfile) return potfiles def remove_potfiles(self): for path in self.locale_paths: pot_path = os.path.join(path, '%s.pot' % str(self.domain)) if os.path.exists(pot_path): os.unlink(pot_path) def find_files(self, root): """ Helper method to get all files in the given root. Also check that there is a matching locale dir for each file. """ def is_ignored(path, ignore_patterns): """ Check if the given path should be ignored or not. """ filename = os.path.basename(path) def ignore(pattern): return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern) return any(ignore(pattern) for pattern in ignore_patterns) ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns] dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}} norm_patterns = [] for p in ignore_patterns: for dir_suffix in dir_suffixes: if p.endswith(dir_suffix): norm_patterns.append(p[:-len(dir_suffix)]) break else: norm_patterns.append(p) all_files = [] ignored_roots = [] if self.settings_available: ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p] for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks): for dirname in dirnames[:]: if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots): dirnames.remove(dirname) if self.verbosity > 1: self.stdout.write('ignoring directory %s\n' % dirname) elif dirname == 'locale': dirnames.remove(dirname) self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname)) for filename in filenames: file_path = os.path.normpath(os.path.join(dirpath, filename)) file_ext = os.path.splitext(filename)[1] if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns): if self.verbosity > 1: self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath)) else: locale_dir = None for path in self.locale_paths: if os.path.abspath(dirpath).startswith(os.path.dirname(path)): locale_dir = path break if not locale_dir: locale_dir = self.default_locale_path if not locale_dir: locale_dir = NO_LOCALE_DIR all_files.append(self.translatable_file_class(dirpath, filename, locale_dir)) return sorted(all_files) def process_files(self, file_list): """ Group translatable files by locale directory and run pot file build process for 
each group. """ file_groups = {} for translatable in file_list: file_group = file_groups.setdefault(translatable.locale_dir, []) file_group.append(translatable) for locale_dir, files in file_groups.items(): self.process_locale_dir(locale_dir, files) def process_locale_dir(self, locale_dir, files): """ Extract translatable literals from the specified files, creating or updating the POT file for a given locale directory. Uses the xgettext GNU gettext utility. """ build_files = [] for translatable in files: if self.verbosity > 1: self.stdout.write('processing file %s in %s\n' % ( translatable.file, translatable.dirpath )) if self.domain not in ('djangojs', 'django'): continue build_file = self.build_file_class(self, self.domain, translatable) try: build_file.preprocess() except UnicodeDecodeError as e: self.stdout.write( 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % ( translatable.file, translatable.dirpath, e, ) ) continue build_files.append(build_file) if self.domain == 'djangojs': is_templatized = build_file.is_templatized args = [ 'xgettext', '-d', self.domain, '--language=%s' % ('C' if is_templatized else 'JavaScript',), '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--output=-', ] elif self.domain == 'django': args = [ 'xgettext', '-d', self.domain, '--language=Python', '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=ugettext_noop', '--keyword=ugettext_lazy', '--keyword=ungettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--keyword=pgettext_lazy:1c,2', '--keyword=npgettext_lazy:1c,2,3', '--output=-', ] else: return input_files = [bf.work_path for bf in build_files] with NamedTemporaryFile(mode='w+') as input_files_list: input_files_list.write(force_str('\n'.join(input_files), encoding=DEFAULT_LOCALE_ENCODING)) input_files_list.flush() args.extend(['--files-from', input_files_list.name]) args.extend(self.xgettext_options) msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: for build_file in build_files: build_file.cleanup() raise CommandError( 'errors happened while running xgettext on %s\n%s' % ('\n'.join(input_files), errors) ) elif self.verbosity > 0: # Print warnings self.stdout.write(errors) if msgs: if locale_dir is NO_LOCALE_DIR: file_path = os.path.normpath(build_files[0].path) raise CommandError( 'Unable to find a locale path to store translations for ' 'file %s' % file_path ) for build_file in build_files: msgs = build_file.postprocess_messages(msgs) potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain)) write_pot_file(potfile, msgs) for build_file in build_files: build_file.cleanup() def write_po_file(self, potfile, locale): """ Creates or updates the PO file for self.domain and :param locale:. Uses contents of the existing :param potfile:. Uses msgmerge, and msgattrib GNU gettext utilities. 
""" basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES') if not os.path.isdir(basedir): os.makedirs(basedir) pofile = os.path.join(basedir, '%s.po' % str(self.domain)) if os.path.exists(pofile): args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgmerge\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) else: with io.open(potfile, 'r', encoding='utf-8') as fp: msgs = fp.read() if not self.invoked_for_django: msgs = self.copy_plural_forms(msgs, locale) msgs = normalize_eols(msgs) msgs = msgs.replace( "#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "") with io.open(pofile, 'w', encoding='utf-8') as fp: fp.write(msgs) if self.no_obsolete: args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgattrib\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) def copy_plural_forms(self, msgs, locale): """ Copies plural forms header contents from a Django catalog of locale to the msgs string, inserting it at the right place. msgs should be the contents of a newly created .po file. """ django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__)))) if self.domain == 'djangojs': domains = ('djangojs', 'django') else: domains = ('django',) for domain in domains: django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain) if os.path.exists(django_po): with io.open(django_po, 'r', encoding='utf-8') as fp: m = plural_forms_re.search(fp.read()) if m: plural_form_line = force_str(m.group('value')) if self.verbosity > 1: self.stdout.write("copying plural forms: %s\n" % plural_form_line) lines = [] found = False for line in msgs.splitlines(): if not found and (not line or plural_forms_re.search(line)): line = plural_form_line found = True lines.append(line) msgs = '\n'.join(lines) break return msgs # Copyright (c) 2005 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nathan Binkert import code import datetime import os import socket import sys __all__ = [ 'options', 'arguments', 'main' ] usage="%prog [gem5 options] script.py [script options]" version="%prog 2.0" brief_copyright=\ "gem5 is copyrighted software; use the --copyright option for details." def parse_options(): import config from options import OptionParser options = OptionParser(usage=usage, version=version, description=brief_copyright) option = options.add_option group = options.set_group # Help options option('-B', "--build-info", action="store_true", default=False, help="Show build information") option('-C', "--copyright", action="store_true", default=False, help="Show full copyright information") option('-R', "--readme", action="store_true", default=False, help="Show the readme") # Options for configuring the base simulator option('-d', "--outdir", metavar="DIR", default="m5out", help="Set the output directory to DIR [Default: %default]") option('-r', "--redirect-stdout", action="store_true", default=False, help="Redirect stdout (& stderr, without -e) to file") option('-e', "--redirect-stderr", action="store_true", default=False, help="Redirect stderr to file") option("--stdout-file", metavar="FILE", default="simout", help="Filename for -r redirection [Default: %default]") option("--stderr-file", metavar="FILE", default="simerr", help="Filename for -e redirection [Default: %default]") option('-i', "--interactive", action="store_true", default=False, help="Invoke the interactive interpreter after running the script") option("--pdb", action="store_true", default=False, help="Invoke the python debugger before running the script") option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':', help="Prepend PATH to the system path when invoking the script") option('-q', "--quiet", action="count", default=0, help="Reduce verbosity") option('-v', "--verbose", action="count", default=0, help="Increase verbosity") # Statistics options group("Statistics Options") option("--stats-file", metavar="FILE", default="stats.txt", help="Sets the output file for statistics [Default: %default]") # Configuration Options group("Configuration Options") option("--dump-config", metavar="FILE", default="config.ini", help="Dump configuration output file [Default: %default]") option("--json-config", metavar="FILE", default="config.json", help="Create JSON output of the configuration [Default: %default]") option("--dot-config", metavar="FILE", default="config.dot", help="Create DOT & pdf outputs of the configuration [Default: %default]") # Debugging options group("Debugging Options") option("--debug-break", metavar="TIME[,TIME]", action='append', split=',', help="Cycle to create a breakpoint") option("--debug-help", action='store_true', help="Print help on trace flags") option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',', help="Sets the flags for tracing (-FLAG disables a flag)") option("--remote-gdb-port", type='int', default=7000, help="Remote gdb base 
port (set to 0 to disable listening)")

    # Tracing options
    group("Trace Options")
    option("--trace-start", metavar="TIME", type='int',
        help="Start tracing at TIME (must be in ticks)")
    option("--trace-file", metavar="FILE", default="cout",
        help="Sets the output file for tracing [Default: %default]")
    option("--trace-ignore", metavar="EXPR", action='append', split=':',
        help="Ignore EXPR sim objects")

    # Help options
    group("Help Options")
    option("--list-sim-objects", action='store_true', default=False,
        help="List all built-in SimObjects, their params and default values")

    # load the options.py config file to allow people to set their own
    # default options
    options_file = config.get('options.py')
    if options_file:
        scope = { 'options' : options }
        execfile(options_file, scope)

    arguments = options.parse_args()
    return options, arguments

def interact(scope):
    banner = "gem5 Interactive Console"
    sys.argv = []
    try:
        from IPython.Shell import IPShellEmbed
        ipshell = IPShellEmbed(banner=banner, user_ns=scope)
        ipshell()
    except ImportError:
        code.InteractiveConsole(scope).interact(banner)

def main(*args):
    import m5
    import core
    import debug
    import defines
    import event
    import info
    import stats
    import trace

    from util import fatal

    if len(args) == 0:
        options, arguments = parse_options()
    elif len(args) == 2:
        options, arguments = args
    else:
        raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)

    m5.options = options

    def check_tracing():
        if defines.TRACING_ON:
            return

        fatal("Tracing is not enabled. Compile with TRACING_ON")

    if not os.path.isdir(options.outdir):
        os.makedirs(options.outdir)

    # These filenames are used only if the redirect_std* options are set
    stdout_file = os.path.join(options.outdir, options.stdout_file)
    stderr_file = os.path.join(options.outdir, options.stderr_file)

    # Print redirection notices here before doing any redirection
    if options.redirect_stdout and not options.redirect_stderr:
        print "Redirecting stdout and stderr to", stdout_file
    else:
        if options.redirect_stdout:
            print "Redirecting stdout to", stdout_file
        if options.redirect_stderr:
            print "Redirecting stderr to", stderr_file

    # Now redirect stdout/stderr as desired
    if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stdout.fileno())
        if not options.redirect_stderr:
            os.dup2(redir_fd, sys.stderr.fileno())
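    # Note on the redirection here: os.dup2 rebinds the underlying process
    # file descriptors (1 for stdout, 2 for stderr), so output written by the
    # C++ side of the simulator is captured in the redirect target along with
    # Python-level prints.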
    if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stderr.fileno())

    done = False

    if options.build_info:
        done = True
        print 'Build information:'
        print
        print 'compiled %s' % defines.compileDate
        print 'build options:'
        keys = defines.buildEnv.keys()
        keys.sort()
        for key in keys:
            val = defines.buildEnv[key]
            print '    %s = %s' % (key, val)
        print

    if options.copyright:
        done = True
        print info.COPYING
        print

    if options.readme:
        done = True
        print 'Readme:'
        print
        print info.README
        print

    if options.debug_help:
        done = True
        check_tracing()
        debug.help()

    if options.list_sim_objects:
        import SimObject
        done = True
        print "SimObjects:"
        objects = SimObject.allClasses.keys()
        objects.sort()
        for name in objects:
            obj = SimObject.allClasses[name]
            print "    %s" % obj
            params = obj._params.keys()
            params.sort()
            for pname in params:
                param = obj._params[pname]
                default = getattr(param, 'default', '')
                print "        %s" % pname
                if default:
                    print "            default: %s" % default
                print "            desc: %s" % param.desc
                print
        print

    if done:
        sys.exit(0)

    # setting verbose and quiet at the same time doesn't make sense
    if options.verbose > 0 and options.quiet > 0:
        options.usage(2)

    verbose = options.verbose - options.quiet
    if verbose >= 0:
        print "gem5 Simulator System. http://gem5.org"
        print brief_copyright
        print
        print "gem5 compiled %s" % defines.compileDate
        print "gem5 started %s" % \
            datetime.datetime.now().strftime("%b %e %Y %X")
        print "gem5 executing on %s" % socket.gethostname()
        print "command line:",
        for argv in sys.argv:
            print argv,
        print

    # check to make sure we can find the listed script
    if not arguments or not os.path.isfile(arguments[0]):
        if arguments and not os.path.isfile(arguments[0]):
            print "Script %s not found" % arguments[0]
        options.usage(2)

    # tell C++ about output directory
    core.setOutputDir(options.outdir)

    # update the system path with elements from the -p option
    sys.path[0:0] = options.path

    # set stats options
    stats.initText(options.stats_file)

    # set debugging options
    debug.setRemoteGDBPort(options.remote_gdb_port)
    for when in options.debug_break:
        debug.schedBreakCycle(int(when))

    if options.debug_flags:
        check_tracing()
        on_flags = []
        off_flags = []
        for flag in options.debug_flags:
            off = False
            if flag.startswith('-'):
                flag = flag[1:]
                off = True

            if flag not in debug.flags:
                print >>sys.stderr, "invalid debug flag '%s'" % flag
                sys.exit(1)

            if off:
                debug.flags[flag].disable()
            else:
                debug.flags[flag].enable()

    if options.trace_start:
        check_tracing()
        e = event.create(trace.enable, event.Event.Trace_Enable_Pri)
        event.mainq.schedule(e, options.trace_start)
    else:
        trace.enable()

    trace.output(options.trace_file)

    for ignore in options.trace_ignore:
        check_tracing()
        trace.ignore(ignore)

    sys.argv = arguments
    sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path

    filename = sys.argv[0]
    filedata = file(filename, 'r').read()
    filecode = compile(filedata, filename, 'exec')
    scope = { '__file__' : filename, '__name__' : '__m5_main__' }

    # we want readline if we're doing anything interactive
    if options.interactive or options.pdb:
        exec "import readline" in scope

    # if pdb was requested, execfile the thing under pdb, otherwise,
    # just do the execfile normally
    if options.pdb:
        import pdb
        import traceback

        pdb = pdb.Pdb()
        try:
            pdb.run(filecode, scope)
        except SystemExit:
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            traceback.print_exc()
            print "Uncaught exception.
Entering post mortem debugging" t = sys.exc_info()[2] while t.tb_next is not None: t = t.tb_next pdb.interaction(t.tb_frame,t) else: exec filecode in scope # once the script is done if options.interactive: interact(scope) if __name__ == '__main__': from pprint import pprint options, arguments = parse_options() print 'opts:' pprint(options, indent=4) print print 'args:' pprint(arguments, indent=4) from django.conf import settings from django.core.urlresolvers import reverse from django.test import TestCase from django.http import HttpRequest from oscar.test.factories import create_product from oscar.core.compat import get_user_model from oscar.apps.customer import history from oscar.templatetags.history_tags import get_back_button User = get_user_model() COOKIE_NAME = settings.OSCAR_RECENTLY_VIEWED_COOKIE_NAME class HistoryHelpersTest(TestCase): def setUp(self): self.product = create_product() def test_viewing_product_creates_cookie(self): response = self.client.get(self.product.get_absolute_url()) self.assertTrue(COOKIE_NAME in response.cookies) def test_id_gets_added_to_cookie(self): response = self.client.get(self.product.get_absolute_url()) request = HttpRequest() request.COOKIES[COOKIE_NAME] = response.cookies[COOKIE_NAME].value self.assertTrue(self.product.id in history.extract(request)) def test_get_back_button(self): request = HttpRequest() request.META['SERVER_NAME'] = 'test' request.META['SERVER_PORT'] = 8000 request.META['HTTP_REFERER'] = 'http://www.google.com' backbutton = get_back_button({'request': request}) self.assertEqual(backbutton, None) request.META['HTTP_REFERER'] = 'http://test:8000/search/' backbutton = get_back_button({'request': request}) self.assertTrue(backbutton) self.assertEqual(backbutton['title'], 'Back to search results') class TestAUserWhoLogsOut(TestCase): username = 'customer' password = 'cheeseshop' email = 'customer@example.com' def setUp(self): self.product = create_product() User.objects.create_user(username=self.username, email=self.email, password=self.password) self.client.login(email=self.email, password=self.password) def test_has_their_cookies_deleted_on_logout(self): response = self.client.get(self.product.get_absolute_url()) self.assertTrue(COOKIE_NAME in response.cookies) response = self.client.get(reverse('customer:logout')) self.assertTrue((COOKIE_NAME not in response.cookies) or not self.client.cookies['oscar_recently_viewed_products'].coded_value) from sqlalchemy import testing from sqlalchemy.testing import ( fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL) from sqlalchemy import ( exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists, text) from sqlalchemy.orm import ( configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased) from sqlalchemy.sql import table, column from sqlalchemy.engine import default import sqlalchemy as sa from sqlalchemy.testing.schema import Column from test.orm import _fixtures from sqlalchemy.orm.util import join class QueryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): Node, composite_pk_table, users, Keyword, items, Dingaling, \ order_items, item_keywords, Item, User, dingalings, \ Address, keywords, CompositePk, nodes, Order, orders, \ addresses = cls.classes.Node, \ cls.tables.composite_pk_table, 
cls.tables.users, \ cls.classes.Keyword, cls.tables.items, \ cls.classes.Dingaling, cls.tables.order_items, \ cls.tables.item_keywords, cls.classes.Item, \ cls.classes.User, cls.tables.dingalings, \ cls.classes.Address, cls.tables.keywords, \ cls.classes.CompositePk, cls.tables.nodes, \ cls.classes.Order, cls.tables.orders, cls.tables.addresses mapper( User, users, properties={ 'addresses': relationship( Address, backref='user', order_by=addresses.c.id), 'orders': relationship( Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper( Address, addresses, properties={ 'dingaling': relationship( Dingaling, uselist=False, backref="address") # o2o }) mapper(Dingaling, dingalings) mapper( Order, orders, properties={ 'items': relationship( Item, secondary=order_items, order_by=items.c.id), # m2m 'address': relationship(Address), # m2o }) mapper( Item, items, properties={ 'keywords': relationship( Keyword, secondary=item_keywords)}) # m2m mapper(Keyword, keywords) mapper( Node, nodes, properties={ 'children': relationship( Node, backref=backref('parent', remote_side=[nodes.c.id])) }) mapper(CompositePk, composite_pk_table) configure_mappers() class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL): query_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) AS count_1 FROM addresses " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" query_not_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) AS count_1 FROM addresses, users " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" def test_as_scalar_select_auto_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id == users.c.id).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile( query, self.query_correlated, dialect=default.DefaultDialect()) def test_as_scalar_select_explicit_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id == users.c.id).correlate(users).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile( query, self.query_correlated, dialect=default.DefaultDialect()) def test_as_scalar_select_correlate_off(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id == users.c.id).correlate(None).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile( query, self.query_not_correlated, dialect=default.DefaultDialect()) def test_as_scalar_query_auto_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id == User.id)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile( query, self.query_correlated, dialect=default.DefaultDialect()) def test_as_scalar_query_explicit_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id)). \ filter(Address.user_id == User.id). \ correlate(self.tables.users).as_scalar() query = sess.query(User.name, query) self.assert_compile( query, self.query_correlated, dialect=default.DefaultDialect()) def test_as_scalar_query_correlate_off(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id)). 
\ filter(Address.user_id == User.id).correlate(None).as_scalar() query = sess.query(User.name, query) self.assert_compile( query, self.query_not_correlated, dialect=default.DefaultDialect()) class RawSelectTest(QueryTest, AssertsCompiledSQL): """compare a bunch of select() tests with the equivalent Query using straight table/columns. Results should be the same as Query should act as a select() pass- thru for ClauseElement entities. """ __dialect__ = 'default' def test_select(self): addresses, users = self.tables.addresses, self.tables.users sess = create_session() self.assert_compile( sess.query(users).select_entity_from(users.select()). with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name " "FROM users, " "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", ) self.assert_compile( sess.query(users, exists([1], from_obj=addresses)). with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, EXISTS " "(SELECT 1 FROM addresses) AS anon_1 FROM users", ) # a little tedious here, adding labels to work around Query's # auto-labelling. s = sess.query( addresses.c.id.label('id'), addresses.c.email_address.label('email')).\ filter(addresses.c.user_id == users.c.id).correlate(users).\ statement.alias() self.assert_compile( sess.query(users, s.c.email).select_entity_from( users.join(s, s.c.id == users.c.id) ).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, " "anon_1.email AS anon_1_email " "FROM users JOIN (SELECT addresses.id AS id, " "addresses.email_address AS email FROM addresses, users " "WHERE addresses.user_id = users.id) AS anon_1 " "ON anon_1.id = users.id",) x = func.lala(users.c.id).label('foo') self.assert_compile(sess.query(x).filter(x == 5).statement, "SELECT lala(users.id) AS foo FROM users WHERE " "lala(users.id) = :param_1") self.assert_compile(sess.query(func.sum(x).label('bar')).statement, "SELECT sum(lala(users.id)) AS bar FROM users") class FromSelfTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_filter(self): User = self.classes.User eq_( [User(id=8), User(id=9)], create_session().query(User).filter(User.id.in_([8, 9])). from_self().all()) eq_( [User(id=8), User(id=9)], create_session().query(User).order_by(User.id).slice(1, 3). from_self().all()) eq_( [User(id=8)], list( create_session().query(User).filter(User.id.in_([8, 9])). from_self().order_by(User.id)[0:1])) def test_join(self): User, Address = self.classes.User, self.classes.Address eq_( [ (User(id=8), Address(id=2)), (User(id=8), Address(id=3)), (User(id=8), Address(id=4)), (User(id=9), Address(id=5))], create_session().query(User).filter(User.id.in_([8, 9])). from_self().join('addresses').add_entity(Address). order_by(User.id, Address.id).all() ) def test_group_by(self): Address = self.classes.Address eq_( create_session(). query(Address.user_id, func.count(Address.id).label('count')). group_by(Address.user_id).order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) eq_( create_session().query(Address.user_id, Address.id). from_self(Address.user_id, func.count(Address.id)). group_by(Address.user_id).order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) def test_having(self): User = self.classes.User s = create_session() self.assert_compile( s.query(User.id).group_by(User.id).having(User.id > 5). 
from_self(), "SELECT anon_1.users_id AS anon_1_users_id FROM " "(SELECT users.id AS users_id FROM users GROUP " "BY users.id HAVING users.id > :id_1) AS anon_1" ) def test_no_joinedload(self): """test that joinedloads are pushed outwards and not rendered in subqueries.""" User = self.classes.User s = create_session() self.assert_compile( s.query(User).options(joinedload(User.addresses)). from_self().statement, "SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, " "addresses_1.user_id, addresses_1.email_address FROM " "(SELECT users.id AS users_id, users.name AS " "users_name FROM users) AS anon_1 LEFT OUTER JOIN " "addresses AS addresses_1 ON anon_1.users_id = " "addresses_1.user_id ORDER BY addresses_1.id" ) def test_aliases(self): """test that aliased objects are accessible externally to a from_self() call.""" User, Address = self.classes.User, self.classes.Address s = create_session() ualias = aliased(User) eq_( s.query(User, ualias).filter(User.id > ualias.id). from_self(User.name, ualias.name). order_by(User.name, ualias.name).all(), [ ('chuck', 'ed'), ('chuck', 'fred'), ('chuck', 'jack'), ('ed', 'jack'), ('fred', 'ed'), ('fred', 'jack') ] ) eq_( s.query(User, ualias).filter(User.id > ualias.id). from_self(User.name, ualias.name).filter(ualias.name == 'ed'). order_by(User.name, ualias.name).all(), [('chuck', 'ed'), ('fred', 'ed')]) eq_( s.query(User, ualias).filter(User.id > ualias.id). from_self(ualias.name, Address.email_address). join(ualias.addresses). order_by(ualias.name, Address.email_address).all(), [ ('ed', 'fred@fred.com'), ('jack', 'ed@bettyboop.com'), ('jack', 'ed@lala.com'), ('jack', 'ed@wood.com'), ('jack', 'fred@fred.com')]) def test_multiple_entities(self): User, Address = self.classes.User, self.classes.Address sess = create_session() eq_( sess.query(User, Address). filter(User.id == Address.user_id). filter(Address.id.in_([2, 5])).from_self().all(), [ (User(id=8), Address(id=2)), (User(id=9), Address(id=5))]) eq_( sess.query(User, Address).filter(User.id == Address.user_id). filter(Address.id.in_([2, 5])).from_self(). options(joinedload('addresses')).first(), ( User( id=8, addresses=[Address(), Address(), Address()]), Address(id=2)),) def test_multiple_with_column_entities(self): User = self.classes.User sess = create_session() eq_( sess.query(User.id).from_self(). add_column(func.count().label('foo')).group_by(User.id). 
order_by(User.id).from_self().all(), [ (7, 1), (8, 1), (9, 1), (10, 1)]) class ColumnAccessTest(QueryTest, AssertsCompiledSQL): """test access of columns after _from_selectable has been applied""" __dialect__ = 'default' def test_from_self(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self() self.assert_compile( q.filter(User.name == 'ed'), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS " "anon_1_users_name FROM (SELECT users.id AS users_id, users.name " "AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = " ":name_1" ) def test_from_self_twice(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self(User.id, User.name).from_self() self.assert_compile( q.filter(User.name == 'ed'), "SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, " "anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM " "(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name " "AS anon_2_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users) AS anon_2) AS anon_1 " "WHERE anon_1.anon_2_users_name = :name_1" ) def test_select_entity_from(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_entity_from(q.statement) self.assert_compile( q.filter(User.name == 'ed'), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE anon_1.name = :name_1" ) def test_select_entity_from_no_entities(self): User = self.classes.User sess = create_session() assert_raises_message( sa.exc.ArgumentError, r"A selectable \(FromClause\) instance is " "expected when the base alias is being set", sess.query(User).select_entity_from, User) def test_select_from_no_aliasing(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_from(q.statement) self.assert_compile( q.filter(User.name == 'ed'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users, (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE users.name = :name_1" ) def test_anonymous_expression(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1, c2 WHERE " "c1 = :c1_1 UNION SELECT c1, c2 " "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1" ) def test_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(c1), "SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS " "anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 " "AS anon_2_c2 " "FROM (SELECT c1, c2 WHERE c1 = :c1_1) AS " "anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1" ) def test_anonymous_expression_union(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1, c2 WHERE " "c1 = :c1_1 UNION SELECT c1, c2 " "WHERE c1 = :c1_2) AS anon_1 ORDER BY 
anon_1.c1" ) def test_table_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column sess = create_session() t1 = table('t1', column('c1'), column('c2')) q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(t1.c.c1), "SELECT anon_1.anon_2_t1_c1 " "AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 " "AS anon_1_anon_2_t1_c2 " "FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, " "anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 " "AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 " "ORDER BY anon_1.anon_2_t1_c1" ) def test_anonymous_labeled_expression(self): sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'dog') q2 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM " "(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT " "c1 AS foo, c2 AS bar " "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo") def test_anonymous_expression_plus_aliased_join(self): """test that the 'dont alias non-ORM' rule remains for other kinds of aliasing when _from_selectable() is used.""" User = self.classes.User Address = self.classes.Address addresses = self.tables.addresses sess = create_session() q1 = sess.query(User.id).filter(User.id > 5) q1 = q1.from_self() q1 = q1.join(User.addresses, aliased=True).\ order_by(User.id, Address.id, addresses.c.id) self.assert_compile( q1, "SELECT anon_1.users_id AS anon_1_users_id " "FROM (SELECT users.id AS users_id FROM users " "WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 " "ON anon_1.users_id = addresses_1.user_id " "ORDER BY anon_1.users_id, addresses_1.id, addresses.id" ) class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table( 'a', metadata, Column( 'id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)), Column('bid', Integer, ForeignKey('b.id')) ) Table( 'b', metadata, Column( 'id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)) ) Table( 'c', metadata, Column('id', Integer, ForeignKey('b.id'), primary_key=True), Column('age', Integer)) Table( 'd', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('dede', Integer)) @classmethod def setup_classes(cls): a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d) class A(cls.Comparable): pass class B(cls.Comparable): pass class C(B): pass class D(A): pass mapper( A, a, polymorphic_identity='a', polymorphic_on=a.c.type, with_polymorphic=('*', None), properties={ 'link': relation(B, uselist=False, backref='back')}) mapper( B, b, polymorphic_identity='b', polymorphic_on=b.c.type, with_polymorphic=('*', None)) mapper(C, c, inherits=B, polymorphic_identity='c') mapper(D, d, inherits=A, polymorphic_identity='d') @classmethod def insert_data(cls): A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B) sess = create_session() sess.add_all( [ B(name='b1'), A(name='a1', link=C(name='c1', age=3)), C(name='c2', age=6), A(name='a2')]) sess.flush() def test_add_entity_equivalence(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = create_session() for q in [ sess.query(A, B).join(A.link), 
sess.query(A).join(A.link).add_entity(B), ]: eq_( q.all(), [( A(bid=2, id=1, name='a1', type='a'), C(age=3, id=2, name='c1', type='c') )] ) for q in [ sess.query(B, A).join(B.back), sess.query(B).join(B.back).add_entity(A), sess.query(B).add_entity(A).join(B.back) ]: eq_( q.all(), [( C(age=3, id=2, name='c1', type='c'), A(bid=2, id=1, name='a1', type='a') )] ) class InstancesTest(QueryTest, AssertsCompiledSQL): def test_from_alias_one(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) query = users.select(users.c.id == 7).\ union(users.select(users.c.id > 7)).alias('ulist').\ outerjoin(addresses).\ select( use_labels=True, order_by=[text('ulist.id'), addresses.c.id]) sess = create_session() q = sess.query(User) def go(): l = list( q.options( contains_alias('ulist'), contains_eager('addresses')). instances(query.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_from_alias_two(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) query = users.select(users.c.id == 7).\ union(users.select(users.c.id > 7)).alias('ulist').\ outerjoin(addresses). \ select( use_labels=True, order_by=[text('ulist.id'), addresses.c.id]) sess = create_session() q = sess.query(User) def go(): l = q.options( contains_alias('ulist'), contains_eager('addresses')).\ from_statement(query).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_from_alias_three(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) query = users.select(users.c.id == 7).\ union(users.select(users.c.id > 7)).alias('ulist').\ outerjoin(addresses). \ select( use_labels=True, order_by=[text('ulist.id'), addresses.c.id]) sess = create_session() # better way. 
use select_entity_from() def go(): l = sess.query(User).select_entity_from(query).\ options(contains_eager('addresses')).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_from_alias_four(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) sess = create_session() # same thing, but alias addresses, so that the adapter # generated by select_entity_from() is wrapped within # the adapter created by contains_eager() adalias = addresses.alias() query = users.select(users.c.id == 7).\ union(users.select(users.c.id > 7)).\ alias('ulist').outerjoin(adalias).\ select(use_labels=True, order_by=[text('ulist.id'), adalias.c.id]) def go(): l = sess.query(User).select_entity_from(query).\ options(contains_eager('addresses', alias=adalias)).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager(self): users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) sess = create_session() # test that contains_eager suppresses the normal outer join rendering q = sess.query(User).outerjoin(User.addresses).\ options(contains_eager(User.addresses)).\ order_by(User.id, addresses.c.id) self.assert_compile(q.with_labels().statement, 'SELECT addresses.id AS addresses_id, ' 'addresses.user_id AS addresses_user_id, ' 'addresses.email_address AS ' 'addresses_email_address, users.id AS ' 'users_id, users.name AS users_name FROM ' 'users LEFT OUTER JOIN addresses ON ' 'users.id = addresses.user_id ORDER BY ' 'users.id, addresses.id', dialect=default.DefaultDialect()) def go(): assert self.static.user_address_result == q.all() self.assert_sql_count(testing.db, go, 1) sess.expunge_all() adalias = addresses.alias() q = sess.query(User).\ select_entity_from(users.outerjoin(adalias)).\ options(contains_eager(User.addresses, alias=adalias)).\ order_by(User.id, adalias.c.id) def go(): eq_(self.static.user_address_result, q.order_by(User.id).all()) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() selectquery = users.outerjoin(addresses). \ select( users.c.id < 10, use_labels=True, order_by=[users.c.id, addresses.c.id]) q = sess.query(User) def go(): l = list( q.options(contains_eager('addresses')). instances(selectquery.execute())) assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): l = list( q.options(contains_eager(User.addresses)). instances(selectquery.execute())) assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): l = q.options( contains_eager('addresses')).from_statement(selectquery).all() assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_string_alias(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() q = sess.query(User) adalias = addresses.alias('adalias') selectquery = users.outerjoin(adalias). \ select(use_labels=True, order_by=[users.c.id, adalias.c.id]) # string alias name def go(): l = list( q.options( contains_eager('addresses', alias="adalias")). 
instances(selectquery.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_aliased_instances(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() q = sess.query(User) adalias = addresses.alias('adalias') selectquery = users.outerjoin(adalias).\ select(use_labels=True, order_by=[users.c.id, adalias.c.id]) # expression.Alias object def go(): l = list( q.options( contains_eager('addresses', alias=adalias)). instances(selectquery.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_aliased(self): User, Address = self.classes.User, self.classes.Address sess = create_session() q = sess.query(User) # Aliased object adalias = aliased(Address) def go(): l = q.options( contains_eager('addresses', alias=adalias) ).outerjoin(adalias, User.addresses).\ order_by(User.id, adalias.id) assert self.static.user_address_result == l.all() self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_string_alias(self): orders, items, users, order_items, User = (self.tables.orders, self.tables.items, self.tables.users, self.tables.order_items, self.classes.User) sess = create_session() q = sess.query(User) oalias = orders.alias('o1') ialias = items.alias('i1') query = users.outerjoin(oalias).outerjoin(order_items).\ outerjoin(ialias).select(use_labels=True).\ order_by(users.c.id, oalias.c.id, ialias.c.id) # test using string alias with more than one level deep def go(): l = list( q.options( contains_eager('orders', alias='o1'), contains_eager('orders.items', alias='i1') ).instances(query.execute())) assert self.static.user_order_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_alias(self): orders, items, users, order_items, User = (self.tables.orders, self.tables.items, self.tables.users, self.tables.order_items, self.classes.User) sess = create_session() q = sess.query(User) oalias = orders.alias('o1') ialias = items.alias('i1') query = users.outerjoin(oalias).outerjoin(order_items).\ outerjoin(ialias).select(use_labels=True).\ order_by(users.c.id, oalias.c.id, ialias.c.id) # test using Alias with more than one level deep # new way: # from sqlalchemy.orm.strategy_options import Load # opt = Load(User).contains_eager('orders', alias=oalias). # contains_eager('items', alias=ialias) def go(): l = list( q.options( contains_eager('orders', alias=oalias), contains_eager('orders.items', alias=ialias)). 
instances(query.execute())) assert self.static.user_order_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_aliased(self): Item, User, Order = ( self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() q = sess.query(User) # test using Aliased with more than one level deep oalias = aliased(Order) ialias = aliased(Item) def go(): l = q.options( contains_eager(User.orders, alias=oalias), contains_eager(User.orders, Order.items, alias=ialias)).\ outerjoin(oalias, User.orders).\ outerjoin(ialias, oalias.items).\ order_by(User.id, oalias.id, ialias.id) assert self.static.user_order_result == l.all() self.assert_sql_count(testing.db, go, 1) def test_contains_eager_chaining(self): """test that contains_eager() 'chains' by default.""" Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() q = sess.query(User).join(User.addresses).join(Address.dingaling).\ options(contains_eager(User.addresses, Address.dingaling),) def go(): eq_( q.all(), # note we only load the Address records that # have a Dingaling here due to using the inner # join for the eager load [ User(name='ed', addresses=[ Address(email_address='ed@wood.com', dingaling=Dingaling(data='ding 1/2')), ]), User(name='fred', addresses=[ Address(email_address='fred@fred.com', dingaling=Dingaling(data='ding 2/5')) ]) ] ) self.assert_sql_count(testing.db, go, 1) def test_contains_eager_chaining_aliased_endpoint(self): """test that contains_eager() 'chains' by default and supports an alias at the end.""" Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() da = aliased(Dingaling, name="foob") q = sess.query(User).join(User.addresses).\ join(da, Address.dingaling).\ options( contains_eager(User.addresses, Address.dingaling, alias=da),) def go(): eq_( q.all(), # note we only load the Address records that # have a Dingaling here due to using the inner # join for the eager load [ User(name='ed', addresses=[ Address(email_address='ed@wood.com', dingaling=Dingaling(data='ding 1/2')), ]), User(name='fred', addresses=[ Address(email_address='fred@fred.com', dingaling=Dingaling(data='ding 2/5')) ]) ] ) self.assert_sql_count(testing.db, go, 1) def test_mixed_eager_contains_with_limit(self): Order, User, Address = (self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() q = sess.query(User) def go(): # outerjoin to User.orders, offset 1/limit 2 so we get user # 7 + second two orders. then joinedload the addresses. # User + Order columns go into the subquery, address left # outer joins to the subquery, joinedloader for User.orders # applies context.adapter to result rows. This was # [ticket:1180]. l = q.outerjoin(User.orders).options( joinedload(User.addresses), contains_eager(User.orders)). \ order_by(User.id, Order.id).offset(1).limit(2).all() eq_( l, [ User( id=7, addresses=[ Address( email_address='jack@bean.com', user_id=7, id=1)], name='jack', orders=[ Order( address_id=1, user_id=7, description='order 3', isopen=1, id=3), Order( address_id=None, user_id=7, description='order 5', isopen=0, id=5)])]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): # same as above, except Order is aliased, so two adapters # are applied by the eager loader oalias = aliased(Order) l = q.outerjoin(oalias, User.orders).options( joinedload(User.addresses), contains_eager(User.orders, alias=oalias)). 
\ order_by(User.id, oalias.id).\ offset(1).limit(2).all() eq_( l, [ User( id=7, addresses=[ Address( email_address='jack@bean.com', user_id=7, id=1)], name='jack', orders=[ Order( address_id=1, user_id=7, description='order 3', isopen=1, id=3), Order( address_id=None, user_id=7, description='order 5', isopen=0, id=5)])]) self.assert_sql_count(testing.db, go, 1) class MixedEntitiesTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_values(self): Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() assert list(sess.query(User).values()) == list() sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User) q2 = q.select_entity_from(sel).values(User.name) eq_(list(q2), [('jack',), ('ed',)]) q = sess.query(User) q2 = q.order_by(User.id).\ values(User.name, User.name + " " + cast(User.id, String(50))) eq_( list(q2), [ ('jack', 'jack 7'), ('ed', 'ed 8'), ('fred', 'fred 9'), ('chuck', 'chuck 10')] ) q2 = q.join('addresses').filter(User.name.like('%e%')).\ order_by(User.id, Address.id).\ values(User.name, Address.email_address) eq_( list(q2), [ ('ed', 'ed@wood.com'), ('ed', 'ed@bettyboop.com'), ('ed', 'ed@lala.com'), ('fred', 'fred@fred.com')]) q2 = q.join('addresses').filter(User.name.like('%e%')).\ order_by(desc(Address.email_address)).\ slice(1, 3).values(User.name, Address.email_address) eq_(list(q2), [('ed', 'ed@wood.com'), ('ed', 'ed@lala.com')]) adalias = aliased(Address) q2 = q.join(adalias, 'addresses'). \ filter(User.name.like('%e%')).order_by(adalias.email_address).\ values(User.name, adalias.email_address) eq_(list(q2), [('ed', 'ed@bettyboop.com'), ('ed', 'ed@lala.com'), ('ed', 'ed@wood.com'), ('fred', 'fred@fred.com')]) q2 = q.values(func.count(User.name)) assert next(q2) == (4,) q2 = q.select_entity_from(sel).filter(User.id == 8). \ values(User.name, sel.c.name, User.name) eq_(list(q2), [('ed', 'ed', 'ed')]) # using User.xxx is alised against "sel", so this query returns nothing q2 = q.select_entity_from(sel).filter(User.id == 8).\ filter(User.id > sel.c.id).values(User.name, sel.c.name, User.name) eq_(list(q2), []) # whereas this uses users.c.xxx, is not aliased and creates a new join q2 = q.select_entity_from(sel).filter(users.c.id == 8).\ filter(users.c.id > sel.c.id). 
\ values(users.c.name, sel.c.name, User.name) eq_(list(q2), [('ed', 'jack', 'jack')]) def test_alias_naming(self): User = self.classes.User sess = create_session() ua = aliased(User, name="foobar") q = sess.query(ua) self.assert_compile( q, "SELECT foobar.id AS foobar_id, " "foobar.name AS foobar_name FROM users AS foobar" ) @testing.fails_on('mssql', 'FIXME: unknown') def test_values_specific_order_by(self): users, User = self.tables.users, self.classes.User sess = create_session() assert list(sess.query(User).values()) == list() sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User) u2 = aliased(User) q2 = q.select_entity_from(sel).filter(u2.id > 1).\ order_by(User.id, sel.c.id, u2.id).\ values(User.name, sel.c.name, u2.name) eq_( list(q2), [ ('jack', 'jack', 'jack'), ('jack', 'jack', 'ed'), ('jack', 'jack', 'fred'), ('jack', 'jack', 'chuck'), ('ed', 'ed', 'jack'), ('ed', 'ed', 'ed'), ('ed', 'ed', 'fred'), ('ed', 'ed', 'chuck')]) @testing.fails_on('mssql', 'FIXME: unknown') @testing.fails_on('oracle', "Oracle doesn't support boolean expressions as " "columns") @testing.fails_on('postgresql+pg8000', "pg8000 parses the SQL itself before passing on " "to PG, doesn't parse this") @testing.fails_on('postgresql+zxjdbc', "zxjdbc parses the SQL itself before passing on " "to PG, doesn't parse this") @testing.fails_on("firebird", "unknown") def test_values_with_boolean_selects(self): """Tests a values clause that works with select boolean evaluations""" User = self.classes.User sess = create_session() q = sess.query(User) q2 = q.group_by(User.name.like('%j%')).\ order_by(desc(User.name.like('%j%'))).\ values(User.name.like('%j%'), func.count(User.name.like('%j%'))) eq_(list(q2), [(True, 1), (False, 3)]) q2 = q.order_by(desc(User.name.like('%j%'))). \ values(User.name.like('%j%')) eq_(list(q2), [(True,), (False,), (False,), (False,)]) def test_correlated_subquery(self): """test that a subquery constructed from ORM attributes doesn't leak out those entities to the outermost query.""" Address, users, User = ( self.classes.Address, self.tables.users, self.classes.User) sess = create_session() subq = select([func.count()]).where(User.id == Address.user_id).\ correlate(users).label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [ (User(id=7, name='jack'), 1), (User(id=8, name='ed'), 3), (User(id=9, name='fred'), 1)]) # same thing without the correlate, as it should # not be needed subq = select([func.count()]).where(User.id == Address.user_id).\ label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [ (User(id=7, name='jack'), 1), (User(id=8, name='ed'), 3), (User(id=9, name='fred'), 1)]) def test_column_queries(self): Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() eq_( sess.query(User.name).all(), [('jack',), ('ed',), ('fred',), ('chuck',)]) sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User.name) q2 = q.select_entity_from(sel).all() eq_(list(q2), [('jack',), ('ed',)]) eq_( sess.query(User.name, Address.email_address). filter(User.id == Address.user_id).all(), [ ('jack', 'jack@bean.com'), ('ed', 'ed@wood.com'), ('ed', 'ed@bettyboop.com'), ('ed', 'ed@lala.com'), ('fred', 'fred@fred.com')]) eq_( sess.query(User.name, func.count(Address.email_address)). outerjoin(User.addresses).group_by(User.id, User.name). 
order_by(User.id).all(), [('jack', 1), ('ed', 3), ('fred', 1), ('chuck', 0)]) eq_( sess.query(User, func.count(Address.email_address)). outerjoin(User.addresses).group_by(User). order_by(User.id).all(), [ (User(name='jack', id=7), 1), (User(name='ed', id=8), 3), (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)]) eq_( sess.query(func.count(Address.email_address), User). outerjoin(User.addresses).group_by(User). order_by(User.id).all(), [ (1, User(name='jack', id=7)), (3, User(name='ed', id=8)), (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))]) adalias = aliased(Address) eq_( sess.query(User, func.count(adalias.email_address)). outerjoin(adalias, 'addresses').group_by(User). order_by(User.id).all(), [ (User(name='jack', id=7), 1), (User(name='ed', id=8), 3), (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)]) eq_( sess.query(func.count(adalias.email_address), User). outerjoin(adalias, User.addresses).group_by(User). order_by(User.id).all(), [ (1, User(name='jack', id=7)), (3, User(name='ed', id=8)), (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))] ) # select from aliasing + explicit aliasing eq_( sess.query(User, adalias.email_address, adalias.id). outerjoin(adalias, User.addresses). from_self(User, adalias.email_address). order_by(User.id, adalias.id).all(), [ (User(name='jack', id=7), 'jack@bean.com'), (User(name='ed', id=8), 'ed@wood.com'), (User(name='ed', id=8), 'ed@bettyboop.com'), (User(name='ed', id=8), 'ed@lala.com'), (User(name='fred', id=9), 'fred@fred.com'), (User(name='chuck', id=10), None) ] ) # anon + select from aliasing eq_( sess.query(User).join(User.addresses, aliased=True). filter(Address.email_address.like('%ed%')). from_self().all(), [ User(name='ed', id=8), User(name='fred', id=9), ] ) # test eager aliasing, with/without select_entity_from aliasing for q in [ sess.query(User, adalias.email_address). outerjoin(adalias, User.addresses). options(joinedload(User.addresses)). order_by(User.id, adalias.id).limit(10), sess.query(User, adalias.email_address, adalias.id). outerjoin(adalias, User.addresses). from_self(User, adalias.email_address). options(joinedload(User.addresses)). 
order_by(User.id, adalias.id).limit(10), ]: eq_( q.all(), [ ( User( addresses=[ Address( user_id=7, email_address='jack@bean.com', id=1)], name='jack', id=7), 'jack@bean.com'), ( User( addresses=[ Address( user_id=8, email_address='ed@wood.com', id=2), Address( user_id=8, email_address='ed@bettyboop.com', id=3), Address( user_id=8, email_address='ed@lala.com', id=4)], name='ed', id=8), 'ed@wood.com'), ( User( addresses=[ Address( user_id=8, email_address='ed@wood.com', id=2), Address( user_id=8, email_address='ed@bettyboop.com', id=3), Address( user_id=8, email_address='ed@lala.com', id=4)], name='ed', id=8), 'ed@bettyboop.com'), ( User( addresses=[ Address( user_id=8, email_address='ed@wood.com', id=2), Address( user_id=8, email_address='ed@bettyboop.com', id=3), Address( user_id=8, email_address='ed@lala.com', id=4)], name='ed', id=8), 'ed@lala.com'), ( User( addresses=[ Address( user_id=9, email_address='fred@fred.com', id=5)], name='fred', id=9), 'fred@fred.com'), (User(addresses=[], name='chuck', id=10), None)]) def test_column_from_limited_joinedload(self): User = self.classes.User sess = create_session() def go(): results = sess.query(User).limit(1).\ options(joinedload('addresses')).add_column(User.name).all() eq_(results, [(User(name='jack'), 'jack')]) self.assert_sql_count(testing.db, go, 1) @testing.fails_on("firebird", "unknown") def test_self_referential(self): Order = self.classes.Order sess = create_session() oalias = aliased(Order) for q in [ sess.query(Order, oalias).filter(Order.user_id == oalias.user_id). filter(Order.user_id == 7). filter(Order.id > oalias.id).order_by(Order.id, oalias.id), sess.query(Order, oalias).from_self(). filter(Order.user_id == oalias.user_id).filter(Order.user_id == 7). filter(Order.id > oalias.id).order_by(Order.id, oalias.id), # same thing, but reversed. sess.query(oalias, Order).from_self(). filter(oalias.user_id == Order.user_id). filter(oalias.user_id == 7).filter(Order.id < oalias.id). order_by(oalias.id, Order.id), # here we go....two layers of aliasing sess.query(Order, oalias).filter(Order.user_id == oalias.user_id). filter(Order.user_id == 7).filter(Order.id > oalias.id). from_self().order_by(Order.id, oalias.id). limit(10).options(joinedload(Order.items)), # gratuitous four layers sess.query(Order, oalias).filter(Order.user_id == oalias.user_id). filter(Order.user_id == 7).filter(Order.id > oalias.id). from_self().from_self().from_self().order_by(Order.id, oalias.id). 
limit(10).options(joinedload(Order.items)), ]: eq_( q.all(), [ ( Order( address_id=1, description='order 3', isopen=1, user_id=7, id=3), Order( address_id=1, description='order 1', isopen=0, user_id=7, id=1)), ( Order( address_id=None, description='order 5', isopen=0, user_id=7, id=5), Order( address_id=1, description='order 1', isopen=0, user_id=7, id=1)), ( Order( address_id=None, description='order 5', isopen=0, user_id=7, id=5), Order( address_id=1, description='order 3', isopen=1, user_id=7, id=3)) ] ) # ensure column expressions are taken from inside the subquery, not # restated at the top q = sess.query( Order.id, Order.description, literal_column("'q'").label('foo')).\ filter(Order.description == 'order 3').from_self() self.assert_compile( q, "SELECT anon_1.orders_id AS " "anon_1_orders_id, anon_1.orders_descriptio" "n AS anon_1_orders_description, " "anon_1.foo AS anon_1_foo FROM (SELECT " "orders.id AS orders_id, " "orders.description AS orders_description, " "'q' AS foo FROM orders WHERE " "orders.description = :description_1) AS " "anon_1") eq_( q.all(), [(3, 'order 3', 'q')] ) def test_multi_mappers(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) test_session = create_session() (user7, user8, user9, user10) = test_session.query(User).all() (address1, address2, address3, address4, address5) = \ test_session.query(Address).all() expected = [(user7, address1), (user8, address2), (user8, address3), (user8, address4), (user9, address5), (user10, None)] sess = create_session() selectquery = users.outerjoin(addresses). \ select(use_labels=True, order_by=[users.c.id, addresses.c.id]) eq_( list(sess.query(User, Address).instances(selectquery.execute())), expected) sess.expunge_all() for address_entity in (Address, aliased(Address)): q = sess.query(User).add_entity(address_entity).\ outerjoin(address_entity, 'addresses').\ order_by(User.id, address_entity.id) eq_(q.all(), expected) sess.expunge_all() q = sess.query(User).add_entity(address_entity) q = q.join(address_entity, 'addresses') q = q.filter_by(email_address='ed@bettyboop.com') eq_(q.all(), [(user8, address3)]) sess.expunge_all() q = sess.query(User, address_entity). \ join(address_entity, 'addresses'). \ filter_by(email_address='ed@bettyboop.com') eq_(q.all(), [(user8, address3)]) sess.expunge_all() q = sess.query(User, address_entity). \ join(address_entity, 'addresses').\ options(joinedload('addresses')).\ filter_by(email_address='ed@bettyboop.com') eq_(list(util.OrderedSet(q.all())), [(user8, address3)]) sess.expunge_all() def test_aliased_multi_mappers(self): User, addresses, users, Address = (self.classes.User, self.tables.addresses, self.tables.users, self.classes.Address) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() (address1, address2, address3, address4, address5) = \ sess.query(Address).all() expected = [(user7, address1), (user8, address2), (user8, address3), (user8, address4), (user9, address5), (user10, None)] q = sess.query(User) adalias = addresses.alias('adalias') q = q.add_entity(Address, alias=adalias). \ select_entity_from(users.outerjoin(adalias)) l = q.order_by(User.id, adalias.c.id).all() assert l == expected sess.expunge_all() q = sess.query(User).add_entity(Address, alias=adalias) l = q.select_entity_from(users.outerjoin(adalias)). 
\ filter(adalias.c.email_address == 'ed@bettyboop.com').all() assert l == [(user8, address3)] def test_with_entities(self): User, Address = self.classes.User, self.classes.Address sess = create_session() q = sess.query(User).filter(User.id == 7).order_by(User.name) self.assert_compile( q.with_entities(User.id, Address). filter(Address.user_id == User.id), 'SELECT users.id AS users_id, addresses.id ' 'AS addresses_id, addresses.user_id AS ' 'addresses_user_id, addresses.email_address' ' AS addresses_email_address FROM users, ' 'addresses WHERE users.id = :id_1 AND ' 'addresses.user_id = users.id ORDER BY ' 'users.name') def test_multi_columns(self): users, User = self.tables.users, self.classes.User sess = create_session() expected = [(u, u.name) for u in sess.query(User).all()] for add_col in (User.name, users.c.name): assert sess.query(User).add_column(add_col).all() == expected sess.expunge_all() assert_raises( sa_exc.InvalidRequestError, sess.query(User).add_column, object()) def test_add_multi_columns(self): """test that add_column accepts a FROM clause.""" users, User = self.tables.users, self.classes.User sess = create_session() eq_( sess.query(User.id).add_column(users).all(), [(7, 7, 'jack'), (8, 8, 'ed'), (9, 9, 'fred'), (10, 10, 'chuck')] ) def test_multi_columns_2(self): """test aliased/nonalised joins with the usage of add_column()""" User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() expected = [(user7, 1), (user8, 3), (user9, 1), (user10, 0) ] q = sess.query(User) q = q.group_by(users).order_by(User.id).outerjoin('addresses').\ add_column(func.count(Address.id).label('count')) eq_(q.all(), expected) sess.expunge_all() adalias = aliased(Address) q = sess.query(User) q = q.group_by(users).order_by(User.id). \ outerjoin(adalias, 'addresses').\ add_column(func.count(adalias.id).label('count')) eq_(q.all(), expected) sess.expunge_all() # TODO: figure out why group_by(users) doesn't work here s = select([users, func.count(addresses.c.id).label('count')]). \ select_from(users.outerjoin(addresses)). 
\ group_by(*[c for c in users.c]).order_by(User.id) q = sess.query(User) l = q.add_column("count").from_statement(s).all() assert l == expected def test_raw_columns(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() expected = [ (user7, 1, "Name:jack"), (user8, 3, "Name:ed"), (user9, 1, "Name:fred"), (user10, 0, "Name:chuck")] adalias = addresses.alias() q = create_session().query(User).add_column(func.count(adalias.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected # test with a straight statement s = select( [ users, func.count(addresses.c.id).label('count'), ("Name:" + users.c.name).label('concat')], from_obj=[users.outerjoin(addresses)], group_by=[c for c in users.c], order_by=[users.c.id]) q = create_session().query(User) l = q.add_column("count").add_column("concat").from_statement(s).all() assert l == expected sess.expunge_all() # test with select_entity_from() q = create_session().query(User).add_column(func.count(addresses.c.id))\ .add_column(("Name:" + users.c.name)).select_entity_from(users.outerjoin(addresses))\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() q = create_session().query(User).add_column(func.count(addresses.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin('addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() q = create_session().query(User).add_column(func.count(adalias.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() def test_expression_selectable_matches_mzero(self): User, Address = self.classes.User, self.classes.Address ua = aliased(User) aa = aliased(Address) s = create_session() for crit, j, exp in [ ( User.id + Address.id, User.addresses, "SELECT users.id + addresses.id AS anon_1 " "FROM users JOIN addresses ON users.id = " "addresses.user_id"), ( User.id + Address.id, Address.user, "SELECT users.id + addresses.id AS anon_1 " "FROM addresses JOIN users ON users.id = " "addresses.user_id"), ( Address.id + User.id, User.addresses, "SELECT addresses.id + users.id AS anon_1 " "FROM users JOIN addresses ON users.id = " "addresses.user_id"), ( User.id + aa.id, (aa, User.addresses), "SELECT users.id + addresses_1.id AS anon_1 " "FROM users JOIN addresses AS addresses_1 " "ON users.id = addresses_1.user_id"), ]: q = s.query(crit) mzero = q._mapper_zero() assert mzero.mapped_table is q._entity_zero().selectable q = q.join(j) self.assert_compile(q, exp) for crit, j, exp in [ ( ua.id + Address.id, ua.addresses, "SELECT users_1.id + addresses.id AS anon_1 " "FROM users AS users_1 JOIN addresses " "ON users_1.id = addresses.user_id"), ( ua.id + aa.id, (aa, ua.addresses), "SELECT users_1.id + addresses_1.id AS anon_1 " "FROM users AS users_1 JOIN addresses AS " "addresses_1 ON users_1.id = addresses_1.user_id"), ( ua.id + aa.id, (ua, aa.user), "SELECT users_1.id + addresses_1.id AS anon_1 " "FROM addresses AS addresses_1 JOIN " "users AS users_1 " "ON users_1.id = addresses_1.user_id") ]: q = s.query(crit) mzero = q._mapper_zero() assert inspect(mzero).selectable is q._entity_zero().selectable q = q.join(j) self.assert_compile(q, exp) def test_aliased_adapt_on_names(self): User, Address = self.classes.User, self.classes.Address sess = Session() agg_address = 
sess.query( Address.id, func.sum(func.length(Address.email_address)). label('email_address')).group_by(Address.user_id) ag1 = aliased(Address, agg_address.subquery()) ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True) # first, without adapt on names, 'email_address' isn't matched up - we # get the raw "address" element in the SELECT self.assert_compile( sess.query(User, ag1.email_address).join(ag1, User.addresses). filter(ag1.email_address > 5), "SELECT users.id " "AS users_id, users.name AS users_name, addresses.email_address " "AS addresses_email_address FROM addresses, users JOIN " "(SELECT addresses.id AS id, sum(length(addresses.email_address)) " "AS email_address FROM addresses GROUP BY addresses.user_id) AS " "anon_1 ON users.id = addresses.user_id " "WHERE addresses.email_address > :email_address_1") # second, 'email_address' matches up to the aggreagte, and we get a # smooth JOIN from users->subquery and that's it self.assert_compile( sess.query(User, ag2.email_address).join(ag2, User.addresses). filter(ag2.email_address > 5), "SELECT users.id AS users_id, users.name AS users_name, " "anon_1.email_address AS anon_1_email_address FROM users " "JOIN (" "SELECT addresses.id AS id, sum(length(addresses.email_address)) " "AS email_address FROM addresses GROUP BY addresses.user_id) AS " "anon_1 ON users.id = addresses.user_id " "WHERE anon_1.email_address > :email_address_1",) class SelectFromTest(QueryTest, AssertsCompiledSQL): run_setup_mappers = None __dialect__ = 'default' def test_replace_with_select(self): users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper( User, users, properties={ 'addresses': relationship(Address)}) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])).alias() sess = create_session() eq_( sess.query(User).select_entity_from(sel).all(), [User(id=7), User(id=8)]) eq_( sess.query(User).select_entity_from(sel). filter(User.id == 8).all(), [User(id=8)]) eq_( sess.query(User).select_entity_from(sel). order_by(desc(User.name)).all(), [ User(name='jack', id=7), User(name='ed', id=8)]) eq_( sess.query(User).select_entity_from(sel). order_by(asc(User.name)).all(), [ User(name='ed', id=8), User(name='jack', id=7)]) eq_( sess.query(User).select_entity_from(sel). options(joinedload('addresses')).first(), User(name='jack', addresses=[Address(id=1)])) def test_join_mapper_order_by(self): """test that mapper-level order_by is adapted to a selectable.""" User, users = self.classes.User, self.tables.users mapper(User, users, order_by=users.c.id) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_( sess.query(User).select_entity_from(sel).all(), [ User(name='jack', id=7), User(name='ed', id=8)]) def test_differentiate_self_external(self): """test some different combinations of joining a table to a subquery of itself.""" users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() sel = sess.query(User).filter(User.id.in_([7, 8])).subquery() ualias = aliased(User) self.assert_compile( sess.query(User).join(sel, User.id > sel.c.id), "SELECT users.id AS users_id, users.name AS users_name FROM " "users JOIN (SELECT users.id AS id, users.name AS name FROM users " "WHERE users.id IN (:id_1, :id_2)) " "AS anon_1 ON users.id > anon_1.id",) self.assert_compile( sess.query(ualias).select_entity_from(sel). 
filter(ualias.id > sel.c.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users AS users_1, (" "SELECT users.id AS id, users.name AS name FROM users " "WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "WHERE users_1.id > anon_1.id",) self.assert_compile( sess.query(ualias).select_entity_from(sel). join(ualias, ualias.id > sel.c.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id") self.assert_compile( sess.query(ualias).select_entity_from(sel). join(ualias, ualias.id > User.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id") salias = aliased(User, sel) self.assert_compile( sess.query(salias).join(ualias, ualias.id > salias.id), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM " "(SELECT users.id AS id, users.name AS name " "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id",) self.assert_compile( sess.query(ualias).select_entity_from( join(sel, ualias, ualias.id > sel.c.id)), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM " "(SELECT users.id AS id, users.name AS name " "FROM users WHERE users.id " "IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id") def test_aliased_class_vs_nonaliased(self): User, users = self.classes.User, self.tables.users mapper(User, users) ua = aliased(User) sess = create_session() self.assert_compile( sess.query(User).select_from(ua).join(User, ua.name > User.name), "SELECT users.id AS users_id, users.name AS users_name " "FROM users AS users_1 JOIN users ON users_1.name > users.name" ) self.assert_compile( sess.query(User.name).select_from(ua). join(User, ua.name > User.name), "SELECT users.name AS users_name FROM users AS users_1 " "JOIN users ON users_1.name > users.name" ) self.assert_compile( sess.query(ua.name).select_from(ua). 
join(User, ua.name > User.name), "SELECT users_1.name AS users_1_name FROM users AS users_1 " "JOIN users ON users_1.name > users.name" ) self.assert_compile( sess.query(ua).select_from(User).join(ua, ua.name > User.name), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users JOIN users AS users_1 ON users_1.name > users.name" ) self.assert_compile( sess.query(ua).select_from(User).join(ua, User.name > ua.name), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users JOIN users AS users_1 ON users.name > users_1.name" ) # this is tested in many other places here, just adding it # here for comparison self.assert_compile( sess.query(User.name).select_entity_from( users.select().where(users.c.id > 5)), "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, " "users.name AS name FROM users WHERE users.id > :id_1) AS anon_1") def test_join_no_order_by(self): User, users = self.classes.User, self.tables.users mapper(User, users) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_( sess.query(User).select_entity_from(sel).all(), [User(name='jack', id=7), User(name='ed', id=8)]) def test_join_relname_from_selected_from(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties= {'addresses': relationship(mapper(Address, addresses), backref='user')}) sess = create_session() self.assert_compile( sess.query(User).select_from(Address).join("user"), "SELECT users.id AS users_id, users.name AS users_name " "FROM addresses JOIN users ON users.id = addresses.user_id" ) def test_filter_by_selected_from(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties= {'addresses': relationship(mapper(Address, addresses))}) sess = create_session() self.assert_compile( sess.query(User).select_from(Address). filter_by(email_address='ed').join(User), "SELECT users.id AS users_id, users.name AS users_name " "FROM addresses JOIN users ON users.id = addresses.user_id " "WHERE addresses.email_address = :email_address_1" ) def test_join_ent_selected_from(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties= {'addresses': relationship(mapper(Address, addresses))}) sess = create_session() self.assert_compile( sess.query(User).select_from(Address).join(User), "SELECT users.id AS users_id, users.name AS users_name " "FROM addresses JOIN users ON users.id = addresses.user_id" ) def test_join(self): users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={'addresses': relationship(Address)}) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_( sess.query(User).select_entity_from(sel).join('addresses'). add_entity(Address).order_by(User.id).order_by(Address.id).all(), [ ( User(name='jack', id=7), Address(user_id=7, email_address='jack@bean.com', id=1)), ( User(name='ed', id=8), Address(user_id=8, email_address='ed@wood.com', id=2)), ( User(name='ed', id=8), Address( user_id=8, email_address='ed@bettyboop.com', id=3)), ( User(name='ed', id=8), Address(user_id=8, email_address='ed@lala.com', id=4))]) adalias = aliased(Address) eq_( sess.query(User).select_entity_from(sel). 
join(adalias, 'addresses').add_entity(adalias).order_by(User.id). order_by(adalias.id).all(), [ ( User(name='jack', id=7), Address(user_id=7, email_address='jack@bean.com', id=1)), ( User(name='ed', id=8), Address(user_id=8, email_address='ed@wood.com', id=2)), ( User(name='ed', id=8), Address( user_id=8, email_address='ed@bettyboop.com', id=3)), ( User(name='ed', id=8), Address(user_id=8, email_address='ed@lala.com', id=4))]) def test_more_joins(self): ( users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords) = \ ( self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.keywords, self.tables.item_keywords) mapper( User, users, properties={ 'orders': relationship(Order, backref='user')}) # o2m, m2o mapper( Order, orders, properties={ 'items': relationship( Item, secondary=order_items, order_by=items.c.id)}) # m2m mapper( Item, items, properties={ 'keywords': relationship( Keyword, secondary=item_keywords, order_by=keywords.c.id)}) # m2m mapper(Keyword, keywords) sess = create_session() sel = users.select(users.c.id.in_([7, 8])) eq_( sess.query(User).select_entity_from(sel). join('orders', 'items', 'keywords'). filter(Keyword.name.in_(['red', 'big', 'round'])).all(), [User(name='jack', id=7)]) eq_( sess.query(User).select_entity_from(sel). join('orders', 'items', 'keywords', aliased=True). filter(Keyword.name.in_(['red', 'big', 'round'])).all(), [User(name='jack', id=7)]) def test_very_nested_joins_with_joinedload(self): ( users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords) = \ ( self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.keywords, self.tables.item_keywords) mapper( User, users, properties={ 'orders': relationship(Order, backref='user')}) # o2m, m2o mapper( Order, orders, properties={ 'items': relationship( Item, secondary=order_items, order_by=items.c.id)}) # m2m mapper( Item, items, properties={ 'keywords': relationship( Keyword, secondary=item_keywords, order_by=keywords.c.id)}) # m2m mapper(Keyword, keywords) sess = create_session() sel = users.select(users.c.id.in_([7, 8])) def go(): eq_( sess.query(User).select_entity_from(sel). options(joinedload_all('orders.items.keywords')). join('orders', 'items', 'keywords', aliased=True). filter(Keyword.name.in_(['red', 'big', 'round'])). all(), [ User(name='jack', orders=[ Order( description='order 1', items=[ Item( description='item 1', keywords=[ Keyword(name='red'), Keyword(name='big'), Keyword(name='round')]), Item( description='item 2', keywords=[ Keyword(name='red', id=2), Keyword(name='small', id=5), Keyword(name='square')]), Item( description='item 3', keywords=[ Keyword(name='green', id=3), Keyword(name='big', id=4), Keyword(name='round', id=6)])]), Order( description='order 3', items=[ Item( description='item 3', keywords=[ Keyword(name='green', id=3), Keyword(name='big', id=4), Keyword(name='round', id=6)]), Item(description='item 4', keywords=[], id=4), Item( description='item 5', keywords=[], id=5)]), Order( description='order 5', items=[ Item(description='item 5', keywords=[])])])]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() sel2 = orders.select(orders.c.id.in_([1, 2, 3])) eq_( sess.query(Order).select_entity_from(sel2). join('items', 'keywords').filter(Keyword.name == 'red'). 
order_by(Order.id).all(), [ Order(description='order 1', id=1), Order(description='order 2', id=2)]) eq_( sess.query(Order).select_entity_from(sel2). join('items', 'keywords', aliased=True). filter(Keyword.name == 'red').order_by(Order.id).all(), [ Order(description='order 1', id=1), Order(description='order 2', id=2)]) def test_replace_with_eager(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper( User, users, properties={ 'addresses': relationship(Address, order_by=addresses.c.id)}) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() def go(): eq_( sess.query(User).options(joinedload('addresses')). select_entity_from(sel).order_by(User.id).all(), [ User(id=7, addresses=[Address(id=1)]), User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_( sess.query(User).options(joinedload('addresses')). select_entity_from(sel).filter(User.id == 8).order_by(User.id). all(), [ User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_( sess.query(User).options(joinedload('addresses')). select_entity_from(sel).order_by(User.id)[1], User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])) self.assert_sql_count(testing.db, go, 1) class CustomJoinTest(QueryTest): run_setup_mappers = None def test_double_same_mappers(self): """test aliasing of joins with a custom join condition""" ( addresses, items, order_items, orders, Item, User, Address, Order, users) = \ ( self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(Address, addresses) mapper( Order, orders, properties={ 'items': relationship( Item, secondary=order_items, lazy='select', order_by=items.c.id)}) mapper(Item, items) mapper( User, users, properties=dict( addresses=relationship(Address, lazy='select'), open_orders=relationship( Order, primaryjoin=and_( orders.c.isopen == 1, users.c.id == orders.c.user_id), lazy='select'), closed_orders=relationship( Order, primaryjoin=and_( orders.c.isopen == 0, users.c.id == orders.c.user_id), lazy='select'))) q = create_session().query(User) eq_( q.join('open_orders', 'items', aliased=True).filter(Item.id == 4). join('closed_orders', 'items', aliased=True).filter(Item.id == 3). all(), [User(id=7)] ) class ExternalColumnsTest(QueryTest): """test mappers with SQL-expressions added as column properties.""" run_setup_mappers = None def test_external_columns_bad(self): users, User = self.tables.users, self.classes.User assert_raises_message( sa_exc.ArgumentError, "not represented in the mapper's table", mapper, User, users, properties={ 'concat': (users.c.id * 2), }) clear_mappers() def test_external_columns(self): """test querying mappings that reference external columns or selectables.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper( User, users, properties={ 'concat': column_property((users.c.id * 2)), 'count': column_property( select( [func.count(addresses.c.id)], users.c.id == addresses.c.user_id).correlate(users). 
as_scalar())}) mapper(Address, addresses, properties={ 'user': relationship(User) }) sess = create_session() sess.query(Address).options(joinedload('user')).all() eq_( sess.query(User).all(), [ User(id=7, concat=14, count=1), User(id=8, concat=16, count=3), User(id=9, concat=18, count=1), User(id=10, concat=20, count=0), ]) address_result = [ Address(id=1, user=User(id=7, concat=14, count=1)), Address(id=2, user=User(id=8, concat=16, count=3)), Address(id=3, user=User(id=8, concat=16, count=3)), Address(id=4, user=User(id=8, concat=16, count=3)), Address(id=5, user=User(id=9, concat=18, count=1)) ] eq_(sess.query(Address).all(), address_result) # run the eager version twice to test caching of aliased clauses for x in range(2): sess.expunge_all() def go(): eq_( sess.query(Address).options(joinedload('user')). order_by(Address.id).all(), address_result) self.assert_sql_count(testing.db, go, 1) ualias = aliased(User) eq_( sess.query(Address, ualias).join(ualias, 'user').all(), [(address, address.user) for address in address_result] ) eq_( sess.query(Address, ualias.count).join(ualias, 'user'). join('user', aliased=True).order_by(Address.id).all(), [ (Address(id=1), 1), (Address(id=2), 3), (Address(id=3), 3), (Address(id=4), 3), (Address(id=5), 1) ] ) eq_( sess.query(Address, ualias.concat, ualias.count). join(ualias, 'user'). join('user', aliased=True).order_by(Address.id).all(), [ (Address(id=1), 14, 1), (Address(id=2), 16, 3), (Address(id=3), 16, 3), (Address(id=4), 16, 3), (Address(id=5), 18, 1) ] ) ua = aliased(User) eq_( sess.query(Address, ua.concat, ua.count). select_entity_from(join(Address, ua, 'user')). options(joinedload(Address.user)).order_by(Address.id).all(), [ (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1), (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1) ]) eq_( list( sess.query(Address).join('user'). values(Address.id, User.id, User.concat, User.count)), [ (1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)]) eq_( list( sess.query(Address, ua). select_entity_from(join(Address, ua, 'user')). values(Address.id, ua.id, ua.concat, ua.count)), [ (1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)]) def test_external_columns_joinedload(self): users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) # in this test, we have a subquery on User that accesses "addresses", # underneath an joinedload for "addresses". So the "addresses" alias # adapter needs to *not* hit the "addresses" table within the "user" # subquery, but "user" still needs to be adapted. therefore the long # standing practice of eager adapters being "chained" has been removed # since its unnecessary and breaks this exact condition. mapper( User, users, properties={ 'addresses': relationship( Address, backref='user', order_by=addresses.c.id), 'concat': column_property((users.c.id * 2)), 'count': column_property( select( [func.count(addresses.c.id)], users.c.id == addresses.c.user_id).correlate(users))}) mapper(Address, addresses) mapper( Order, orders, properties={ 'address': relationship(Address)}) # m2o sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')). 
\ get(1) eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')). \ first() eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) def test_external_columns_compound(self): # see [ticket:2167] for background users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper( User, users, properties={ 'fullname': column_property(users.c.name.label('x'))}) mapper( Address, addresses, properties={ 'username': column_property( select([User.fullname]). where(User.id == addresses.c.user_id).label('y'))}) sess = create_session() a1 = sess.query(Address).first() eq_(a1.username, "jack") sess = create_session() a1 = sess.query(Address).from_self().first() eq_(a1.username, "jack") class TestOverlyEagerEquivalentCols(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( 'base', metadata, Column( 'id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) Table( 'sub1', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('data', String(50)) ) Table( 'sub2', metadata, Column( 'id', Integer, ForeignKey('base.id'), ForeignKey('sub1.id'), primary_key=True), Column('data', String(50)) ) def test_equivs(self): base, sub2, sub1 = ( self.tables.base, self.tables.sub2, self.tables.sub1) class Base(fixtures.ComparableEntity): pass class Sub1(fixtures.ComparableEntity): pass class Sub2(fixtures.ComparableEntity): pass mapper(Base, base, properties={ 'sub1': relationship(Sub1), 'sub2': relationship(Sub2) }) mapper(Sub1, sub1) mapper(Sub2, sub2) sess = create_session() s11 = Sub1(data='s11') s12 = Sub1(data='s12') s2 = Sub2(data='s2') b1 = Base(data='b1', sub1=[s11], sub2=[]) b2 = Base(data='b1', sub1=[s12], sub2=[]) sess.add(b1) sess.add(b2) sess.flush() # there's an overlapping ForeignKey here, so not much option except # to artificially control the flush order b2.sub2 = [s2] sess.flush() q = sess.query(Base).outerjoin('sub2', aliased=True) assert sub1.c.id not in q._filter_aliases.equivalents eq_( sess.query(Base).join('sub1').outerjoin('sub2', aliased=True). filter(Sub1.id == 1).one(), b1 ) class LabelCollideTest(fixtures.MappedTest): """Test handling for a label collision. This collision is handled by core, see ticket:2702 as well as test/sql/test_selectable->WithLabelsTest. here we want to make sure the end result is as we expect. 
""" @classmethod def define_tables(cls, metadata): Table( 'foo', metadata, Column('id', Integer, primary_key=True), Column('bar_id', Integer) ) Table('foo_bar', metadata, Column('id', Integer, primary_key=True)) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass @classmethod def setup_mappers(cls): mapper(cls.classes.Foo, cls.tables.foo) mapper(cls.classes.Bar, cls.tables.foo_bar) @classmethod def insert_data(cls): s = Session() s.add_all([ cls.classes.Foo(id=1, bar_id=2), cls.classes.Bar(id=3) ]) s.commit() def test_overlap_plain(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0) def test_overlap_subquery(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0) # # {c) 2017 Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import os import json from collections import Mapping from ansible.module_utils.network_common import Template from ansible.module_utils.six import iteritems, string_types from ansible.errors import AnsibleError try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False try: import textfsm HAS_TEXTFSM = True except ImportError: HAS_TEXTFSM = False try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() def re_matchall(regex, value): objects = list() for match in re.findall(regex.pattern, value, re.M): obj = {} if regex.groupindex: for name, index in iteritems(regex.groupindex): if len(regex.groupindex) == 1: obj[name] = match else: obj[name] = match[index - 1] objects.append(obj) return objects def re_search(regex, value): obj = {} match = regex.search(value, re.M) if match: items = list(match.groups()) if regex.groupindex: for name, index in iteritems(regex.groupindex): obj[name] = items[index - 1] return obj def parse_cli(output, tmpl): if not isinstance(output, string_types): raise AnsibleError("parse_cli input should be a string, but was given a input of %s" % (type(output))) if not os.path.exists(tmpl): raise AnsibleError('unable to locate parse_cli template: %s' % tmpl) try: template = Template() except ImportError as exc: raise AnsibleError(str(exc)) spec = yaml.safe_load(open(tmpl).read()) obj = {} for name, attrs in iteritems(spec['keys']): value = attrs['value'] try: variables = spec.get('vars', {}) value = template(value, variables) except: pass if 'start_block' in attrs and 'end_block' in attrs: start_block = re.compile(attrs['start_block']) end_block = re.compile(attrs['end_block']) blocks = list() lines = None block_started = False for line in output.split('\n'): match_start = start_block.match(line) match_end = end_block.match(line) if match_start: if lines: blocks.append('\n'.join(lines)) lines = list() lines.append(line) block_started = True elif match_end: if lines: lines.append(line) block_started = False elif block_started: if lines: lines.append(line) regex_items = [re.compile(r) for r in attrs['items']] objects = list() for block in blocks: if isinstance(value, Mapping) and 'key' not in value: items = list() for regex in regex_items: match = regex.search(block) if match: item_values = match.groupdict() item_values['match'] = list(match.groups()) items.append(item_values) else: items.append(None) obj = {} for k, v in iteritems(value): try: obj[k] = template(v, {'item': items}, fail_on_undefined=False) except: obj[k] = None objects.append(obj) elif isinstance(value, Mapping): items = list() for regex in regex_items: match = regex.search(block) if match: item_values = match.groupdict() item_values['match'] = list(match.groups()) items.append(item_values) else: items.append(None) key = template(value['key'], {'item': items}) values = dict([(k, template(v, {'item': items})) for k, v in iteritems(value['values'])]) objects.append({key: values}) return objects elif 'items' in attrs: regexp = re.compile(attrs['items']) when = attrs.get('when') conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when if isinstance(value, Mapping) and 'key' not in value: values = list() for item in re_matchall(regexp, output): entry = {} for item_key, item_value in iteritems(value): entry[item_key] = template(item_value, {'item': item}) if when: if template(conditional, {'item': 
entry}): values.append(entry) else: values.append(entry) obj[name] = values elif isinstance(value, Mapping): values = dict() for item in re_matchall(regexp, output): entry = {} for item_key, item_value in iteritems(value['values']): entry[item_key] = template(item_value, {'item': item}) key = template(value['key'], {'item': item}) if when: if template(conditional, {'item': {'key': key, 'value': entry}}): values[key] = entry else: values[key] = entry obj[name] = values else: item = re_search(regexp, output) obj[name] = template(value, {'item': item}) else: obj[name] = value return obj def parse_cli_textfsm(value, template): if not HAS_TEXTFSM: raise AnsibleError('parse_cli_textfsm filter requires TextFSM library to be installed') if not isinstance(value, string_types): raise AnsibleError("parse_cli_textfsm input should be a string, but was given a input of %s" % (type(value))) if not os.path.exists(template): raise AnsibleError('unable to locate parse_cli_textfsm template: %s' % template) try: template = open(template) except IOError as exc: raise AnsibleError(str(exc)) re_table = textfsm.TextFSM(template) fsm_results = re_table.ParseText(value) results = list() for item in fsm_results: results.append(dict(zip(re_table.header, item))) return results class FilterModule(object): """Filters for working with output from network devices""" filter_map = { 'parse_cli': parse_cli, 'parse_cli_textfsm': parse_cli_textfsm } def filters(self): return self.filter_map from mediacrush.config import _cfgi, _cfg from mediacrush.paths import shard import base64 import string import os digs = string.digits + string.letters + "-_" # http://stackoverflow.com/posts/2267446/revisions def int2base(x, base): if x < 0: sign = -1 elif x == 0: return '0' else: sign = 1 x *= sign digits = [] while x: digits.append(digs[x % base]) x /= base if sign < 0: digits.append('-') digits.reverse() return ''.join(digits) def init(args): folder = _cfg("storage_folder") sharding_level = _cfgi("sharding") for i in range(64 ** sharding_level): try: os.mkdir(os.path.join(folder, int2base(i, 64))) except OSError, e: print(e) def migrate(args): base = _cfg("storage_folder") for f in os.listdir(base): path = os.path.join(base, f) if os.path.isfile(path): newpath = os.path.join(base, shard(f)) try: print("Moving " + path + " into " + newpath) os.rename(path, newpath) except: print("Move failed") #!/usr/bin/python """ PN-CLI vrouter-ospf-add/remove """ # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: pn_ospfarea author: "Pluribus Networks (@amitsi)" version_added: "2.2" short_description: CLI command to add/remove ospf area to/from a vrouter. deprecated: removed_in: '2.12' why: Doesn't support latest Pluribus Networks netvisor alternative: Latest modules will be pushed in Ansible future versions. 
description:
  - Execute vrouter-ospf-add, vrouter-ospf-remove command.
  - This command adds/removes Open Shortest Path First(OSPF) area to/from
    a virtual router(vRouter) service.
options:
  pn_cliusername:
    description:
      - Login username.
    required: true
  pn_clipassword:
    description:
      - Login password.
    required: true
  pn_cliswitch:
    description:
      - Target switch(es) to run the CLI on.
    required: False
  state:
    description:
      - State the action to perform. Use 'present' to add ospf-area, 'absent'
        to remove ospf-area and 'update' to modify ospf-area.
    required: true
    choices: ['present', 'absent', 'update']
  pn_vrouter_name:
    description:
      - Specify the name of the vRouter.
    required: true
  pn_ospf_area:
    description:
      - Specify the OSPF area number.
    required: true
  pn_stub_type:
    description:
      - Specify the OSPF stub type.
    choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
  pn_prefix_listin:
    description:
      - OSPF prefix list for filtering incoming packets.
  pn_prefix_listout:
    description:
      - OSPF prefix list for filtering outgoing packets.
  pn_quiet:
    description:
      - Enable/disable system information.
    required: false
    type: bool
    default: true
"""

EXAMPLES = """
- name: "Add OSPF area to vrouter"
  pn_ospfarea:
    state: present
    pn_cliusername: admin
    pn_clipassword: admin
    pn_vrouter_name: name-string
    pn_ospf_area: 1.0.0.0
    pn_stub_type: stub

- name: "Remove OSPF area from vrouter"
  pn_ospfarea:
    state: absent
    pn_cliusername: admin
    pn_clipassword: admin
    pn_vrouter_name: name-string
    pn_ospf_area: 1.0.0.0
"""

RETURN = """
command:
  description: The CLI command run on the target node(s).
  returned: always
  type: str
stdout:
  description: The set of responses from the ospf command.
  returned: always
  type: list
stderr:
  description: The set of error responses from the ospf command.
  returned: on error
  type: list
changed:
  description: Indicates whether the CLI caused changes on the target.
  returned: always
  type: bool
"""

import shlex

# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule


def get_command_from_state(state):
    """
    Return the appropriate CLI command name for the state specified.
    :param state: The state for which the respective command name is
    required.
    """
    command = None
    if state == 'present':
        command = 'vrouter-ospf-area-add'
    if state == 'absent':
        command = 'vrouter-ospf-area-remove'
    if state == 'update':
        command = 'vrouter-ospf-area-modify'
    return command


def main():
    """ This section is for arguments parsing """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=True, type='str'),
            pn_clipassword=dict(required=True, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_ospf_area=dict(required=True, type='str'),
            pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
                                                   'stub-no-summary',
                                                   'nssa-no-summary']),
            pn_prefix_listin=dict(type='str'),
            pn_prefix_listout=dict(type='str'),
            pn_quiet=dict(type='bool', default=True)
        )
    )

    # Accessing the arguments
    cliusername = module.params['pn_cliusername']
    clipassword = module.params['pn_clipassword']
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    ospf_area = module.params['pn_ospf_area']
    stub_type = module.params['pn_stub_type']
    prefix_listin = module.params['pn_prefix_listin']
    prefix_listout = module.params['pn_prefix_listout']
    quiet = module.params['pn_quiet']

    command = get_command_from_state(state)

    # Building the CLI command string
    cli = '/usr/bin/cli'

    if quiet is True:
        cli += ' --quiet '

    cli += ' --user %s:%s ' % (cliusername, clipassword)

    if cliswitch:
        if cliswitch == 'local':
            cli += ' switch-local '
        else:
            cli += ' switch ' + cliswitch

    cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)

    if stub_type:
        cli += ' stub-type ' + stub_type

    if prefix_listin:
        cli += ' prefix-list-in ' + prefix_listin

    if prefix_listout:
        cli += ' prefix-list-out ' + prefix_listout

    # Run the CLI command
    ospfcommand = shlex.split(cli)

    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(ospfcommand)

    # Response in JSON format
    if result != 0:
        module.exit_json(
            command=cli,
            stderr=err.rstrip("\r\n"),
            changed=False
        )
    else:
        module.exit_json(
            command=cli,
            stdout=out.rstrip("\r\n"),
            changed=True
        )


if __name__ == '__main__':
    main()
#!/usr/bin/env python
#
# vector3 and rotation matrix classes
# This follows the conventions in the ArduPilot code,
# and is essentially a python version of the AP_Math library
#
# Andrew Tridgell, March 2012
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

'''rotation matrix class
'''

from math import sin, cos, sqrt, asin, atan2, pi, radians, acos


class Vector3:
    '''a vector'''

    def __init__(self, x=None, y=None, z=None):
        if x is not None and y is not None and z is not None:
            self.x = float(x)
            self.y = float(y)
            self.z = float(z)
        elif x is not None and len(x) == 3:
            self.x = float(x[0])
            self.y = float(x[1])
            self.z = float(x[2])
        elif x is not None:
            raise ValueError('bad initialiser')
        else:
            self.x = float(0)
            self.y = float(0)
            self.z = float(0)

    def __repr__(self):
        return 'Vector3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z)

    def __add__(self, v):
        return Vector3(self.x + v.x, self.y + v.y, self.z + v.z)

    __radd__ = __add__

    def __sub__(self, v):
        return Vector3(self.x - v.x, self.y - v.y, self.z - v.z)

    def __neg__(self):
        return Vector3(-self.x, -self.y, -self.z)

    def __rsub__(self, v):
        return Vector3(v.x - self.x, v.y - self.y, v.z - self.z)

    def __mul__(self, v):
        if isinstance(v, Vector3):
            # dot product
            return self.x * v.x + self.y * v.y + self.z * v.z
        return Vector3(self.x * v, self.y * v, self.z * v)

    __rmul__ = __mul__

    def __div__(self, v):
        return Vector3(self.x / v, self.y / v, self.z / v)

    def __mod__(self, v):
        '''cross product'''
        return Vector3(self.y * v.z - self.z * v.y,
                       self.z * v.x - self.x * v.z,
                       self.x * v.y - self.y * v.x)

    def __copy__(self):
        return Vector3(self.x, self.y, self.z)

    copy = __copy__

    def length(self):
        return sqrt(self.x**2 + self.y**2 + self.z**2)

    def zero(self):
        self.x = self.y = self.z = 0

    def angle(self, v):
        '''return the angle between this vector and another vector'''
        # the dot product must be normalised before acos(), not after
        return acos((self * v) / (self.length() * v.length()))

    def normalized(self):
        return self / self.length()

    def normalize(self):
        v = self.normalized()
        self.x = v.x
        self.y = v.y
        self.z = v.z


class Matrix3:
    '''a 3x3 matrix, intended as a rotation matrix'''

    def __init__(self, a=None, b=None, c=None):
        if a is not None and b is not None and c is not None:
            self.a = a.copy()
            self.b = b.copy()
            self.c = c.copy()
        else:
            self.identity()

    def __repr__(self):
        return 'Matrix3((%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f))' % (
            self.a.x, self.a.y, self.a.z,
            self.b.x, self.b.y, self.b.z,
            self.c.x, self.c.y, self.c.z)

    def identity(self):
        self.a = Vector3(1, 0, 0)
        self.b = Vector3(0, 1, 0)
        self.c = Vector3(0, 0, 1)

    def transposed(self):
        return Matrix3(Vector3(self.a.x, self.b.x, self.c.x),
                       Vector3(self.a.y, self.b.y, self.c.y),
                       Vector3(self.a.z, self.b.z, self.c.z))

    def from_euler(self, roll, pitch, yaw):
        '''fill the matrix from Euler angles in radians'''
        cp = cos(pitch)
        sp = sin(pitch)
        sr = sin(roll)
        cr = cos(roll)
        sy = sin(yaw)
        cy = cos(yaw)

        self.a.x = cp * cy
        self.a.y = (sr * sp * cy) - (cr * sy)
        self.a.z = (cr * sp * cy) + (sr * sy)
        self.b.x = cp * sy
        self.b.y = (sr * sp * sy) + (cr * cy)
        self.b.z = (cr * sp * sy) - (sr * cy)
        self.c.x = -sp
        self.c.y = sr * cp
        self.c.z = cr * cp

    def to_euler(self):
        '''find Euler angles for the matrix'''
        if self.c.x >= 1.0:
            pitch = pi
        elif self.c.x <= -1.0:
            pitch = -pi
        else:
            pitch = -asin(self.c.x)
        roll = atan2(self.c.y, self.c.z)
        yaw = atan2(self.b.x, self.a.x)
        return (roll, pitch, yaw)

    def __add__(self, m):
        return Matrix3(self.a + m.a, self.b + m.b, self.c + m.c)

    __radd__ = __add__

    def __sub__(self, m):
        return Matrix3(self.a - m.a, self.b - m.b, self.c - m.c)

    def __rsub__(self, m):
        return Matrix3(m.a - self.a, m.b - self.b, m.c - self.c)

    def __mul__(self, other):
        if isinstance(other, Vector3):
            v = other
            return Vector3(self.a.x * v.x + self.a.y * v.y + self.a.z * v.z,
                           self.b.x * v.x + self.b.y * v.y + self.b.z * v.z,
                           self.c.x * v.x + self.c.y * v.y + self.c.z * v.z)
        elif isinstance(other, Matrix3):
            m = other
            return Matrix3(Vector3(self.a.x * m.a.x + self.a.y * m.b.x + self.a.z * m.c.x,
                                   self.a.x * m.a.y + self.a.y * m.b.y + self.a.z * m.c.y,
                                   self.a.x * m.a.z + self.a.y * m.b.z + self.a.z * m.c.z),
                           Vector3(self.b.x * m.a.x + self.b.y * m.b.x + self.b.z * m.c.x,
                                   self.b.x * m.a.y + self.b.y * m.b.y + self.b.z * m.c.y,
                                   self.b.x * m.a.z + self.b.y * m.b.z + self.b.z * m.c.z),
                           Vector3(self.c.x * m.a.x + self.c.y * m.b.x + self.c.z * m.c.x,
                                   self.c.x * m.a.y + self.c.y * m.b.y + self.c.z * m.c.y,
                                   self.c.x * m.a.z + self.c.y * m.b.z + self.c.z * m.c.z))
        v = other
        return Matrix3(self.a * v, self.b * v, self.c * v)

    def __div__(self, v):
        return Matrix3(self.a / v, self.b / v, self.c / v)

    def __neg__(self):
        return Matrix3(-self.a, -self.b, -self.c)

    def __copy__(self):
        return Matrix3(self.a, self.b, self.c)

    copy = __copy__

    def rotate(self, g):
        '''rotate the matrix by a given amount on 3 axes'''
        temp_matrix = Matrix3()
        a = self.a
        b = self.b
        c = self.c
        temp_matrix.a.x = a.y * g.z - a.z * g.y
        temp_matrix.a.y = a.z * g.x - a.x * g.z
        temp_matrix.a.z = a.x * g.y - a.y * g.x
        temp_matrix.b.x = b.y * g.z - b.z * g.y
        temp_matrix.b.y = b.z * g.x - b.x * g.z
        temp_matrix.b.z = b.x * g.y - b.y * g.x
        temp_matrix.c.x = c.y * g.z - c.z * g.y
        temp_matrix.c.y = c.z * g.x - c.x * g.z
        temp_matrix.c.z = c.x * g.y - c.y * g.x
        self.a += temp_matrix.a
        self.b += temp_matrix.b
        self.c += temp_matrix.c

    def normalize(self):
        '''re-normalise a rotation matrix'''
        error = self.a * self.b
        t0 = self.a - (self.b * (0.5 * error))
        t1 = self.b - (self.a * (0.5 * error))
        t2 = t0 % t1
        self.a = t0 * (1.0 / t0.length())
        self.b = t1 * (1.0 / t1.length())
        self.c = t2 * (1.0 / t2.length())

    def trace(self):
        '''the trace of the matrix'''
        return self.a.x + self.b.y + self.c.z


def test_euler():
    '''check that from_euler() and to_euler() are consistent'''
    m = Matrix3()
    from math import radians, degrees
    for r in range(-179, 179, 3):
        for p in range(-89, 89, 3):
            for y in range(-179, 179, 3):
                m.from_euler(radians(r), radians(p), radians(y))
                (r2, p2, y2) = m.to_euler()
                v1 = Vector3(r, p, y)
                v2 = Vector3(degrees(r2), degrees(p2), degrees(y2))
                diff = v1 - v2
                if diff.length() > 1.0e-12:
                    print('EULER ERROR:', v1, v2, diff.length())


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_euler()
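# --- Illustrative sketch, not part of the library above ---
# A minimal round trip through the API: build a rotation from Euler angles,
# rotate a body-frame vector into the earth frame, and read the angles back.
# The angle values are arbitrary examples.
m = Matrix3()
m.from_euler(radians(10), radians(-5), radians(30))

v = Vector3(1, 0, 0)           # unit vector along X
v_rot = m * v                  # Matrix3.__mul__ applies the rotation
print(v_rot, v_rot.length())   # a rotation preserves the vector's length

roll, pitch, yaw = m.to_euler()  # recovers the angles, in radians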
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .charsetprober import CharSetProber
from .enums import ProbingState

# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers

### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
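# --- Illustrative sketch, not part of the prober ---
# The comments above describe Visual Hebrew as logically-ordered text stored
# in reverse. Assuming a buffer of plain Hebrew letters with no BIDI-neutral
# punctuation, visual order can be turned into logical order simply by
# reversing each line; real converters must also reassign final/non-final
# letter forms and reposition neutral characters, which this toy version
# ignores.


def visual_to_logical(buf):
    # reverse the character order of every line, keeping line order intact
    return '\n'.join(line[::-1] for line in buf.split('\n'))


# Round trip: reversing twice restores the original buffer.
sample = 'abc def\nghi'  # stand-in for Hebrew code points
assert visual_to_logical(visual_to_logical(sample)) == sample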