diff --git "a/codeparrot-valid_1021.txt" "b/codeparrot-valid_1021.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1021.txt" @@ -0,0 +1,10000 @@ + kind=_KEYWORD_ONLY, + default=default)) + # **kwargs + if func_code.co_flags & 0x08: + index = pos_count + keyword_only_count + if func_code.co_flags & 0x04: + index += 1 + + name = arg_names[index] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_KEYWORD)) + + return cls(parameters, + return_annotation=annotations.get('return', _empty), + __validate_parameters__=False) + + @property + def parameters(self): + return self._parameters + + @property + def return_annotation(self): + return self._return_annotation + + def replace(self, *, parameters=_void, return_annotation=_void): + '''Creates a customized copy of the Signature. + Pass 'parameters' and/or 'return_annotation' arguments + to override them in the new copy. + ''' + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + def __eq__(self, other): + if (not issubclass(type(other), Signature) or + self.return_annotation != other.return_annotation or + len(self.parameters) != len(other.parameters)): + return False + + other_positions = {param: idx + for idx, param in enumerate(other.parameters.keys())} + + for idx, (param_name, param) in enumerate(self.parameters.items()): + if param.kind == _KEYWORD_ONLY: + try: + other_param = other.parameters[param_name] + except KeyError: + return False + else: + if param != other_param: + return False + else: + try: + other_idx = other_positions[param_name] + except KeyError: + return False + else: + if (idx != other_idx or + param != other.parameters[param_name]): + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def _bind(self, args, kwargs, *, partial=False): + '''Private method. Don't use directly.''' + + arguments = OrderedDict() + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + if partial: + # Support for binding arguments to 'functools.partial' objects. + # See 'functools.partial' case in 'signature()' implementation + # for details. + for param_name, param in self.parameters.items(): + if (param._partial_kwarg and param_name not in kwargs): + # Simulating 'functools.partial' behavior + kwargs[param_name] = param.default + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + msg = '{arg!r} parameter is positional only, ' \ + 'but was passed as a keyword' + msg = msg.format(arg=param.name) + raise TypeError(msg) from None + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, lets start parsing `kwargs`, starting + # with the current parameter + parameters_ex = (param,) + break + else: + if partial: + parameters_ex = (param,) + break + else: + msg = '{arg!r} parameter lacking default value' + msg = msg.format(arg=param.name) + raise TypeError(msg) from None + else: + # We have a positional argument to process + try: + param = next(parameters) + except StopIteration: + raise TypeError('too many positional arguments') from None + else: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + # Looks like we have no parameter for this positional + # argument + raise TypeError('too many positional arguments') + + if param.kind == _VAR_POSITIONAL: + # We have an '*args'-like argument, let's fill it with + # all positional arguments we have left and move on to + # the next phase + values = [arg_val] + values.extend(arg_vals) + arguments[param.name] = tuple(values) + break + + if param.name in kwargs: + raise TypeError('multiple values for argument ' + '{arg!r}'.format(arg=param.name)) + + arguments[param.name] = arg_val + + # Now, we iterate through the remaining parameters to process + # keyword arguments + kwargs_param = None + for param in itertools.chain(parameters_ex, parameters): + if param.kind == _POSITIONAL_ONLY: + # This should never happen in case of a properly built + # Signature object (but let's have this check here + # to ensure correct behaviour just in case) + raise TypeError('{arg!r} parameter is positional only, ' + 'but was passed as a keyword'. \ + format(arg=param.name)) + + if param.kind == _VAR_KEYWORD: + # Memorize that we have a '**kwargs'-like parameter + kwargs_param = param + continue + + param_name = param.name + try: + arg_val = kwargs.pop(param_name) + except KeyError: + # We have no value for this parameter. It's fine though, + # if it has a default value, or it is an '*args'-like + # parameter, left alone by the processing of positional + # arguments. + if (not partial and param.kind != _VAR_POSITIONAL and + param.default is _empty): + raise TypeError('{arg!r} parameter lacking default value'. \ + format(arg=param_name)) from None + + else: + arguments[param_name] = arg_val + + if kwargs: + if kwargs_param is not None: + # Process our '**kwargs'-like parameter + arguments[kwargs_param.name] = kwargs + else: + raise TypeError('too many keyword arguments') + + return self._bound_arguments_cls(self, arguments) + + def bind(__bind_self, *args, **kwargs): + '''Get a BoundArguments object, that maps the passed `args` + and `kwargs` to the function's signature. Raises `TypeError` + if the passed arguments can not be bound. + ''' + return __bind_self._bind(args, kwargs) + + def bind_partial(__bind_self, *args, **kwargs): + '''Get a BoundArguments object, that partially maps the + passed `args` and `kwargs` to the function's signature. + Raises `TypeError` if the passed arguments can not be bound. 
+ ''' + return __bind_self._bind(args, kwargs, partial=True) + + def __str__(self): + result = [] + render_kw_only_separator = True + for idx, param in enumerate(self.parameters.values()): + formatted = str(param) + + kind = param.kind + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + rendered = '({})'.format(', '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation) + rendered += ' -> {}'.format(anno) + + return rendered + +#!/usr/bin/env python +# coding=utf-8 +from pybrain.tools.shortcuts import buildNetwork +from pybrain.supervised.trainers import BackpropTrainer +from src.supervised_learning import dataset +from math import sqrt + +import cPickle as pickle + + +def build(input_size, hidden_size, target_size): + return buildNetwork(input_size, hidden_size, target_size, bias=True) + +def train(network, dataset, epochs): + trainer = BackpropTrainer(network, dataset) + # trainer.trainUntilConvergence(verbose=True) + # + for i in range(epochs): + mse = trainer.train() + rmse = sqrt(mse) + print "training RMSE, epoch {}: {}".format(i + 1, rmse) + +def load_from_file(filename): + network = None + with open(filename, 'r') as pickle_file: + network = pickle.load(pickle_file) + return network + +def save_to_file(filename, network): + pickle.dump(network, open(filename, 'wb')) + +def train_and_save(input_size, + output_size, + hidden_size, + training_epochs, + network_filename, + dataset_filename): + + network = build(input_size, hidden_size, output_size) + ds = dataset.load_from_file(dataset_filename) + train(network, ds, training_epochs) + save_to_file(network_filename, network) + +def rnd_config(): + return { + "network_filename": "network/rnd_net.pickle", + "dataset_filename": "datasets/rnd.data", + } + +def best_avg_config(): + return { + "network_filename": "network/best_avg_net.pickle", + "dataset_filename": "datasets/best_avg.data", + } + +def thinking_config(): + return { + "network_filename": "network/thinking_net.pickle", + "dataset_filename": "datasets/thinking.data", + } + +def mixed_config(): + return { + "network_filename": "network/mixed_net.pickle", + "dataset_filename": "datasets/mixed.data", + } + +if __name__ == '__main__': + input_size = 9 + output_size = 1 + hidden_size = 15 + training_epochs = 200 + train_and_save( + input_size, + output_size, + hidden_size, + training_epochs, + **mixed_config()) + + +# -*- coding: utf-8 -*- + +import sys +import re +import zlib +import urllib2 + +import xml.etree.ElementTree as et +default_encoding = 'utf-8' +if sys.getdefaultencoding() != default_encoding: + reload(sys) + sys.setdefaultencoding(default_encoding) + +class Feedback(): + """Feeback used by Alfred Script Filter + + Usage: + fb = Feedback() + fb.add_item('Hello', 'World') + fb.add_item('Foo', 'Bar') + print fb + + """ + + def __init__(self): + self.feedback = et.Element('items') + + def __repr__(self): + """XML representation used by Alfred + + Returns: + XML string + """ + return 
et.tostring(self.feedback)
+
+    def add_item(self, title, subtitle="", arg="", valid="yes", autocomplete="", icon="icon.png"):
+        """
+        Add an item to the Alfred feedback
+
+        Args:
+            title(str): the title displayed by Alfred
+        Keyword Args:
+            subtitle(str): the subtitle displayed by Alfred
+            arg(str): the value returned by Alfred when the item is selected
+            valid(str): whether or not the entry can be selected in Alfred to trigger an action
+            autocomplete(str): the text to be inserted if an invalid item is selected. This is only used if 'valid' is 'no'
+            icon(str): filename of the icon that Alfred will display
+        """
+        item = et.SubElement(self.feedback, 'item', uid=str(len(self.feedback)), arg=arg, valid=valid, autocomplete=autocomplete)
+        _title = et.SubElement(item, 'title')
+        _title.text = title
+        _sub = et.SubElement(item, 'subtitle')
+        _sub.text = subtitle
+        _icon = et.SubElement(item, 'icon')
+        _icon.text = icon
+
+query = '{query}'
+url = "http://www.bilibili.com/search?keyword=%s&orderby=&formsubmit=" % query
+req = urllib2.Request(url=url)
+content = urllib2.urlopen(req, timeout=10).read()
+content = zlib.decompress(content, 16 + zlib.MAX_WBITS)
+
+reg = r'
(av\d+)[^>]*>([^<]*)<[^>]*>([^<]*)
';
+result = re.findall(reg, content, re.S)
+fb = Feedback()
+
+try:
+    for item in result:
+        avnum = item[0]
+        avtype = item[1]
+        title = item[2].strip()
+        fb.add_item(title, subtitle="%s : http://www.bilibili.tv/video/%s" % (avtype, avnum), arg=avnum)
+
+except SyntaxError as e:
+    if 'EOF' in e.msg or 'EOL' in e.msg:
+        fb.add_item('...')
+    else:
+        fb.add_item('SyntaxError', e.msg)
+except Exception as e:
+    fb.add_item(e.__class__.__name__, subtitle=e.message)
+print fb
+
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import os
+import re
+import sre_constants
+import traceback
+
+import numpy as np
+import six
+
+from tensorflow.python.client import pywrap_tf_session
+from tensorflow.python.platform import gfile
+
+HELP_INDENT = "  "
+
+EXPLICIT_USER_EXIT = "explicit_user_exit"
+REGEX_MATCH_LINES_KEY = "regex_match_lines"
+INIT_SCROLL_POS_KEY = "init_scroll_pos"
+
+MAIN_MENU_KEY = "mm:"
+
+
+class CommandLineExit(Exception):
+
+  def __init__(self, exit_token=None):
+    Exception.__init__(self)
+    self._exit_token = exit_token
+
+  @property
+  def exit_token(self):
+    return self._exit_token
+
+
+class RichLine(object):
+  """Rich single-line text.
+
+  Attributes:
+    text: A plain string, the raw text represented by this object. Should not
+      contain newlines.
+    font_attr_segs: A list of (start, end, font attribute) triples, representing
+      richness information applied to substrings of text.
+  """
+
+  def __init__(self, text="", font_attr=None):
+    """Construct a RichLine with no rich attributes or a single attribute.
+
+    Args:
+      text: Raw text string
+      font_attr: If specified, a single font attribute to be applied to the
+        entire text. Extending this object via concatenation allows creation
+        of text with varying attributes.
+    """
+    # TODO(ebreck) Make .text and .font_attr protected members when we no
+    # longer need public access.
+    self.text = text
+    if font_attr:
+      self.font_attr_segs = [(0, len(text), font_attr)]
+    else:
+      self.font_attr_segs = []
+
+  def __add__(self, other):
+    """Concatenate two chunks of maybe rich text to make a longer rich line.
+
+    Does not modify self.
+
+    Args:
+      other: Another piece of text to concatenate with this one.
+        If it is a plain str, it will be appended to this string with no
+        attributes. If it is a RichLine, it will be appended to this string
+        with its attributes preserved.
+
+    Returns:
+      A new RichLine comprising both chunks of text, with appropriate
+      attributes applied to the corresponding substrings.
+ """ + ret = RichLine() + if isinstance(other, six.string_types): + ret.text = self.text + other + ret.font_attr_segs = self.font_attr_segs[:] + return ret + elif isinstance(other, RichLine): + ret.text = self.text + other.text + ret.font_attr_segs = self.font_attr_segs[:] + old_len = len(self.text) + for start, end, font_attr in other.font_attr_segs: + ret.font_attr_segs.append((old_len + start, old_len + end, font_attr)) + return ret + else: + raise TypeError("%r cannot be concatenated with a RichLine" % other) + + def __len__(self): + return len(self.text) + + +def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None): + """Convert a list of RichLine objects or strings to a RichTextLines object. + + Args: + rich_text_list: a list of RichLine objects or strings + annotations: annotations for the resultant RichTextLines object. + + Returns: + A corresponding RichTextLines object. + """ + lines = [] + font_attr_segs = {} + for i, rl in enumerate(rich_text_list): + if isinstance(rl, RichLine): + lines.append(rl.text) + if rl.font_attr_segs: + font_attr_segs[i] = rl.font_attr_segs + else: + lines.append(rl) + return RichTextLines(lines, font_attr_segs, annotations=annotations) + + +def get_tensorflow_version_lines(include_dependency_versions=False): + """Generate RichTextLines with TensorFlow version info. + + Args: + include_dependency_versions: Include the version of TensorFlow's key + dependencies, such as numpy. + + Returns: + A formatted, multi-line `RichTextLines` object. + """ + lines = ["TensorFlow version: %s" % pywrap_tf_session.__version__] + lines.append("") + if include_dependency_versions: + lines.append("Dependency version(s):") + lines.append(" numpy: %s" % np.__version__) + lines.append("") + return RichTextLines(lines) + + +class RichTextLines(object): + """Rich multi-line text. + + Line-by-line text output, with font attributes (e.g., color) and annotations + (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI + commands. Can be rendered on terminal environments such as curses. + + This is not to be confused with Rich Text Format (RTF). This class is for text + lines only. + """ + + def __init__(self, lines, font_attr_segs=None, annotations=None): + """Constructor of RichTextLines. + + Args: + lines: A list of str or a single str, representing text output to + screen. The latter case is for convenience when the text output is + single-line. + font_attr_segs: A map from 0-based row index to a list of 3-tuples. + It lists segments in each row that have special font attributes, such + as colors, that are not the default attribute. For example: + {1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]} + + In each tuple, the 1st element is the start index of the segment. The + 2nd element is the end index, in an "open interval" fashion. The 3rd + element is an object or a list of objects that represents the font + attribute. Colors are represented as strings as in the examples above. + annotations: A map from 0-based row index to any object for annotating + the row. A typical use example is annotating rows of the output as + indices in a multi-dimensional tensor. 
For example, consider the + following text representation of a 3x2x2 tensor: + [[[0, 0], [0, 0]], + [[0, 0], [0, 0]], + [[0, 0], [0, 0]]] + The annotation can indicate the indices of the first element shown in + each row, i.e., + {0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]} + This information can make display of tensors on screen clearer and can + help the user navigate (scroll) to the desired location in a large + tensor. + + Raises: + ValueError: If lines is of invalid type. + """ + if isinstance(lines, list): + self._lines = lines + elif isinstance(lines, six.string_types): + self._lines = [lines] + else: + raise ValueError("Unexpected type in lines: %s" % type(lines)) + + self._font_attr_segs = font_attr_segs + if not self._font_attr_segs: + self._font_attr_segs = {} + # TODO(cais): Refactor to collections.defaultdict(list) to simplify code. + + self._annotations = annotations + if not self._annotations: + self._annotations = {} + # TODO(cais): Refactor to collections.defaultdict(list) to simplify code. + + @property + def lines(self): + return self._lines + + @property + def font_attr_segs(self): + return self._font_attr_segs + + @property + def annotations(self): + return self._annotations + + def num_lines(self): + return len(self._lines) + + def slice(self, begin, end): + """Slice a RichTextLines object. + + The object itself is not changed. A sliced instance is returned. + + Args: + begin: (int) Beginning line index (inclusive). Must be >= 0. + end: (int) Ending line index (exclusive). Must be >= 0. + + Returns: + (RichTextLines) Sliced output instance of RichTextLines. + + Raises: + ValueError: If begin or end is negative. + """ + + if begin < 0 or end < 0: + raise ValueError("Encountered negative index.") + + # Copy lines. + lines = self.lines[begin:end] + + # Slice font attribute segments. + font_attr_segs = {} + for key in self.font_attr_segs: + if key >= begin and key < end: + font_attr_segs[key - begin] = self.font_attr_segs[key] + + # Slice annotations. + annotations = {} + for key in self.annotations: + if not isinstance(key, int): + # Annotations can contain keys that are not line numbers. + annotations[key] = self.annotations[key] + elif key >= begin and key < end: + annotations[key - begin] = self.annotations[key] + + return RichTextLines( + lines, font_attr_segs=font_attr_segs, annotations=annotations) + + def extend(self, other): + """Extend this instance of RichTextLines with another instance. + + The extension takes effect on the text lines, the font attribute segments, + as well as the annotations. The line indices in the font attribute + segments and the annotations are adjusted to account for the existing + lines. If there are duplicate, non-line-index fields in the annotations, + the value from the input argument "other" will override that in this + instance. + + Args: + other: (RichTextLines) The other RichTextLines instance to be appended at + the end of this instance. + """ + + orig_num_lines = self.num_lines() # Record original number of lines. + + # Merge the lines. + self._lines.extend(other.lines) + + # Merge the font_attr_segs. + for line_index in other.font_attr_segs: + self._font_attr_segs[orig_num_lines + line_index] = ( + other.font_attr_segs[line_index]) + + # Merge the annotations. 
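+    # Integer keys in `other.annotations` are line indices, so they are
+    # shifted by the pre-extension line count; non-integer keys are global
+    # annotations whose values from `other` override any duplicates here.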
+ for key in other.annotations: + if isinstance(key, int): + self._annotations[orig_num_lines + key] = (other.annotations[key]) + else: + self._annotations[key] = other.annotations[key] + + def _extend_before(self, other): + """Add another RichTextLines object to the front. + + Args: + other: (RichTextLines) The other object to add to the front to this + object. + """ + + other_num_lines = other.num_lines() # Record original number of lines. + + # Merge the lines. + self._lines = other.lines + self._lines + + # Merge the font_attr_segs. + new_font_attr_segs = {} + for line_index in self.font_attr_segs: + new_font_attr_segs[other_num_lines + line_index] = ( + self.font_attr_segs[line_index]) + new_font_attr_segs.update(other.font_attr_segs) + self._font_attr_segs = new_font_attr_segs + + # Merge the annotations. + new_annotations = {} + for key in self._annotations: + if isinstance(key, int): + new_annotations[other_num_lines + key] = (self.annotations[key]) + else: + new_annotations[key] = other.annotations[key] + + new_annotations.update(other.annotations) + self._annotations = new_annotations + + def append(self, line, font_attr_segs=None): + """Append a single line of text. + + Args: + line: (str) The text to be added to the end. + font_attr_segs: (list of tuples) Font attribute segments of the appended + line. + """ + + self._lines.append(line) + if font_attr_segs: + self._font_attr_segs[len(self._lines) - 1] = font_attr_segs + + def append_rich_line(self, rich_line): + self.append(rich_line.text, rich_line.font_attr_segs) + + def prepend(self, line, font_attr_segs=None): + """Prepend (i.e., add to the front) a single line of text. + + Args: + line: (str) The text to be added to the front. + font_attr_segs: (list of tuples) Font attribute segments of the appended + line. + """ + + other = RichTextLines(line) + if font_attr_segs: + other.font_attr_segs[0] = font_attr_segs + self._extend_before(other) + + def write_to_file(self, file_path): + """Write the object itself to file, in a plain format. + + The font_attr_segs and annotations are ignored. + + Args: + file_path: (str) path of the file to write to. + """ + + with gfile.Open(file_path, "w") as f: + for line in self._lines: + f.write(line + "\n") + + # TODO(cais): Add a method to allow appending to a line in RichTextLines with + # both text and font_attr_segs. + + +def regex_find(orig_screen_output, regex, font_attr): + """Perform regex match in rich text lines. + + Produces a new RichTextLines object with font_attr_segs containing highlighted + regex matches. + + Example use cases include: + 1) search for specific items in a large list of items, and + 2) search for specific numerical values in a large tensor. + + Args: + orig_screen_output: The original RichTextLines, in which the regex find + is to be performed. + regex: The regex used for matching. + font_attr: Font attribute used for highlighting the found result. + + Returns: + A modified copy of orig_screen_output. + + Raises: + ValueError: If input str regex is not a valid regular expression. 
+ """ + new_screen_output = RichTextLines( + orig_screen_output.lines, + font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs), + annotations=orig_screen_output.annotations) + + try: + re_prog = re.compile(regex) + except sre_constants.error: + raise ValueError("Invalid regular expression: \"%s\"" % regex) + + regex_match_lines = [] + for i, line in enumerate(new_screen_output.lines): + find_it = re_prog.finditer(line) + + match_segs = [] + for match in find_it: + match_segs.append((match.start(), match.end(), font_attr)) + + if match_segs: + if i not in new_screen_output.font_attr_segs: + new_screen_output.font_attr_segs[i] = match_segs + else: + new_screen_output.font_attr_segs[i].extend(match_segs) + new_screen_output.font_attr_segs[i] = sorted( + new_screen_output.font_attr_segs[i], key=lambda x: x[0]) + regex_match_lines.append(i) + + new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines + return new_screen_output + + +def wrap_rich_text_lines(inp, cols): + """Wrap RichTextLines according to maximum number of columns. + + Produces a new RichTextLines object with the text lines, font_attr_segs and + annotations properly wrapped. This ought to be used sparingly, as in most + cases, command handlers producing RichTextLines outputs should know the + screen/panel width via the screen_info kwarg and should produce properly + length-limited lines in the output accordingly. + + Args: + inp: Input RichTextLines object. + cols: Number of columns, as an int. + + Returns: + 1) A new instance of RichTextLines, with line lengths limited to cols. + 2) A list of new (wrapped) line index. For example, if the original input + consists of three lines and only the second line is wrapped, and it's + wrapped into two lines, this return value will be: [0, 1, 3]. + Raises: + ValueError: If inputs have invalid types. + """ + + new_line_indices = [] + + if not isinstance(inp, RichTextLines): + raise ValueError("Invalid type of input screen_output") + + if not isinstance(cols, int): + raise ValueError("Invalid type of input cols") + + out = RichTextLines([]) + + row_counter = 0 # Counter for new row index + for i, line in enumerate(inp.lines): + new_line_indices.append(out.num_lines()) + + if i in inp.annotations: + out.annotations[row_counter] = inp.annotations[i] + + if len(line) <= cols: + # No wrapping. + out.lines.append(line) + if i in inp.font_attr_segs: + out.font_attr_segs[row_counter] = inp.font_attr_segs[i] + + row_counter += 1 + else: + # Wrap. + wlines = [] # Wrapped lines. + + osegs = [] + if i in inp.font_attr_segs: + osegs = inp.font_attr_segs[i] + + idx = 0 + while idx < len(line): + if idx + cols > len(line): + rlim = len(line) + else: + rlim = idx + cols + + wlines.append(line[idx:rlim]) + for seg in osegs: + if (seg[0] < rlim) and (seg[1] >= idx): + # Calculate left bound within wrapped line. + if seg[0] >= idx: + lb = seg[0] - idx + else: + lb = 0 + + # Calculate right bound within wrapped line. + if seg[1] < rlim: + rb = seg[1] - idx + else: + rb = rlim - idx + + if rb > lb: # Omit zero-length segments. + wseg = (lb, rb, seg[2]) + if row_counter not in out.font_attr_segs: + out.font_attr_segs[row_counter] = [wseg] + else: + out.font_attr_segs[row_counter].append(wseg) + + idx += cols + row_counter += 1 + + out.lines.extend(wlines) + + # Copy over keys of annotation that are not row indices. 
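+  # Integer annotation keys were already remapped to wrapped row indices in
+  # the loop above; non-integer keys (e.g. REGEX_MATCH_LINES_KEY) are not
+  # line indices and are carried over unchanged.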
+ for key in inp.annotations: + if not isinstance(key, int): + out.annotations[key] = inp.annotations[key] + + return out, new_line_indices + + +class CommandHandlerRegistry(object): + """Registry of command handlers for CLI. + + Handler methods (callables) for user commands can be registered with this + class, which then is able to dispatch commands to the correct handlers and + retrieve the RichTextLines output. + + For example, suppose you have the following handler defined: + def echo(argv, screen_info=None): + return RichTextLines(["arguments = %s" % " ".join(argv), + "screen_info = " + repr(screen_info)]) + + you can register the handler with the command prefix "echo" and alias "e": + registry = CommandHandlerRegistry() + registry.register_command_handler("echo", echo, + "Echo arguments, along with screen info", prefix_aliases=["e"]) + + then to invoke this command handler with some arguments and screen_info, do: + registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80}) + + or with the prefix alias: + registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80}) + + The call will return a RichTextLines object which can be rendered by a CLI. + """ + + HELP_COMMAND = "help" + HELP_COMMAND_ALIASES = ["h"] + VERSION_COMMAND = "version" + VERSION_COMMAND_ALIASES = ["ver"] + + def __init__(self): + # A dictionary from command prefix to handler. + self._handlers = {} + + # A dictionary from prefix alias to prefix. + self._alias_to_prefix = {} + + # A dictionary from prefix to aliases. + self._prefix_to_aliases = {} + + # A dictionary from command prefix to help string. + self._prefix_to_help = {} + + # Introductory text to help information. + self._help_intro = None + + # Register a default handler for the command "help". + self.register_command_handler( + self.HELP_COMMAND, + self._help_handler, + "Print this help message.", + prefix_aliases=self.HELP_COMMAND_ALIASES) + + # Register a default handler for the command "version". + self.register_command_handler( + self.VERSION_COMMAND, + self._version_handler, + "Print the versions of TensorFlow and its key dependencies.", + prefix_aliases=self.VERSION_COMMAND_ALIASES) + + def register_command_handler(self, + prefix, + handler, + help_info, + prefix_aliases=None): + """Register a callable as a command handler. + + Args: + prefix: Command prefix, i.e., the first word in a command, e.g., + "print" as in "print tensor_1". + handler: A callable of the following signature: + foo_handler(argv, screen_info=None), + where argv is the argument vector (excluding the command prefix) and + screen_info is a dictionary containing information about the screen, + such as number of columns, e.g., {"cols": 100}. + The callable should return: + 1) a RichTextLines object representing the screen output. + + The callable can also raise an exception of the type CommandLineExit, + which if caught by the command-line interface, will lead to its exit. + The exception can optionally carry an exit token of arbitrary type. + help_info: A help string. + prefix_aliases: Aliases for the command prefix, as a list of str. E.g., + shorthands for the command prefix: ["p", "pr"] + + Raises: + ValueError: If + 1) the prefix is empty, or + 2) handler is not callable, or + 3) a handler is already registered for the prefix, or + 4) elements in prefix_aliases clash with existing aliases. + 5) help_info is not a str. 
+ """ + + if not prefix: + raise ValueError("Empty command prefix") + + if prefix in self._handlers: + raise ValueError( + "A handler is already registered for command prefix \"%s\"" % prefix) + + # Make sure handler is callable. + if not callable(handler): + raise ValueError("handler is not callable") + + # Make sure that help info is a string. + if not isinstance(help_info, six.string_types): + raise ValueError("help_info is not a str") + + # Process prefix aliases. + if prefix_aliases: + for alias in prefix_aliases: + if self._resolve_prefix(alias): + raise ValueError( + "The prefix alias \"%s\" clashes with existing prefixes or " + "aliases." % alias) + self._alias_to_prefix[alias] = prefix + + self._prefix_to_aliases[prefix] = prefix_aliases + + # Store handler. + self._handlers[prefix] = handler + + # Store help info. + self._prefix_to_help[prefix] = help_info + + def dispatch_command(self, prefix, argv, screen_info=None): + """Handles a command by dispatching it to a registered command handler. + + Args: + prefix: Command prefix, as a str, e.g., "print". + argv: Command argument vector, excluding the command prefix, represented + as a list of str, e.g., + ["tensor_1"] + screen_info: A dictionary containing screen info, e.g., {"cols": 100}. + + Returns: + An instance of RichTextLines or None. If any exception is caught during + the invocation of the command handler, the RichTextLines will wrap the + error type and message. + + Raises: + ValueError: If + 1) prefix is empty, or + 2) no command handler is registered for the command prefix, or + 3) the handler is found for the prefix, but it fails to return a + RichTextLines or raise any exception. + CommandLineExit: + If the command handler raises this type of exception, this method will + simply pass it along. + """ + if not prefix: + raise ValueError("Prefix is empty") + + resolved_prefix = self._resolve_prefix(prefix) + if not resolved_prefix: + raise ValueError("No handler is registered for command prefix \"%s\"" % + prefix) + + handler = self._handlers[resolved_prefix] + try: + output = handler(argv, screen_info=screen_info) + except CommandLineExit as e: + raise e + except SystemExit as e: + # Special case for syntax errors caught by argparse. + lines = ["Syntax error for command: %s" % prefix, + "For help, do \"help %s\"" % prefix] + output = RichTextLines(lines) + + except BaseException as e: # pylint: disable=broad-except + lines = ["Error occurred during handling of command: %s %s:" % + (resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))] + + # Include traceback of the exception. + lines.append("") + lines.extend(traceback.format_exc().split("\n")) + + output = RichTextLines(lines) + + if not isinstance(output, RichTextLines) and output is not None: + raise ValueError( + "Return value from command handler %s is not None or a RichTextLines " + "instance" % str(handler)) + + return output + + def is_registered(self, prefix): + """Test if a command prefix or its alias is has a registered handler. + + Args: + prefix: A prefix or its alias, as a str. + + Returns: + True iff a handler is registered for prefix. + """ + return self._resolve_prefix(prefix) is not None + + def get_help(self, cmd_prefix=None): + """Compile help information into a RichTextLines object. + + Args: + cmd_prefix: Optional command prefix. As the prefix itself or one of its + aliases. + + Returns: + A RichTextLines object containing the help information. If cmd_prefix + is None, the return value will be the full command-line help. 
Otherwise, + it will be the help information for the specified command. + """ + if not cmd_prefix: + # Print full help information, in sorted order of the command prefixes. + help_info = RichTextLines([]) + if self._help_intro: + # If help intro is available, show it at the beginning. + help_info.extend(self._help_intro) + + sorted_prefixes = sorted(self._handlers) + for cmd_prefix in sorted_prefixes: + lines = self._get_help_for_command_prefix(cmd_prefix) + lines.append("") + lines.append("") + help_info.extend(RichTextLines(lines)) + + return help_info + else: + return RichTextLines(self._get_help_for_command_prefix(cmd_prefix)) + + def set_help_intro(self, help_intro): + """Set an introductory message to help output. + + Args: + help_intro: (RichTextLines) Rich text lines appended to the + beginning of the output of the command "help", as introductory + information. + """ + self._help_intro = help_intro + + def _help_handler(self, args, screen_info=None): + """Command handler for "help". + + "help" is a common command that merits built-in support from this class. + + Args: + args: Command line arguments to "help" (not including "help" itself). + screen_info: (dict) Information regarding the screen, e.g., the screen + width in characters: {"cols": 80} + + Returns: + (RichTextLines) Screen text output. + """ + + _ = screen_info # Unused currently. + + if not args: + return self.get_help() + elif len(args) == 1: + return self.get_help(args[0]) + else: + return RichTextLines(["ERROR: help takes only 0 or 1 input argument."]) + + def _version_handler(self, args, screen_info=None): + del args # Unused currently. + del screen_info # Unused currently. + return get_tensorflow_version_lines(include_dependency_versions=True) + + def _resolve_prefix(self, token): + """Resolve command prefix from the prefix itself or its alias. + + Args: + token: a str to be resolved. + + Returns: + If resolvable, the resolved command prefix. + If not resolvable, None. + """ + if token in self._handlers: + return token + elif token in self._alias_to_prefix: + return self._alias_to_prefix[token] + else: + return None + + def _get_help_for_command_prefix(self, cmd_prefix): + """Compile the help information for a given command prefix. + + Args: + cmd_prefix: Command prefix, as the prefix itself or one of its + aliases. + + Returns: + A list of str as the help information fo cmd_prefix. If the cmd_prefix + does not exist, the returned list of str will indicate that. + """ + lines = [] + + resolved_prefix = self._resolve_prefix(cmd_prefix) + if not resolved_prefix: + lines.append("Invalid command prefix: \"%s\"" % cmd_prefix) + return lines + + lines.append(resolved_prefix) + + if resolved_prefix in self._prefix_to_aliases: + lines.append(HELP_INDENT + "Aliases: " + ", ".join( + self._prefix_to_aliases[resolved_prefix])) + + lines.append("") + help_lines = self._prefix_to_help[resolved_prefix].split("\n") + for line in help_lines: + lines.append(HELP_INDENT + line) + + return lines + + +class TabCompletionRegistry(object): + """Registry for tab completion responses.""" + + def __init__(self): + self._comp_dict = {} + + # TODO(cais): Rename method names with "comp" to "*completion*" to avoid + # confusion. + + def register_tab_comp_context(self, context_words, comp_items): + """Register a tab-completion context. + + Register that, for each word in context_words, the potential tab-completions + are the words in comp_items. 
+ + A context word is a pre-existing, completed word in the command line that + determines how tab-completion works for another, incomplete word in the same + command line. + Completion items consist of potential candidates for the incomplete word. + + To give a general example, a context word can be "drink", and the completion + items can be ["coffee", "tea", "water"] + + Note: A context word can be empty, in which case the context is for the + top-level commands. + + Args: + context_words: A list of context words belonging to the context being + registered. It is a list of str, instead of a single string, to support + synonym words triggering the same tab-completion context, e.g., + both "drink" and the short-hand "dr" can trigger the same context. + comp_items: A list of completion items, as a list of str. + + Raises: + TypeError: if the input arguments are not all of the correct types. + """ + + if not isinstance(context_words, list): + raise TypeError("Incorrect type in context_list: Expected list, got %s" % + type(context_words)) + + if not isinstance(comp_items, list): + raise TypeError("Incorrect type in comp_items: Expected list, got %s" % + type(comp_items)) + + # Sort the completion items on registration, so that later during + # get_completions calls, no sorting will be necessary. + sorted_comp_items = sorted(comp_items) + + for context_word in context_words: + self._comp_dict[context_word] = sorted_comp_items + + def deregister_context(self, context_words): + """Deregister a list of context words. + + Args: + context_words: A list of context words to deregister, as a list of str. + + Raises: + KeyError: if there are word(s) in context_words that do not correspond + to any registered contexts. + """ + + for context_word in context_words: + if context_word not in self._comp_dict: + raise KeyError("Cannot deregister unregistered context word \"%s\"" % + context_word) + + for context_word in context_words: + del self._comp_dict[context_word] + + def extend_comp_items(self, context_word, new_comp_items): + """Add a list of completion items to a completion context. + + Args: + context_word: A single completion word as a string. The extension will + also apply to all other context words of the same context. + new_comp_items: (list of str) New completion items to add. + + Raises: + KeyError: if the context word has not been registered. + """ + + if context_word not in self._comp_dict: + raise KeyError("Context word \"%s\" has not been registered" % + context_word) + + self._comp_dict[context_word].extend(new_comp_items) + self._comp_dict[context_word] = sorted(self._comp_dict[context_word]) + + def remove_comp_items(self, context_word, comp_items): + """Remove a list of completion items from a completion context. + + Args: + context_word: A single completion word as a string. The removal will + also apply to all other context words of the same context. + comp_items: Completion items to remove. + + Raises: + KeyError: if the context word has not been registered. + """ + + if context_word not in self._comp_dict: + raise KeyError("Context word \"%s\" has not been registered" % + context_word) + + for item in comp_items: + self._comp_dict[context_word].remove(item) + + def get_completions(self, context_word, prefix): + """Get the tab completions given a context word and a prefix. + + Args: + context_word: The context word. + prefix: The prefix of the incomplete word. + + Returns: + (1) None if no registered context matches the context_word. 
+ A list of str for the matching completion items. Can be an empty list + of a matching context exists, but no completion item matches the + prefix. + (2) Common prefix of all the words in the first return value. If the + first return value is None, this return value will be None, too. If + the first return value is not None, i.e., a list, this return value + will be a str, which can be an empty str if there is no common + prefix among the items of the list. + """ + + if context_word not in self._comp_dict: + return None, None + + comp_items = self._comp_dict[context_word] + comp_items = sorted( + [item for item in comp_items if item.startswith(prefix)]) + + return comp_items, self._common_prefix(comp_items) + + def _common_prefix(self, m): + """Given a list of str, returns the longest common prefix. + + Args: + m: (list of str) A list of strings. + + Returns: + (str) The longest common prefix. + """ + if not m: + return "" + + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + + return s1 + + +class CommandHistory(object): + """Keeps command history and supports lookup.""" + + _HISTORY_FILE_NAME = ".tfdbg_history" + + def __init__(self, limit=100, history_file_path=None): + """CommandHistory constructor. + + Args: + limit: Maximum number of the most recent commands that this instance + keeps track of, as an int. + history_file_path: (str) Manually specified path to history file. Used in + testing. + """ + + self._commands = [] + self._limit = limit + self._history_file_path = ( + history_file_path or self._get_default_history_file_path()) + self._load_history_from_file() + + def _load_history_from_file(self): + if os.path.isfile(self._history_file_path): + try: + with open(self._history_file_path, "rt") as history_file: + commands = history_file.readlines() + self._commands = [command.strip() for command in commands + if command.strip()] + + # Limit the size of the history file. + if len(self._commands) > self._limit: + self._commands = self._commands[-self._limit:] + with open(self._history_file_path, "wt") as history_file: + for command in self._commands: + history_file.write(command + "\n") + except IOError: + print("WARNING: writing history file failed.") + + def _add_command_to_history_file(self, command): + try: + with open(self._history_file_path, "at") as history_file: + history_file.write(command + "\n") + except IOError: + pass + + @classmethod + def _get_default_history_file_path(cls): + return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME) + + def add_command(self, command): + """Add a command to the command history. + + Args: + command: The history command, as a str. + + Raises: + TypeError: if command is not a str. + """ + + if self._commands and command == self._commands[-1]: + # Ignore repeating commands in a row. + return + + if not isinstance(command, six.string_types): + raise TypeError("Attempt to enter non-str entry to command history") + + self._commands.append(command) + + if len(self._commands) > self._limit: + self._commands = self._commands[-self._limit:] + + self._add_command_to_history_file(command) + + def most_recent_n(self, n): + """Look up the n most recent commands. + + Args: + n: Number of most recent commands to look up. + + Returns: + A list of n most recent commands, or all available most recent commands, + if n exceeds size of the command history, in chronological order. 
+ """ + + return self._commands[-n:] + + def lookup_prefix(self, prefix, n): + """Look up the n most recent commands that starts with prefix. + + Args: + prefix: The prefix to lookup. + n: Number of most recent commands to look up. + + Returns: + A list of n most recent commands that have the specified prefix, or all + available most recent commands that have the prefix, if n exceeds the + number of history commands with the prefix. + """ + + commands = [cmd for cmd in self._commands if cmd.startswith(prefix)] + + return commands[-n:] + + # TODO(cais): Lookup by regex. + + +class MenuItem(object): + """A class for an item in a text-based menu.""" + + def __init__(self, caption, content, enabled=True): + """Menu constructor. + + TODO(cais): Nested menu is currently not supported. Support it. + + Args: + caption: (str) caption of the menu item. + content: Content of the menu item. For a menu item that triggers + a command, for example, content is the command string. + enabled: (bool) whether this menu item is enabled. + """ + + self._caption = caption + self._content = content + self._enabled = enabled + + @property + def caption(self): + return self._caption + + @property + def type(self): + return self._node_type + + @property + def content(self): + return self._content + + def is_enabled(self): + return self._enabled + + def disable(self): + self._enabled = False + + def enable(self): + self._enabled = True + + +class Menu(object): + """A class for text-based menu.""" + + def __init__(self, name=None): + """Menu constructor. + + Args: + name: (str or None) name of this menu. + """ + + self._name = name + self._items = [] + + def append(self, item): + """Append an item to the Menu. + + Args: + item: (MenuItem) the item to be appended. + """ + self._items.append(item) + + def insert(self, index, item): + self._items.insert(index, item) + + def num_items(self): + return len(self._items) + + def captions(self): + return [item.caption for item in self._items] + + def caption_to_item(self, caption): + """Get a MenuItem from the caption. + + Args: + caption: (str) The caption to look up. + + Returns: + (MenuItem) The first-match menu item with the caption, if any. + + Raises: + LookupError: If a menu item with the caption does not exist. + """ + + captions = self.captions() + if caption not in captions: + raise LookupError("There is no menu item with the caption \"%s\"" % + caption) + + return self._items[captions.index(caption)] + + def format_as_single_line(self, + prefix=None, + divider=" | ", + enabled_item_attrs=None, + disabled_item_attrs=None): + """Format the menu as a single-line RichTextLines object. + + Args: + prefix: (str) String added to the beginning of the line. + divider: (str) The dividing string between the menu items. + enabled_item_attrs: (list or str) Attributes applied to each enabled + menu item, e.g., ["bold", "underline"]. + disabled_item_attrs: (list or str) Attributes applied to each + disabled menu item, e.g., ["red"]. + + Returns: + (RichTextLines) A single-line output representing the menu, with + font_attr_segs marking the individual menu items. 
+ """ + + if (enabled_item_attrs is not None and + not isinstance(enabled_item_attrs, list)): + enabled_item_attrs = [enabled_item_attrs] + + if (disabled_item_attrs is not None and + not isinstance(disabled_item_attrs, list)): + disabled_item_attrs = [disabled_item_attrs] + + menu_line = prefix if prefix is not None else "" + attr_segs = [] + + for item in self._items: + menu_line += item.caption + item_name_begin = len(menu_line) - len(item.caption) + + if item.is_enabled(): + final_attrs = [item] + if enabled_item_attrs: + final_attrs.extend(enabled_item_attrs) + attr_segs.append((item_name_begin, len(menu_line), final_attrs)) + else: + if disabled_item_attrs: + attr_segs.append( + (item_name_begin, len(menu_line), disabled_item_attrs)) + + menu_line += divider + + return RichTextLines(menu_line, font_attr_segs={0: attr_segs}) + +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import json + +from extensions_paths import CHROME_EXTENSIONS +from test_file_system import MoveAllTo +from test_util import ReadFile + +FAKE_TABS_IDL = '\n'.join([ + '// Copyleft stuff.', + '', + '// Some description here.', + 'namespace fakeTabs {', + ' dictionary WasImplicitlyInlinedType {};', + ' interface Functions {', + ' static void myFunc(WasImplicitlyInlinedType arg);', + ' static void anotherFunc(WasImplicitlyInlinedType arg);', + ' };', + '};']) + +FAKE_TABS_WITH_INLINING_IDL = '\n'.join([ + '// Copyleft stuff.', + '', + '// Some description here.', + 'namespace fakeTabs {', + ' dictionary WasImplicitlyInlinedType {};', + ' interface Functions {', + ' static void myFunc(WasImplicitlyInlinedType arg);', + ' };', + '};']) + +TABS_SCHEMA_BRANCHES = MoveAllTo(CHROME_EXTENSIONS, { + 'master': { + 'docs': { + 'templates': { + 'json': { + 'api_availabilities.json': '{}', + 'intro_tables.json': '{}' + } + } + }, + 'api': { + '_api_features.json': json.dumps({ + 'tabs.scheduledFunc': { + 'channel': 'stable' + } + }), + '_manifest_features.json': '{}', + '_permission_features.json': '{}', + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'type': 'any', + 'properties': { + 'url': { + 'type': 'any' + }, + 'index': { + 'type': 'any' + }, + 'selected': { + 'type': 'any' + }, + 'id': { + 'type': 'any' + }, + 'windowId': { + 'type': 'any' + } + } + }, + { + 'id': 'InlinedType', + 'type': 'any', + 'inline_doc': True + }, + { + 'id': 'InjectDetails', + 'type': 'any', + 'properties': { + 'allFrames': { + 'type': 'any' + }, + 'code': { + 'type': 'any' + }, + 'file': { + 'type':'any' + } + } + }, + { + 'id': 'DeprecatedType', + 'type': 'any', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': { + 'type': 'any' + }, + 'fakeTabsProperty2': { + 'type': 'any' + }, + 'fakeTabsProperty3': { + 'type': 'any' + } + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'type': 'function', + 'parameters': [ + { + 'name': 'tab', + 'type': 'any' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'type': 'function', + 'parameters': [ + { + 'name': 'tab', + 'type': 'any' + } + ] + }, + { + 'name': 'tabId', + 'type': 'any' + } + ] + }, + { + 'name': 'restrictedFunc' + }, + { + 'name': 'scheduledFunc', + 'parameters': [] + } + ], + 'events': [ + { + 'name': 'onActivated', + 'type': 'event', + 'parameters': [ + { + 'name': 'activeInfo', + 
'type': 'any', + 'properties': { + 'tabId': { + 'type': 'any' + }, + 'windowId': { + 'type': 'any' + } + } + } + ] + }, + { + 'name': 'onUpdated', + 'type': 'event', + 'parameters': [ + { + 'name': 'tabId', + 'type': 'any' + }, + { + 'name': 'tab', + 'type': 'any' + }, + { + 'name': 'changeInfo', + 'type': 'any', + 'properties': { + 'pinned': { + 'type': 'any' + }, + 'status': { + 'type': 'any' + } + } + } + ] + } + ] + }]) + } + }, + '1612': { + 'api': { + '_api_features.json': json.dumps({ + 'tabs.scheduledFunc': { + 'channel': 'stable' + } + }), + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {}, + 'code': {}, + 'file': {} + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + }, + { + 'name': 'tabId' + } + ] + }, + { + 'name': 'restrictedFunc' + }, + { + 'name': 'scheduledFunc', + 'parameters': [] + } + ], + 'events': [ + { + 'name': 'onActivated', + 'parameters': [ + { + 'name': 'activeInfo', + 'properties': { + 'tabId': {}, + 'windowId': {} + } + } + ] + }, + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'tab' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1599': { + 'api': { + '_api_features.json': "{}", + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {}, + 'code': {}, + 'file': {} + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + }, + { + 'name': 'tabId' + } + ] + }, + { + 'name': 'restrictedFunc' + } + ], + 'events': [ + { + 'name': 'onActivated', + 'parameters': [ + { + 'name': 'activeInfo', + 'properties': { + 'tabId': {}, + } + } + ] + }, + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1547': { + 'api': { + '_api_features.json': json.dumps({ + 'tabs.restrictedFunc': { + 'channel': 'dev' + } + }), + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 
'InjectDetails', + 'properties': { + 'allFrames': {}, + 'code': {}, + 'file': {} + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + }, + ] + }, + { + 'name': 'restrictedFunc' + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1500': { + 'api': { + '_api_features.json': "{}", + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {}, + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + }, + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1453': { + 'api': { + '_api_features.json': "{}", + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {}, + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + }, + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1410': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {}, + } + }, + { + 'id': 'DeprecatedType', + 'deprecated': 'This is deprecated' + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 
'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1364': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'fake_tabs.idl': FAKE_TABS_WITH_INLINING_IDL, + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + }, + { + 'id': 'InjectDetails', + 'properties': { + 'allFrames': {} + } + }, + { + 'id': 'DeprecatedType', + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1312': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1271': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1229': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {}, + 'windowId': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 
'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1180': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'selected': {}, + 'id': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1132': { + 'api': { + '_manifest_features.json': "{}", + '_permission_features.json': "{}", + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'id': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1084': { + 'api': { + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'id': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'getCurrent', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + }, + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '1025': { + 'api': { + 'tabs.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'index': {}, + 'id': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 'name': 'tab' + } + ] + } + ] + } + ], + 'events': [ + { + 'name': 'onUpdated', + 'parameters': [ + { + 'name': 'tabId' + }, + { + 'name': 'changeInfo', + 'properties': { + 'pinned': {}, + 'status': {} + } + } + ] + } + ] + }]) + } + }, + '963': { + 'api': { + 'extension_api.json': json.dumps([{ + 'namespace': 'tabs', + 'types': [ + { + 'id': 'Tab', + 'properties': { + 'url': {}, + 'id': {} + } + } + ], + 'properties': { + 'fakeTabsProperty1': {}, + 'fakeTabsProperty2': {} + }, + 'functions': [ + { + 'name': 'get', + 'parameters': [ + { + 'name': 'callback', + 'parameters': [ + { + 
'name': 'tab'
                }
              ]
            }
          ]
        }
      ],
      'events': [
        {
          'name': 'onUpdated',
          'parameters': [
            {
              'name': 'tabId'
            },
            {
              'name': 'changeInfo',
              'properties': {
                'pinned': {},
                'status': {}
              }
            }
          ]
        }
      ]
    }])
  }
},
'912': {
  'api': {
    'extension_api.json': json.dumps([{
      'namespace': 'tabs',
      'types': [
        {
          'id': 'Tab',
          'properties': {
            'url': {},
            'id': {}
          }
        }
      ],
      'properties': {
        'fakeTabsProperty1': {},
        'fakeTabsProperty2': {}
      },
      'functions': [
        {
          'name': 'get',
          'parameters': [
            {
              'name': 'callback',
              'parameters': [
                {
                  'name': 'tab'
                }
              ]
            }
          ]
        }
      ],
      'events': [
        {
          'name': 'onUpdated',
          'parameters': [
            {
              'name': 'tabId'
            }
          ]
        }
      ]
    }])
  }
},
'874': {
  'api': {
    'extension_api.json': json.dumps([{
      'namespace': 'tabs',
      'types': [
        {
          'id': 'Tab',
          'properties': {
            'url': {},
            'id': {}
          }
        }
      ],
      'properties': {
        'fakeTabsProperty1': {},
        'fakeTabsProperty2': {}
      },
      'functions': [
        {
          'name': 'get',
          'parameters': [
            {
              'name': 'callback',
              'parameters': [
                {
                  'name': 'tab'
                }
              ]
            }
          ]
        }
      ],
      'events': [
        {
          'name': 'onUpdated',
          'parameters': [
            {
              'name': 'tabId'
            }
          ]
        }
      ]
    }])
  }
},
'835': {
  'api': {
    'extension_api.json': json.dumps([{
      'namespace': 'tabs',
      'types': [
        {
          'id': 'Tab',
          'properties': {
            'url': {},
            'id': {}
          }
        }
      ],
      'properties': {
        'fakeTabsProperty1': {}
      },
      'functions': [
        {
          'name': 'get',
          'parameters': [
            {
              'name': 'callback',
              'parameters': [
                {
                  'name': 'tab'
                }
              ]
            }
          ]
        }
      ],
      'events': [
        {
          'name': 'onUpdated',
          'parameters': [
            {
              'name': 'tabId'
            }
          ]
        }
      ]
    }])
  }
},
'782': {
  'api': {
    'extension_api.json': "{}"
  }
}
})

import tensorflow as tf
import numpy as np
from PIL import Image
import os
import urllib.request  # assumed (Python 3); needed by maybe_download() below

def maybe_download(directory, filename, url):
    print('Trying to download', url)
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    filepath = os.path.join(directory, filename)
    if not tf.gfile.Exists(filepath):
        filepath, _ = urllib.request.urlretrieve(url, filepath)
        with tf.gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath

def load_pretrained(filepath):
    return np.load(filepath, encoding='bytes').item()

def get_epoch():
    epoch_step = tf.Variable(0, name='epoch_step', trainable=False)
    epoch_update = epoch_step.assign(epoch_step + 1)
    return epoch_step, epoch_update

def load_imgs(train_img_dir, filelist):
    def load_img(path):
        _img = Image.open(path)
        img = np.array(_img)
        _img.close()
        return img

    _imgs = [os.path.join(train_img_dir, filename + ".png") for filename in filelist]

    imgs = [load_img(_img) for _img in _imgs]
    return imgs

def load_annots(train_annot_dir, filelist):
    def load_annot(path):
        #print(path)
        annot = np.load(path, encoding='bytes')
        #print("original dims: {}x{}".format(annot[0,0], annot[0,1]))
        return annot

    _annots = [os.path.join(train_annot_dir, filename + ".npy") for filename in filelist]

    annots = [load_annot(_annot) for _annot in _annots]

    return annots

def tf_Print(on, x, summarize=50, message=""):
    if on:
        x = tf.Print(x, [x, tf.shape(x)], summarize=summarize, message=message)

    return x

def debug_print(on, *x):
    if on:
        print(x)
    return x

"""A module which implements time-frequency estimation.
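
For example, single-trial Morlet power can be computed directly from a
2D array (an illustrative sketch: ``X``, with shape (n_signals, n_times),
and the sampling rate are assumed inputs, not provided by this module)::

    import numpy as np

    sfreq = 250.                    # sampling rate in Hz (assumed)
    freqs = np.arange(6., 30., 3.)  # frequencies of interest in Hz
    tfr = cwt_morlet(X, sfreq, freqs, n_cycles=7.0)  # defined below
    power = (tfr * tfr.conj()).real  # (n_signals, n_freqs, n_times)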
+ +Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM +""" +# Authors : Alexandre Gramfort +# Hari Bharadwaj +# +# License : BSD (3-clause) + +import warnings +from math import sqrt +from copy import deepcopy +import numpy as np +from scipy import linalg +from scipy.fftpack import fftn, ifftn + +from ..fixes import partial +from ..baseline import rescale +from ..parallel import parallel_func +from ..utils import logger, verbose, _time_mask +from ..channels.channels import ContainsMixin, UpdateChannelsMixin +from ..io.pick import pick_info, pick_types +from ..utils import check_fname +from .multitaper import dpss_windows +from .._hdf5 import write_hdf5, read_hdf5 + + +def _get_data(inst, return_itc): + """Get data from Epochs or Evoked instance as epochs x ch x time""" + from ..epochs import _BaseEpochs + from ..evoked import Evoked + if not isinstance(inst, (_BaseEpochs, Evoked)): + raise TypeError('inst must be Epochs or Evoked') + if isinstance(inst, _BaseEpochs): + data = inst.get_data() + else: + if return_itc: + raise ValueError('return_itc must be False for evoked data') + data = inst.data[np.newaxis, ...].copy() + return data + + +def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False): + """Compute Wavelets for the given frequency range + + Parameters + ---------- + sfreq : float + Sampling Frequency + freqs : array + frequency range of interest (1 x Frequencies) + n_cycles: float | array of float + Number of cycles. Fixed number or one per frequency. + sigma : float, (optional) + It controls the width of the wavelet ie its temporal + resolution. If sigma is None the temporal resolution + is adapted with the frequency like for all wavelet transform. + The higher the frequency the shorter is the wavelet. + If sigma is fixed the temporal resolution is fixed + like for the short time Fourier transform and the number + of oscillations increases with the frequency. + zero_mean : bool + Make sure the wavelet is zero mean + + Returns + ------- + Ws : list of array + Wavelets time series + + See Also + -------- + mne.time_frequency.cwt_morlet : Compute time-frequency decomposition + with Morlet wavelets + """ + Ws = list() + n_cycles = np.atleast_1d(n_cycles) + + if (n_cycles.size != 1) and (n_cycles.size != len(freqs)): + raise ValueError("n_cycles should be fixed or defined for " + "each frequency.") + for k, f in enumerate(freqs): + if len(n_cycles) != 1: + this_n_cycles = n_cycles[k] + else: + this_n_cycles = n_cycles[0] + # fixed or scale-dependent window + if sigma is None: + sigma_t = this_n_cycles / (2.0 * np.pi * f) + else: + sigma_t = this_n_cycles / (2.0 * np.pi * sigma) + # this scaling factor is proportional to (Tallon-Baudry 98): + # (sigma_t*sqrt(pi))^(-1/2); + t = np.arange(0., 5. * sigma_t, 1.0 / sfreq) + t = np.r_[-t[::-1], t[1:]] + oscillation = np.exp(2.0 * 1j * np.pi * f * t) + gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2)) + if zero_mean: # to make it zero mean + real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2) + oscillation -= real_offset + W = oscillation * gaussian_enveloppe + W /= sqrt(0.5) * linalg.norm(W.ravel()) + Ws.append(W) + return Ws + + +def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0, + zero_mean=False): + """Compute Wavelets for the given frequency range + + Parameters + ---------- + sfreq : float + Sampling Frequency. + freqs : ndarray, shape (n_freqs,) + The frequencies in Hz. + n_cycles : float | ndarray, shape (n_freqs,) + The number of cycles globally or for each frequency. 
+ Defaults to 7. + time_bandwidth : float, (optional) + Time x Bandwidth product. + The number of good tapers (low-bias) is chosen automatically based on + this to equal floor(time_bandwidth - 1). + Default is 4.0, giving 3 good tapers. + + Returns + ------- + Ws : list of array + Wavelets time series + """ + Ws = list() + if time_bandwidth < 2.0: + raise ValueError("time_bandwidth should be >= 2.0 for good tapers") + n_taps = int(np.floor(time_bandwidth - 1)) + n_cycles = np.atleast_1d(n_cycles) + + if n_cycles.size != 1 and n_cycles.size != len(freqs): + raise ValueError("n_cycles should be fixed or defined for " + "each frequency.") + + for m in range(n_taps): + Wm = list() + for k, f in enumerate(freqs): + if len(n_cycles) != 1: + this_n_cycles = n_cycles[k] + else: + this_n_cycles = n_cycles[0] + + t_win = this_n_cycles / float(f) + t = np.arange(0., t_win, 1.0 / sfreq) + # Making sure wavelets are centered before tapering + oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.)) + + # Get dpss tapers + tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2., + n_taps) + + Wk = oscillation * tapers[m] + if zero_mean: # to make it zero mean + real_offset = Wk.mean() + Wk -= real_offset + Wk /= sqrt(0.5) * linalg.norm(Wk.ravel()) + + Wm.append(Wk) + + Ws.append(Wm) + + return Ws + + +def _centered(arr, newsize): + """Aux Function to center data""" + # Return the center newsize portion of the array. + newsize = np.asarray(newsize) + currsize = np.array(arr.shape) + startind = (currsize - newsize) // 2 + endind = startind + newsize + myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] + return arr[tuple(myslice)] + + +def _cwt_fft(X, Ws, mode="same"): + """Compute cwt with fft based convolutions + Return a generator over signals. + """ + X = np.asarray(X) + + # Precompute wavelets for given frequency range to save time + n_signals, n_times = X.shape + n_freqs = len(Ws) + + Ws_max_size = max(W.size for W in Ws) + size = n_times + Ws_max_size - 1 + # Always use 2**n-sized FFT + fsize = 2 ** int(np.ceil(np.log2(size))) + + # precompute FFTs of Ws + fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128) + for i, W in enumerate(Ws): + if len(W) > n_times: + raise ValueError('Wavelet is too long for such a short signal. ' + 'Reduce the number of cycles.') + fft_Ws[i] = fftn(W, [fsize]) + + for k, x in enumerate(X): + if mode == "full": + tfr = np.zeros((n_freqs, fsize), dtype=np.complex128) + elif mode == "same" or mode == "valid": + tfr = np.zeros((n_freqs, n_times), dtype=np.complex128) + + fft_x = fftn(x, [fsize]) + for i, W in enumerate(Ws): + ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1] + if mode == "valid": + sz = abs(W.size - n_times) + 1 + offset = (n_times - sz) / 2 + tfr[i, offset:(offset + sz)] = _centered(ret, sz) + else: + tfr[i, :] = _centered(ret, n_times) + yield tfr + + +def _cwt_convolve(X, Ws, mode='same'): + """Compute time freq decomposition with temporal convolutions + Return a generator over signals. + """ + X = np.asarray(X) + + n_signals, n_times = X.shape + n_freqs = len(Ws) + + # Compute convolutions + for x in X: + tfr = np.zeros((n_freqs, n_times), dtype=np.complex128) + for i, W in enumerate(Ws): + ret = np.convolve(x, W, mode=mode) + if len(W) > len(x): + raise ValueError('Wavelet is too long for such a short ' + 'signal. 
Reduce the number of cycles.') + if mode == "valid": + sz = abs(W.size - n_times) + 1 + offset = (n_times - sz) / 2 + tfr[i, offset:(offset + sz)] = ret + else: + tfr[i] = ret + yield tfr + + +def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False): + """Compute time freq decomposition with Morlet wavelets + + This function operates directly on numpy arrays. Consider using + `tfr_morlet` to process `Epochs` or `Evoked` instances. + + Parameters + ---------- + X : array of shape [n_signals, n_times] + signals (one per line) + sfreq : float + sampling Frequency + freqs : array + Array of frequencies of interest + use_fft : bool + Compute convolution with FFT or temoral convolution. + n_cycles: float | array of float + Number of cycles. Fixed number or one per frequency. + zero_mean : bool + Make sure the wavelets are zero mean. + + Returns + ------- + tfr : 3D array + Time Frequency Decompositions (n_signals x n_frequencies x n_times) + + See Also + -------- + tfr.cwt : Compute time-frequency decomposition with user-provided wavelets + """ + mode = 'same' + # mode = "valid" + n_signals, n_times = X.shape + n_frequencies = len(freqs) + + # Precompute wavelets for given frequency range to save time + Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean) + + if use_fft: + coefs = _cwt_fft(X, Ws, mode) + else: + coefs = _cwt_convolve(X, Ws, mode) + + tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex) + for k, tfr in enumerate(coefs): + tfrs[k] = tfr + + return tfrs + + +def cwt(X, Ws, use_fft=True, mode='same', decim=1): + """Compute time freq decomposition with continuous wavelet transform + + Parameters + ---------- + X : array of shape [n_signals, n_times] + signals (one per line) + Ws : list of array + Wavelets time series + use_fft : bool + Use FFT for convolutions + mode : 'same' | 'valid' | 'full' + Convention for convolution + decim : int + Temporal decimation factor + + Returns + ------- + tfr : 3D array + Time Frequency Decompositions (n_signals x n_frequencies x n_times) + + See Also + -------- + mne.time_frequency.cwt_morlet : Compute time-frequency decomposition + with Morlet wavelets + """ + n_signals, n_times = X[:, ::decim].shape + n_frequencies = len(Ws) + + if use_fft: + coefs = _cwt_fft(X, Ws, mode) + else: + coefs = _cwt_convolve(X, Ws, mode) + + tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex) + for k, tfr in enumerate(coefs): + tfrs[k] = tfr[..., ::decim] + + return tfrs + + +def _time_frequency(X, Ws, use_fft, decim): + """Aux of time_frequency for parallel computing over channels + """ + n_epochs, n_times = X.shape + n_times = n_times // decim + bool(n_times % decim) + n_frequencies = len(Ws) + psd = np.zeros((n_frequencies, n_times)) # PSD + plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock + + mode = 'same' + if use_fft: + tfrs = _cwt_fft(X, Ws, mode) + else: + tfrs = _cwt_convolve(X, Ws, mode) + + for tfr in tfrs: + tfr = tfr[:, ::decim] + tfr_abs = np.abs(tfr) + psd += tfr_abs ** 2 + plf += tfr / tfr_abs + psd /= n_epochs + plf = np.abs(plf) / n_epochs + return psd, plf + + +@verbose +def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7, + baseline=None, baseline_mode='ratio', times=None, + decim=1, n_jobs=1, zero_mean=False, verbose=None): + """Compute time-frequency power on single epochs + + Parameters + ---------- + data : array of shape [n_epochs, n_channels, n_times] + The epochs + sfreq : float + Sampling rate + frequencies : array-like + The 
frequencies + use_fft : bool + Use the FFT for convolutions or not. + n_cycles : float | array of float + Number of cycles in the Morlet wavelet. Fixed number + or one per frequency. + baseline : None (default) or tuple of length 2 + The time interval to apply baseline correction. + If None do not apply it. If baseline is (a, b) + the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used + and if b is None then b is set to the end of the interval. + If baseline is equal ot (None, None) all the time + interval is used. + baseline_mode : None | 'ratio' | 'zscore' + Do baseline correction with ratio (power is divided by mean + power during baseline) or zscore (power is divided by standard + deviation of power during baseline after subtracting the mean, + power = [power - mean(power_baseline)] / std(power_baseline)) + times : array + Required to define baseline + decim : int + Temporal decimation factor + n_jobs : int + The number of epochs to process at the same time + zero_mean : bool + Make sure the wavelets are zero mean. + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). + + Returns + ------- + power : 4D array + Power estimate (Epochs x Channels x Frequencies x Timepoints). + """ + mode = 'same' + n_frequencies = len(frequencies) + n_epochs, n_channels, n_times = data[:, :, ::decim].shape + + # Precompute wavelets for given frequency range to save time + Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean) + + parallel, my_cwt, _ = parallel_func(cwt, n_jobs) + + logger.info("Computing time-frequency power on single epochs...") + + power = np.empty((n_epochs, n_channels, n_frequencies, n_times), + dtype=np.float) + + # Package arguments for `cwt` here to minimize omissions where only one of + # the two calls below is updated with new function arguments. + cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim) + if n_jobs == 1: + for k, e in enumerate(data): + x = cwt(e, **cwt_kw) + power[k] = (x * x.conj()).real + else: + # Precompute tf decompositions in parallel + tfrs = parallel(my_cwt(e, **cwt_kw) for e in data) + for k, tfr in enumerate(tfrs): + power[k] = (tfr * tfr.conj()).real + + # Run baseline correction. Be sure to decimate the times array as well if + # needed. + if times is not None: + times = times[::decim] + power = rescale(power, times, baseline, baseline_mode, copy=False) + return power + + +def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7, + decim=1, n_jobs=1, zero_mean=False): + """Compute time induced power and inter-trial phase-locking factor + + The time frequency decomposition is done with Morlet wavelets + + Parameters + ---------- + data : array + 3D array of shape [n_epochs, n_channels, n_times] + sfreq : float + sampling Frequency + frequencies : array + Array of frequencies of interest + use_fft : bool + Compute transform with fft based convolutions or temporal + convolutions. + n_cycles : float | array of float + Number of cycles. Fixed number or one per frequency. + decim: int + Temporal decimation factor + n_jobs : int + The number of CPUs used in parallel. All CPUs are used in -1. + Requires joblib package. + zero_mean : bool + Make sure the wavelets are zero mean. + + Returns + ------- + power : 2D array + Induced power (Channels x Frequencies x Timepoints). + Squared amplitude of time-frequency coefficients. 
+ phase_lock : 2D array + Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints) + """ + n_frequencies = len(frequencies) + n_epochs, n_channels, n_times = data[:, :, ::decim].shape + + # Precompute wavelets for given frequency range to save time + Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean) + + psd = np.empty((n_channels, n_frequencies, n_times)) + plf = np.empty((n_channels, n_frequencies, n_times)) + # Separate to save memory for n_jobs=1 + parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs) + psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim) + for c in range(n_channels)) + for c, (psd_c, plf_c) in enumerate(psd_plf): + psd[c, :, :], plf[c, :, :] = psd_c, plf_c + return psd, plf + + +def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode, + baseline, vmin, vmax, dB): + """Aux Function to prepare tfr computation""" + from ..viz.utils import _setup_vmin_vmax + + if mode is not None and baseline is not None: + logger.info("Applying baseline correction '%s' during %s" % + (mode, baseline)) + data = rescale(data.copy(), times, baseline, mode) + + # crop time + itmin, itmax = None, None + idx = np.where(_time_mask(times, tmin, tmax))[0] + if tmin is not None: + itmin = idx[0] + if tmax is not None: + itmax = idx[-1] + 1 + + times = times[itmin:itmax] + + # crop freqs + ifmin, ifmax = None, None + idx = np.where(_time_mask(freqs, fmin, fmax))[0] + if fmin is not None: + ifmin = idx[0] + if fmax is not None: + ifmax = idx[-1] + 1 + + freqs = freqs[ifmin:ifmax] + + # crop data + data = data[:, ifmin:ifmax, itmin:itmax] + + times *= 1e3 + if dB: + data = 10 * np.log10((data * data.conj()).real) + + vmin, vmax = _setup_vmin_vmax(data, vmin, vmax) + return data, times, freqs, vmin, vmax + + +class AverageTFR(ContainsMixin, UpdateChannelsMixin): + """Container for Time-Frequency data + + Can for example store induced power at sensor level or intertrial + coherence. + + Parameters + ---------- + info : Info + The measurement info. + data : ndarray, shape (n_channels, n_freqs, n_times) + The data. + times : ndarray, shape (n_times,) + The time values in seconds. + freqs : ndarray, shape (n_freqs,) + The frequencies in Hz. + nave : int + The number of averaged TFRs. + comment : str | None + Comment on the data, e.g., the experimental condition. + Defaults to None. + method : str | None + Comment on the method used to compute the data, e.g., morlet wavelet. + Defaults to None. + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). + + Attributes + ---------- + ch_names : list + The names of the channels. + """ + @verbose + def __init__(self, info, data, times, freqs, nave, comment=None, + method=None, verbose=None): + self.info = info + if data.ndim != 3: + raise ValueError('data should be 3d. Got %d.' % data.ndim) + n_channels, n_freqs, n_times = data.shape + if n_channels != len(info['chs']): + raise ValueError("Number of channels and data size don't match" + " (%d != %d)." % (n_channels, len(info['chs']))) + if n_freqs != len(freqs): + raise ValueError("Number of frequencies and data size don't match" + " (%d != %d)." % (n_freqs, len(freqs))) + if n_times != len(times): + raise ValueError("Number of times and data size don't match" + " (%d != %d)." 
% (n_times, len(times))) + self.data = data + self.times = times + self.freqs = freqs + self.nave = nave + self.comment = comment + self.method = method + + @property + def ch_names(self): + return self.info['ch_names'] + + def crop(self, tmin=None, tmax=None, copy=False): + """Crop data to a given time interval + + Parameters + ---------- + tmin : float | None + Start time of selection in seconds. + tmax : float | None + End time of selection in seconds. + copy : bool + If False epochs is cropped in place. + """ + inst = self if not copy else self.copy() + mask = _time_mask(inst.times, tmin, tmax) + inst.times = inst.times[mask] + inst.data = inst.data[..., mask] + return inst + + @verbose + def plot(self, picks=None, baseline=None, mode='mean', tmin=None, + tmax=None, fmin=None, fmax=None, vmin=None, vmax=None, + cmap='RdBu_r', dB=False, colorbar=True, show=True, + title=None, axes=None, verbose=None): + """Plot TFRs in a topography with images + + Parameters + ---------- + picks : array-like of int | None + The indices of the channels to plot. + baseline : None (default) or tuple of length 2 + The time interval to apply baseline correction. + If None do not apply it. If baseline is (a, b) + the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used + and if b is None then b is set to the end of the interval. + If baseline is equal ot (None, None) all the time + interval is used. + mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' + Do baseline correction with ratio (power is divided by mean + power during baseline) or zscore (power is divided by standard + deviation of power during baseline after subtracting the mean, + power = [power - mean(power_baseline)] / std(power_baseline)). + If None no baseline correction is applied. + tmin : None | float + The first time instant to display. If None the first time point + available is used. + tmax : None | float + The last time instant to display. If None the last time point + available is used. + fmin : None | float + The first frequency to display. If None the first frequency + available is used. + fmax : None | float + The last frequency to display. If None the last frequency + available is used. + vmin : float | None + The mininum value an the color scale. If vmin is None, the data + minimum value is used. + vmax : float | None + The maxinum value an the color scale. If vmax is None, the data + maximum value is used. + cmap : matplotlib colormap | str + The colormap to use. Defaults to 'RdBu_r'. + dB : bool + If True, 20*log10 is applied to the data to get dB. + colorbar : bool + If true, colorbar will be added to the plot. For user defined axes, + the colorbar cannot be drawn. Defaults to True. + show : bool + Call pyplot.show() at the end. + title : str | None + String for title. Defaults to None (blank/no title). + axes : instance of Axes | list | None + The axes to plot to. If list, the list must be a list of Axes of + the same length as the number of channels. If instance of Axes, + there must be only one channel plotted. + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). + + Returns + ------- + fig : matplotlib.figure.Figure + The figure containing the topography. 
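
        Examples
        --------
        An illustrative sketch (assumes ``tfr`` is an existing AverageTFR
        instance; the channel index and frequency band are made-up values)::

            tfr.plot(picks=[0], baseline=(None, 0), mode='ratio',
                     fmin=4., fmax=30., dB=True)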
+ """ + from ..viz.topo import _imshow_tfr + import matplotlib.pyplot as plt + times, freqs = self.times.copy(), self.freqs.copy() + data = self.data[picks] + + data, times, freqs, vmin, vmax = \ + _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode, + baseline, vmin, vmax, dB) + + tmin, tmax = times[0], times[-1] + if isinstance(axes, plt.Axes): + axes = [axes] + if isinstance(axes, list) and len(axes) != len(picks): + raise RuntimeError('There must be an axes for each picked ' + 'channel.') + if colorbar: + logger.warning('Cannot draw colorbar for user defined axes.') + for idx in range(len(data)): + if axes is None: + fig = plt.figure() + ax = fig.add_subplot(111) + else: + ax = axes[idx] + fig = ax.get_figure() + _imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, ylim=None, + tfr=data[idx: idx + 1], freq=freqs, + x_label='Time (ms)', y_label='Frequency (Hz)', + colorbar=False, picker=False, cmap=cmap) + if title: + fig.suptitle(title) + if show: + plt.show() + return fig + + def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None, + tmax=None, fmin=None, fmax=None, vmin=None, vmax=None, + layout=None, cmap='RdBu_r', title=None, dB=False, + colorbar=True, layout_scale=0.945, show=True, + border='none', fig_facecolor='k', font_color='w'): + """Plot TFRs in a topography with images + + Parameters + ---------- + picks : array-like of int | None + The indices of the channels to plot. If None all available + channels are displayed. + baseline : None (default) or tuple of length 2 + The time interval to apply baseline correction. + If None do not apply it. If baseline is (a, b) + the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used + and if b is None then b is set to the end of the interval. + If baseline is equal ot (None, None) all the time + interval is used. + mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' + Do baseline correction with ratio (power is divided by mean + power during baseline) or zscore (power is divided by standard + deviation of power during baseline after subtracting the mean, + power = [power - mean(power_baseline)] / std(power_baseline)). + If None no baseline correction is applied. + tmin : None | float + The first time instant to display. If None the first time point + available is used. + tmax : None | float + The last time instant to display. If None the last time point + available is used. + fmin : None | float + The first frequency to display. If None the first frequency + available is used. + fmax : None | float + The last frequency to display. If None the last frequency + available is used. + vmin : float | None + The mininum value an the color scale. If vmin is None, the data + minimum value is used. + vmax : float | None + The maxinum value an the color scale. If vmax is None, the data + maximum value is used. + layout : Layout | None + Layout instance specifying sensor positions. If possible, the + correct layout is inferred from the data. + cmap : matplotlib colormap | str + The colormap to use. Defaults to 'RdBu_r'. + title : str + Title of the figure. + dB : bool + If True, 20*log10 is applied to the data to get dB. + colorbar : bool + If true, colorbar will be added to the plot + layout_scale : float + Scaling factor for adjusting the relative size of the layout + on the canvas. + show : bool + Call pyplot.show() at the end. + border : str + matplotlib borders style to be used for each sensor plot. + fig_facecolor : str | obj + The figure face color. Defaults to black. 
+ font_color: str | obj + The color of tick labels in the colorbar. Defaults to white. + + Returns + ------- + fig : matplotlib.figure.Figure + The figure containing the topography. + """ + from ..viz.topo import _imshow_tfr, _plot_topo + import matplotlib.pyplot as plt + times = self.times.copy() + freqs = self.freqs + data = self.data + info = self.info + + if picks is not None: + data = data[picks] + info = pick_info(info, picks) + + data, times, freqs, vmin, vmax = \ + _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, + mode, baseline, vmin, vmax, dB) + + if layout is None: + from mne import find_layout + layout = find_layout(self.info) + + imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap) + + fig = _plot_topo(info=info, times=times, + show_func=imshow, layout=layout, + colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap, + layout_scale=layout_scale, title=title, border=border, + x_label='Time (ms)', y_label='Frequency (Hz)', + fig_facecolor=fig_facecolor, + font_color=font_color) + + if show: + plt.show() + + return fig + + def _check_compat(self, tfr): + """checks that self and tfr have the same time-frequency ranges""" + assert np.all(tfr.times == self.times) + assert np.all(tfr.freqs == self.freqs) + + def __add__(self, tfr): + self._check_compat(tfr) + out = self.copy() + out.data += tfr.data + return out + + def __iadd__(self, tfr): + self._check_compat(tfr) + self.data += tfr.data + return self + + def __sub__(self, tfr): + self._check_compat(tfr) + out = self.copy() + out.data -= tfr.data + return out + + def __isub__(self, tfr): + self._check_compat(tfr) + self.data -= tfr.data + return self + + def copy(self): + """Return a copy of the instance.""" + return deepcopy(self) + + def __repr__(self): + s = "time : [%f, %f]" % (self.times[0], self.times[-1]) + s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1]) + s += ", nave : %d" % self.nave + s += ', channels : %d' % self.data.shape[0] + return "" % s + + def apply_baseline(self, baseline, mode='mean'): + """Baseline correct the data + + Parameters + ---------- + baseline : tuple or list of length 2 + The time interval to apply rescaling / baseline correction. + If None do not apply it. If baseline is (a, b) + the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used + and if b is None then b is set to the end of the interval. + If baseline is equal to (None, None) all the time + interval is used. + mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' + Do baseline correction with ratio (power is divided by mean + power during baseline) or z-score (power is divided by standard + deviation of power during baseline after subtracting the mean, + power = [power - mean(power_baseline)] / std(power_baseline)) + If None, baseline no correction will be performed. + """ + self.data = rescale(self.data, self.times, baseline, mode, copy=False) + + def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None, + ch_type=None, baseline=None, mode='mean', + layout=None, vmin=None, vmax=None, cmap='RdBu_r', + sensors=True, colorbar=True, unit=None, res=64, size=2, + cbar_fmt='%1.1e', show_names=False, title=None, + axes=None, show=True, outlines='head', head_pos=None): + """Plot topographic maps of time-frequency intervals of TFR data + + Parameters + ---------- + tmin : None | float + The first time instant to display. If None the first time point + available is used. + tmax : None | float + The last time instant to display. 
If None the last time point + available is used. + fmin : None | float + The first frequency to display. If None the first frequency + available is used. + fmax : None | float + The last frequency to display. If None the last frequency + available is used. + ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None + The channel type to plot. For 'grad', the gradiometers are + collected in pairs and the RMS for each pair is plotted. + If None, then channels are chosen in the order given above. + baseline : tuple or list of length 2 + The time interval to apply rescaling / baseline correction. + If None do not apply it. If baseline is (a, b) + the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used + and if b is None then b is set to the end of the interval. + If baseline is equal to (None, None) all the time + interval is used. + mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' + Do baseline correction with ratio (power is divided by mean + power during baseline) or z-score (power is divided by standard + deviation of power during baseline after subtracting the mean, + power = [power - mean(power_baseline)] / std(power_baseline)) + If None, baseline no correction will be performed. + layout : None | Layout + Layout instance specifying sensor positions (does not need to + be specified for Neuromag data). If possible, the correct layout + file is inferred from the data; if no appropriate layout file was + found, the layout is automatically generated from the sensor + locations. + vmin : float | callable + The value specfying the lower bound of the color range. + If None, and vmax is None, -vmax is used. Else np.min(data). + If callable, the output equals vmin(data). + vmax : float | callable + The value specfying the upper bound of the color range. + If None, the maximum absolute value is used. If vmin is None, + but vmax is not, defaults to np.min(data). + If callable, the output equals vmax(data). + cmap : matplotlib colormap + Colormap. For magnetometers and eeg defaults to 'RdBu_r', else + 'Reds'. + sensors : bool | str + Add markers for sensor locations to the plot. Accepts matplotlib + plot format string (e.g., 'r+' for red plusses). If True, a circle + will be used (via .add_artist). Defaults to True. + colorbar : bool + Plot a colorbar. + unit : dict | str | None + The unit of the channel type used for colorbar label. If + scale is None the unit is automatically determined. + res : int + The resolution of the topomap image (n pixels along each side). + size : float + Side length per topomap in inches. + cbar_fmt : str + String format for colorbar values. + show_names : bool | callable + If True, show channel names on top of the map. If a callable is + passed, channel names will be formatted using the callable; e.g., + to delete the prefix 'MEG ' from all channel names, pass the + function lambda x: x.replace('MEG ', ''). If `mask` is not None, + only significant sensors will be shown. + title : str | None + Title. If None (default), no title is displayed. + axes : instance of Axes | None + The axes to plot to. If None the axes is defined automatically. + show : bool + Call pyplot.show() at the end. + outlines : 'head' | dict | None + The outlines to be drawn. If 'head', a head scheme will be drawn. + If dict, each key refers to a tuple of x and y positions. + The values in 'mask_pos' will serve as image mask. If None, nothing + will be drawn. Defaults to 'head'. 
If dict, the 'autoshrink' (bool) + field will trigger automated shrinking of the positions due to + points outside the outline. Moreover, a matplotlib patch object can + be passed for advanced masking options, either directly or as a + function that returns patches (required for multi-axis plots). + head_pos : dict | None + If None (default), the sensors are positioned such that they span + the head circle. If dict, can have entries 'center' (tuple) and + 'scale' (tuple) for what the center and scale of the head should be + relative to the electrode locations. + + Returns + ------- + fig : matplotlib.figure.Figure + The figure containing the topography. + """ + from ..viz import plot_tfr_topomap + return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin, + fmax=fmax, ch_type=ch_type, baseline=baseline, + mode=mode, layout=layout, vmin=vmin, vmax=vmax, + cmap=cmap, sensors=sensors, colorbar=colorbar, + unit=unit, res=res, size=size, + cbar_fmt=cbar_fmt, show_names=show_names, + title=title, axes=axes, show=show, + outlines=outlines, head_pos=head_pos) + + def save(self, fname, overwrite=False): + """Save TFR object to hdf5 file + + Parameters + ---------- + fname : str + The file name, which should end with -tfr.h5 . + overwrite : bool + If True, overwrite file (if it exists). Defaults to false + """ + write_tfrs(fname, self, overwrite=overwrite) + + +def _prepare_write_tfr(tfr, condition): + """Aux function""" + return (condition, dict(times=tfr.times, freqs=tfr.freqs, + data=tfr.data, info=tfr.info, nave=tfr.nave, + comment=tfr.comment, method=tfr.method)) + + +def write_tfrs(fname, tfr, overwrite=False): + """Write a TFR dataset to hdf5. + + Parameters + ---------- + fname : string + The file name, which should end with -tfr.h5 + tfr : AverageTFR instance, or list of AverageTFR instances + The TFR dataset, or list of TFR datasets, to save in one file. + Note. If .comment is not None, a name will be generated on the fly, + based on the order in which the TFR objects are passed + overwrite : bool + If True, overwrite file (if it exists). Defaults to False. + + See Also + -------- + read_tfrs + + Notes + ----- + .. versionadded:: 0.9.0 + """ + out = [] + if not isinstance(tfr, (list, tuple)): + tfr = [tfr] + for ii, tfr_ in enumerate(tfr): + comment = ii if tfr_.comment is None else tfr_.comment + out.append(_prepare_write_tfr(tfr_, condition=comment)) + write_hdf5(fname, out, overwrite=overwrite) + + +def read_tfrs(fname, condition=None): + """ + Read TFR datasets from hdf5 file. + + Parameters + ---------- + fname : string + The file name, which should end with -tfr.h5 . + condition : int or str | list of int or str | None + The condition to load. If None, all conditions will be returned. + Defaults to None. + + See Also + -------- + write_tfrs + + Returns + ------- + tfrs : list of instances of AverageTFR | instance of AverageTFR + Depending on `condition` either the TFR object or a list of multiple + TFR objects. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + + check_fname(fname, 'tfr', ('-tfr.h5',)) + + logger.info('Reading %s ...' % fname) + tfr_data = read_hdf5(fname) + if condition is not None: + tfr_dict = dict(tfr_data) + if condition not in tfr_dict: + keys = ['%s' % k for k in tfr_dict] + raise ValueError('Cannot find condition ("{0}") in this file. 
' + 'I can give you "{1}""' + .format(condition, " or ".join(keys))) + out = AverageTFR(**tfr_dict[condition]) + else: + out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]] + return out + + +def tfr_morlet(inst, freqs, n_cycles, use_fft=False, + return_itc=True, decim=1, n_jobs=1): + """Compute Time-Frequency Representation (TFR) using Morlet wavelets + + Parameters + ---------- + inst : Epochs | Evoked + The epochs or evoked object. + freqs : ndarray, shape (n_freqs,) + The frequencies in Hz. + n_cycles : float | ndarray, shape (n_freqs,) + The number of cycles globally or for each frequency. + use_fft : bool + The fft based convolution or not. + return_itc : bool + Return intertrial coherence (ITC) as well as averaged power. + Must be ``False`` for evoked data. + decim : int + The decimation factor on the time axis. To reduce memory usage. + n_jobs : int + The number of jobs to run in parallel. + + Returns + ------- + power : instance of AverageTFR + The averaged power. + itc : instance of AverageTFR + The intertrial coherence (ITC). Only returned if return_itc + is True. + + See Also + -------- + tfr_multitaper, tfr_stockwell + """ + data = _get_data(inst, return_itc) + picks = pick_types(inst.info, meg=True, eeg=True) + info = pick_info(inst.info, picks) + data = data[:, picks, :] + power, itc = _induced_power_cwt(data, sfreq=info['sfreq'], + frequencies=freqs, + n_cycles=n_cycles, n_jobs=n_jobs, + use_fft=use_fft, decim=decim, + zero_mean=True) + times = inst.times[::decim].copy() + nave = len(data) + out = AverageTFR(info, power, times, freqs, nave, method='morlet-power') + if return_itc: + out = (out, AverageTFR(info, itc, times, freqs, nave, + method='morlet-itc')) + return out + + +@verbose +def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0, + use_fft=True, n_cycles=7, decim=1, n_jobs=1, + zero_mean=True, verbose=None): + """Compute time induced power and inter-trial phase-locking factor + + The time frequency decomposition is done with DPSS wavelets + + Parameters + ---------- + data : np.ndarray, shape (n_epochs, n_channels, n_times) + The input data. + sfreq : float + sampling Frequency + frequencies : np.ndarray, shape (n_frequencies,) + Array of frequencies of interest + time_bandwidth : float + Time x (Full) Bandwidth product. + The number of good tapers (low-bias) is chosen automatically based on + this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers). + use_fft : bool + Compute transform with fft based convolutions or temporal + convolutions. Defaults to True. + n_cycles : float | np.ndarray shape (n_frequencies,) + Number of cycles. Fixed number or one per frequency. Defaults to 7. + decim: int + Temporal decimation factor. Defaults to 1. + n_jobs : int + The number of CPUs used in parallel. All CPUs are used in -1. + Requires joblib package. Defaults to 1. + zero_mean : bool + Make sure the wavelets are zero mean. Defaults to True. + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). + + Returns + ------- + power : np.ndarray, shape (n_channels, n_frequencies, n_times) + Induced power. Squared amplitude of time-frequency coefficients. + itc : np.ndarray, shape (n_channels, n_frequencies, n_times) + Phase locking value. 
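
    Example (illustrative sketch; ``epochs_data`` is an assumed array of
    shape (n_epochs, n_channels, n_times), not defined in this module)::

        power, itc = _induced_power_mtm(epochs_data, sfreq=1000.,
                                        frequencies=np.arange(8., 30., 2.),
                                        time_bandwidth=4.0, n_cycles=7)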
    """
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
    n_frequencies = len(frequencies)
    logger.info('Multitaper time-frequency analysis for %d frequencies',
                n_frequencies)

    # Precompute wavelets for given frequency range to save time
    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    n_taps = len(Ws)
    logger.info('Using %d tapers', n_taps)
    n_times_wavelets = Ws[0][0].shape[0]
    if n_times <= n_times_wavelets:
        warnings.warn("Time windows are as long or longer than the epoch. "
                      "Consider reducing n_cycles.")
    psd = np.zeros((n_channels, n_frequencies, n_times))
    itc = np.zeros((n_channels, n_frequencies, n_times))
    parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
    for m in range(n_taps):
        psd_itc = parallel(my_time_frequency(data[:, c, :],
                                             Ws[m], use_fft, decim)
                           for c in range(n_channels))
        for c, (psd_c, itc_c) in enumerate(psd_itc):
            psd[c, :, :] += psd_c
            itc[c, :, :] += itc_c
    psd /= n_taps
    itc /= n_taps
    return psd, itc


def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
                   return_itc=True, decim=1, n_jobs=1):
    """Compute Time-Frequency Representation (TFR) using DPSS wavelets

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float (optional)
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        Default is 4.0 (3 good tapers).
        E.g., with freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool
        Whether to use FFT-based convolution (as opposed to temporal
        convolution). Defaults to True.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
        Defaults to True.
    decim : int
        The decimation factor on the time axis, to reduce memory usage.
        Note that this is brute-force decimation; no anti-aliasing is done.
        Defaults to 1.
    n_jobs : int
        The number of jobs to run in parallel. Defaults to 1.

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    tfr_morlet, tfr_stockwell

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    data = _get_data(inst, return_itc)
    picks = pick_types(inst.info, meg=True, eeg=True)
    info = pick_info(inst.info, picks)
    data = data[:, picks, :]
    power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
                                    frequencies=freqs, n_cycles=n_cycles,
                                    time_bandwidth=time_bandwidth,
                                    use_fft=use_fft, decim=decim,
                                    n_jobs=n_jobs, zero_mean=True,
                                    verbose='INFO')
    times = inst.times[::decim].copy()
    nave = len(data)
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
    if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
    return out

# -*- coding: utf-8 -*-
#
# Debug/Helper script for CSV stylesheet development
#
# >>> python csv2xml <csv_file>
# ... converts the CSV file into XML
#
# >>> python csv2xml <csv_file> <stylesheet.xsl>
# ... converts the CSV file into XML and transforms it using the stylesheet
#

import csv
import sys

from lxml import etree
from xml.sax.saxutils import escape, unescape

TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"
TAG = "tag"
HASHTAG = "hashtag"

# -----------------------------------------------------------------------------
def xml_encode(s):

    if s:
        s = escape(s, {"'": "&apos;", '"': "&quot;"})
    return s

# -----------------------------------------------------------------------------
def xml_decode(s):

    if s:
        s = unescape(s, {"&apos;": "'", "&quot;": '"'})
    return s

# -----------------------------------------------------------------------------
def parse(source):

    parser = etree.XMLParser(no_network=False)
    result = etree.parse(source, parser)
    return result

# -----------------------------------------------------------------------------
def s3_unicode(s, encoding="utf-8"):
    """
        Convert an object into a unicode instance, to be used instead of
        unicode(s) (Note: user data should never be converted into str).

        @param s: the object
        @param encoding: the character encoding
    """

    if type(s) is unicode:
        return s
    try:
        if not isinstance(s, basestring):
            if hasattr(s, "__unicode__"):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, "strict")
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    s = " ".join([s3_unicode(arg, encoding) for arg in s])
        else:
            s = s.decode(encoding)
    except UnicodeDecodeError:
        if not isinstance(s, Exception):
            raise
        else:
            s = " ".join([s3_unicode(arg, encoding) for arg in s])
    return s

# -----------------------------------------------------------------------------
def csv2tree(source,
             delimiter=",",
             quotechar='"'):

    # Increase field size to be able to import WKTs
    csv.field_size_limit(2**20 * 100)  # 100 megs

    # Shortcuts
    SubElement = etree.SubElement

    root = etree.Element(TABLE)

    def add_col(row, key, value, hashtags=None):

        col = SubElement(row, COL)
        col.set(FIELD, s3_unicode(key))
        if hashtags:
            hashtag = hashtags.get(key)
            if hashtag and hashtag[1:]:
                col.set(HASHTAG, hashtag)
        if value:
            text = s3_unicode(value).strip()
            if text[:6].lower() not in ("null", "<null>"):
                col.text = text
        else:
            col.text = ""

    def utf_8_encode(source):

        encodings = ["utf-8-sig", "iso-8859-1"]
        e = encodings[0]
        for line in source:
            if e:
                try:
                    yield unicode(line, e, "strict").encode("utf-8")
                except:
                    pass
                else:
                    continue
            for encoding in encodings:
                try:
                    yield unicode(line, encoding, "strict").encode("utf-8")
                except:
                    continue
                else:
                    e = encoding
                    break

    hashtags = {}

    import StringIO
    if not isinstance(source, StringIO.StringIO):
        source = utf_8_encode(source)
    reader = csv.DictReader(source,
                            delimiter=delimiter,
                            quotechar=quotechar)

    for i, r in enumerate(reader):
        # Skip empty rows
        if not any(r.values()):
            continue
        if i == 0:
            # Auto-detect hashtags
            items = {}
            for k, v in r.items():
                if v:
                    try:
                        v = v.strip()
                    except AttributeError:  # v is a List
                        v = s3_unicode(v)
                    items[k] = v
            if all(v[0] == '#' for v in items.values()):
                hashtags.update(items)
                continue
        row = SubElement(root, ROW)
        for k in r:
            add_col(row, k, r[k], hashtags=hashtags)

    return etree.ElementTree(root)

# -----------------------------------------------------------------------------
def transform(tree, stylesheet_path, **args):

    if args:
        _args = [(k, "'%s'" % args[k]) for k in args]
        _args = dict(_args)
    else:
        _args = None
    stylesheet = etree.parse(stylesheet_path)

    ac = etree.XSLTAccessControl(read_file=True, read_network=True)
    transformer = etree.XSLT(stylesheet, access_control=ac)
    if _args:
        result = transformer(tree, **_args)
    else:
        result = transformer(tree)
    return result

# -----------------------------------------------------------------------------
def main(argv):

    try:
        csvpath = argv[0]
    except:
        print "Usage: python csv2xml <csv_file> [<stylesheet.xsl>]"
        return
    try:
        xslpath = argv[1]
    except:
        xslpath = None

    csvfile = open(csvpath)
    tree = csv2tree(csvfile)

    if xslpath is not None:
        tree = transform(tree, xslpath)

    print etree.tostring(tree, pretty_print=True)

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))

# END =========================================================================

# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

import pytest

from case import Mock

from kombu.asynchronous.aws.sqs.message import AsyncMessage
from kombu.asynchronous.aws.sqs.queue import AsyncQueue

from t.mocks import PromiseMock

from ..case import AWSCase


class test_AsyncQueue(AWSCase):

    def setup(self):
        self.conn = Mock(name='connection')
        self.x = AsyncQueue(self.conn, '/url')
        self.callback = PromiseMock(name='callback')

    def test_message_class(self):
        assert issubclass(self.x.message_class, AsyncMessage)

    def test_get_attributes(self):
        self.x.get_attributes(attributes='QueueSize', callback=self.callback)
        self.x.connection.get_queue_attributes.assert_called_with(
            self.x, 'QueueSize', self.callback,
        )

    def test_set_attribute(self):
        self.x.set_attribute('key', 'value', callback=self.callback)
        self.x.connection.set_queue_attribute.assert_called_with(
            self.x, 'key', 'value', self.callback,
        )

    def test_get_timeout(self):
        self.x.get_timeout(callback=self.callback)
        self.x.connection.get_queue_attributes.assert_called()
        on_ready = self.x.connection.get_queue_attributes.call_args[0][2]
        self.x.connection.get_queue_attributes.assert_called_with(
            self.x, 'VisibilityTimeout', on_ready,
        )

        on_ready({'VisibilityTimeout': '303'})
        self.callback.assert_called_with(303)

    def test_set_timeout(self):
        self.x.set_timeout(808, callback=self.callback)
        self.x.connection.set_queue_attribute.assert_called()
        on_ready = self.x.connection.set_queue_attribute.call_args[0][3]
        self.x.connection.set_queue_attribute.assert_called_with(
            self.x, 'VisibilityTimeout', 808, on_ready,
        )
        on_ready(808)
        self.callback.assert_called_with(808)
        assert self.x.visibility_timeout == 808

        on_ready(None)
        assert self.x.visibility_timeout == 808

    def test_add_permission(self):
        self.x.add_permission(
            'label', 'accid', 'action', callback=self.callback,
        )
        self.x.connection.add_permission.assert_called_with(
            self.x, 'label', 'accid', 'action', self.callback,
        )

    def test_remove_permission(self):
        self.x.remove_permission('label', callback=self.callback)
        self.x.connection.remove_permission.assert_called_with(
            self.x, 'label', self.callback,
        )

    def test_read(self):
        self.x.read(visibility_timeout=909, callback=self.callback)
        self.x.connection.receive_message.assert_called()
        on_ready = self.x.connection.receive_message.call_args[1]['callback']
        self.x.connection.receive_message.assert_called_with(
            self.x, number_messages=1, visibility_timeout=909,
            attributes=None, wait_time_seconds=None,
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))

# END =========================================================================

# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

import pytest

from case import Mock

from kombu.asynchronous.aws.sqs.message import AsyncMessage
from kombu.asynchronous.aws.sqs.queue import AsyncQueue

from t.mocks import PromiseMock

from ..case import AWSCase


class test_AsyncQueue(AWSCase):

    def setup(self):
        self.conn = Mock(name='connection')
        self.x = AsyncQueue(self.conn, '/url')
        self.callback = PromiseMock(name='callback')

    def test_message_class(self):
        assert issubclass(self.x.message_class, AsyncMessage)

    def test_get_attributes(self):
        self.x.get_attributes(attributes='QueueSize', callback=self.callback)
        self.x.connection.get_queue_attributes.assert_called_with(
            self.x, 'QueueSize', self.callback,
        )

    def test_set_attribute(self):
        self.x.set_attribute('key', 'value', callback=self.callback)
        self.x.connection.set_queue_attribute.assert_called_with(
            self.x, 'key', 'value', self.callback,
        )

    def test_get_timeout(self):
        self.x.get_timeout(callback=self.callback)
        self.x.connection.get_queue_attributes.assert_called()
        on_ready = self.x.connection.get_queue_attributes.call_args[0][2]
        self.x.connection.get_queue_attributes.assert_called_with(
            self.x, 'VisibilityTimeout', on_ready,
        )

        on_ready({'VisibilityTimeout': '303'})
        self.callback.assert_called_with(303)

    def test_set_timeout(self):
        self.x.set_timeout(808, callback=self.callback)
        self.x.connection.set_queue_attribute.assert_called()
        on_ready = self.x.connection.set_queue_attribute.call_args[0][3]
        self.x.connection.set_queue_attribute.assert_called_with(
            self.x, 'VisibilityTimeout', 808, on_ready,
        )
        on_ready(808)
        self.callback.assert_called_with(808)
        assert self.x.visibility_timeout == 808

        on_ready(None)
        assert self.x.visibility_timeout == 808

    def test_add_permission(self):
        self.x.add_permission(
            'label', 'accid', 'action', callback=self.callback,
        )
        self.x.connection.add_permission.assert_called_with(
            self.x, 'label', 'accid', 'action', self.callback,
        )

    def test_remove_permission(self):
        self.x.remove_permission('label', callback=self.callback)
        self.x.connection.remove_permission.assert_called_with(
            self.x, 'label', self.callback,
        )

    def test_read(self):
        self.x.read(visibility_timeout=909, callback=self.callback)
        self.x.connection.receive_message.assert_called()
        on_ready = self.x.connection.receive_message.call_args[1]['callback']
        self.x.connection.receive_message.assert_called_with(
            self.x, number_messages=1, visibility_timeout=909,
            attributes=None, wait_time_seconds=None, callback=on_ready,
        )

        messages = [Mock(name='message1')]
        on_ready(messages)

        self.callback.assert_called_with(messages[0])

    def MockMessage(self, id, md5):
        m = Mock(name='Message-{0}'.format(id))
        m.id = id
        m.md5 = md5
        return m

    def test_write(self):
        message = self.MockMessage('id1', 'digest1')
        self.x.write(message, delay_seconds=303, callback=self.callback)
        self.x.connection.send_message.assert_called()
        on_ready = self.x.connection.send_message.call_args[1]['callback']
        self.x.connection.send_message.assert_called_with(
            self.x, message.get_body_encoded(), 303,
            callback=on_ready,
        )

        new_message = self.MockMessage('id2', 'digest2')
        on_ready(new_message)
        assert message.id == 'id2'
        assert message.md5 == 'digest2'

    def test_write_batch(self):
        messages = [('id1', 'A', 0), ('id2', 'B', 303)]
        self.x.write_batch(messages, callback=self.callback)
        self.x.connection.send_message_batch.assert_called_with(
            self.x, messages, callback=self.callback,
        )

    def test_delete_message(self):
        message = self.MockMessage('id1', 'digest1')
        self.x.delete_message(message, callback=self.callback)
        self.x.connection.delete_message.assert_called_with(
            self.x, message, self.callback,
        )

    def test_delete_message_batch(self):
        messages = [
            self.MockMessage('id1', 'r1'),
            self.MockMessage('id2', 'r2'),
        ]
        self.x.delete_message_batch(messages, callback=self.callback)
        self.x.connection.delete_message_batch.assert_called_with(
            self.x, messages, callback=self.callback,
        )

    def test_change_message_visibility_batch(self):
        messages = [
            (self.MockMessage('id1', 'r1'), 303),
            (self.MockMessage('id2', 'r2'), 909),
        ]
        self.x.change_message_visibility_batch(
            messages, callback=self.callback,
        )
        self.x.connection.change_message_visibility_batch.assert_called_with(
            self.x, messages, callback=self.callback,
        )

    def test_delete(self):
        self.x.delete(callback=self.callback)
        self.x.connection.delete_queue.assert_called_with(
            self.x, callback=self.callback,
        )

    def test_count(self):
        self.x.count(callback=self.callback)
        self.x.connection.get_queue_attributes.assert_called()
        on_ready = self.x.connection.get_queue_attributes.call_args[0][2]
        self.x.connection.get_queue_attributes.assert_called_with(
            self.x, 'ApproximateNumberOfMessages', on_ready,
        )

        on_ready({'ApproximateNumberOfMessages': '909'})
        self.callback.assert_called_with(909)
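
    # Added note: the methods exercised below appear to mirror blocking
    # convenience helpers on boto's Queue class; AsyncQueue leaves them
    # unimplemented, so each test simply asserts NotImplementedError.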
    def test_interface__count_slow(self):
        with pytest.raises(NotImplementedError):
            self.x.count_slow()

    def test_interface__dump(self):
        with pytest.raises(NotImplementedError):
            self.x.dump()

    def test_interface__save_to_file(self):
        with pytest.raises(NotImplementedError):
            self.x.save_to_file()

    def test_interface__save_to_filename(self):
        with pytest.raises(NotImplementedError):
            self.x.save_to_filename()

    def test_interface__save(self):
        with pytest.raises(NotImplementedError):
            self.x.save()

    def test_interface__save_to_s3(self):
        with pytest.raises(NotImplementedError):
            self.x.save_to_s3()

    def test_interface__load_from_s3(self):
        with pytest.raises(NotImplementedError):
            self.x.load_from_s3()

    def test_interface__load_from_file(self):
        with pytest.raises(NotImplementedError):
            self.x.load_from_file()

    def test_interface__load_from_filename(self):
        with pytest.raises(NotImplementedError):
            self.x.load_from_filename()

    def test_interface__load(self):
        with pytest.raises(NotImplementedError):
            self.x.load()

    def test_interface__clear(self):
        with pytest.raises(NotImplementedError):
            self.x.clear()

'''
Script to generate Kivy API from source code.

Code is messy, but working.
Be careful if you change anything in it!

'''

ignore_list = (
    'kivy._clock',
    'kivy._event',
    'kivy.factory_registers',
    'kivy.graphics.buffer',
    'kivy.graphics.vbo',
    'kivy.graphics.vertex',
    'kivy.uix.recycleview.__init__',
    'kivy.setupconfig',
    'kivy.version'
)

import os
import sys
from glob import glob

import kivy

# force loading of kivy modules
import kivy.app
import kivy.metrics
import kivy.atlas
import kivy.context
import kivy.core.audio
import kivy.core.camera
import kivy.core.clipboard
import kivy.core.gl
import kivy.core.image
import kivy.core.spelling
import kivy.core.text
import kivy.core.text.markup
import kivy.core.video
import kivy.core.window
import kivy.geometry
import kivy.graphics
import kivy.graphics.shader
import kivy.graphics.tesselator
import kivy.animation
import kivy.modules.console
import kivy.modules.keybinding
import kivy.modules.monitor
import kivy.modules.touchring
import kivy.modules.inspector
import kivy.modules.recorder
import kivy.modules.screen
import kivy.storage
import kivy.storage.dictstore
import kivy.storage.jsonstore
import kivy.storage.redisstore
import kivy.network.urlrequest
import kivy.modules.webdebugger
import kivy.support
import kivy.tools.packaging.pyinstaller_hooks
import kivy.input.recorder
import kivy.interactive
import kivy.garden
from kivy.factory import Factory
from kivy.lib import osc, ddsfile, mtdev

# check for silenced build
BE_QUIET = True
if os.environ.get('BE_QUIET') == 'False':
    BE_QUIET = False

# force loading of all classes from the factory
for x in list(Factory.classes.keys())[:]:
    getattr(Factory, x)

# Directory of the doc
base_dir = os.path.dirname(__file__)
dest_dir = os.path.join(base_dir, 'sources')
examples_framework_dir = os.path.join(base_dir, '..', 'examples', 'framework')

# Create the touch file marking that autobuild has run
base = 'autobuild.py-done'
with open(os.path.join(base_dir, base), 'w') as f:
    f.write('')


def writefile(filename, data):
    global dest_dir
    # avoid rewriting the file if the content didn't change
    f = os.path.join(dest_dir, filename)
    if not BE_QUIET:
        print('write', filename)
    if os.path.exists(f):
        with open(f) as fd:
            if fd.read() == data:
                return
    with open(f, 'w') as h:
        h.write(data)


# Activate Kivy modules
'''
for k in kivy.kivy_modules.list().keys():
    kivy.kivy_modules.import_module(k)
'''


# Search all kivy modules
l = [(x, sys.modules[x],
      os.path.basename(sys.modules[x].__file__).rsplit('.', 1)[0])
     for x in sys.modules if x.startswith('kivy') and sys.modules[x]]


# Extract packages from modules
packages = []
modules = {}
api_modules = []
for name, module, filename in l:
    if name in ignore_list:
        continue
    if not any([name.startswith(x) for x in ignore_list]):
        api_modules.append(name)
    if filename == '__init__':
        packages.append(name)
    else:
        if hasattr(module, '__all__'):
            modules[name] = module.__all__
        else:
            modules[name] = [x for x in dir(module) if not x.startswith('__')]

packages.sort()
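
# Added note: after the loop above, every surviving dotted name has been
# classified exactly once: 'packages' holds names whose file was an __init__
# (e.g. 'kivy.core'), while 'modules' maps leaf modules to their public
# attribute names. A purely illustrative sanity check (not executed by the
# original script):
#
#     assert not set(packages) & set(modules)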

# Create index
api_index = '''API Reference
-------------

The API reference is a lexicographic list of all the different classes,
methods and features that Kivy offers.

.. toctree::
    :maxdepth: 1

'''
api_modules.sort()
for package in api_modules:
    api_index += "    api-%s.rst\n" % package

writefile('api-index.rst', api_index)


# Create index for all packages
# Note on displaying inherited members:
# adding the directive ':inherited-members:' to automodule achieves this
# but is not always desired. Please see
# https://github.com/kivy/kivy/pull/3870

template = '\n'.join((
    '=' * 100,
    '$SUMMARY',
    '=' * 100,
    '''
$EXAMPLES_REF

.. automodule:: $PACKAGE
    :members:
    :show-inheritance:

.. toctree::

$EXAMPLES
'''))


template_examples = '''.. _example-reference%d:

Examples
--------

%s
'''

template_examples_ref = ('# :ref:`Jump directly to Examples'
                         ' <example-reference%d>`')


def extract_summary_line(doc):
    """
    :param doc: the __doc__ field of a module
    :return: a doc string suitable for a header, or an empty string
    """
    if doc is None:
        return ''
    for line in doc.split('\n'):
        line = line.strip()
        # don't take empty lines
        if len(line) < 1:
            continue
        # skip ref marks
        if line.startswith('.. _'):
            continue
        return line
    return ''

for package in packages:
    summary = extract_summary_line(sys.modules[package].__doc__)
    if summary is None or summary == '':
        summary = 'NO DOCUMENTATION (package %s)' % package
    t = template.replace('$SUMMARY', summary)
    t = t.replace('$PACKAGE', package)
    t = t.replace('$EXAMPLES_REF', '')
    t = t.replace('$EXAMPLES', '')

    # search packages
    for subpackage in packages:
        packagemodule = subpackage.rsplit('.', 1)[0]
        if packagemodule != package or len(subpackage.split('.')) <= 2:
            continue
        t += "    api-%s.rst\n" % subpackage

    # search modules
    m = list(modules.keys())
    m.sort(key=lambda x: extract_summary_line(sys.modules[x].__doc__).upper())
    for module in m:
        packagemodule = module.rsplit('.', 1)[0]
        if packagemodule != package:
            continue
        t += "    api-%s.rst\n" % module

    writefile('api-%s.rst' % package, t)
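
# Illustrative sketch (added, not part of the original script): what
# extract_summary_line() picks out of a typical module docstring. The sample
# docstring below is made up.
def _summary_example():
    doc = '\n'.join(('.. _mod-foo:', '', 'Foo widgets', '', 'Longer text.'))
    # Blank lines and the '.. _' ref mark are skipped, so 'Foo widgets'
    # is returned.
    return extract_summary_line(doc)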

# Create index for all modules
m = list(modules.keys())
m.sort()
refid = 0
for module in m:
    summary = extract_summary_line(sys.modules[module].__doc__)
    if summary is None or summary == '':
        summary = 'NO DOCUMENTATION (module %s)' % module

    # search examples
    example_output = []
    example_prefix = module
    if module.startswith('kivy.'):
        example_prefix = module[5:]
    example_prefix = example_prefix.replace('.', '_')

    # try to find any matching example in the framework directory
    list_examples = glob('%s*.py' % os.path.join(
        examples_framework_dir, example_prefix))
    for x in list_examples:
        # extract filename without directory
        xb = os.path.basename(x)

        # add a section!
        example_output.append('File :download:`%s <%s>` ::' % (
            xb, os.path.join('..', x)))

        # put the file in
        with open(x, 'r') as fd:
            d = fd.read().strip()
            d = '\t' + '\n\t'.join(d.split('\n'))
        example_output.append(d)

    t = template.replace('$SUMMARY', summary)
    t = t.replace('$PACKAGE', module)
    if len(example_output):
        refid += 1
        example_output = template_examples % (
            refid, '\n\n\n'.join(example_output))
        t = t.replace('$EXAMPLES_REF', template_examples_ref % refid)
        t = t.replace('$EXAMPLES', example_output)
    else:
        t = t.replace('$EXAMPLES_REF', '')
        t = t.replace('$EXAMPLES', '')
    writefile('api-%s.rst' % module, t)


# Generation finished
print('Auto-generation finished')
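
# Added note (illustrative): for a module with one matching example file, the
# generated api-<module>.rst roughly looks like the following (module and
# file names here are hypothetical):
#
#     ============================================================
#     <module summary line>
#     ============================================================
#
#     # :ref:`Jump directly to Examples <example-reference1>`
#
#     .. automodule:: kivy.animation
#         :members:
#         :show-inheritance:
#
#     .. toctree::
#
#     .. _example-reference1:
#
#     Examples
#     --------
#
#     File :download:`animation_example.py <../examples/framework/animation_example.py>` ::
#
#         <tab-indented file contents>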

# (c) Crown Copyright 2014 Defence Science and Technology Laboratory UK
# Author: Rich Brantingham

import copy
import time
import json
import urlparse
import datetime
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError

from django.test import TestCase
from django.core import urlresolvers
from django.test import client
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User

from registration.models import RegistrationProfile
from tastypie_mongoengine import test_runner

import projectsapp.documents as documents
from projectsapp import api
from projectsapp import api_functions

class Test_Authentication_Base(test_runner.MongoEngineTestCase):
    """
    Base class to handle functions common throughout tests
    """

    api_name = 'v1'
    c = client.Client()

    def get_meta_and_objects(self, response):
        content = json.loads(response.content)
        return content['meta'], content['objects']

    """ User Handling Functions """
    def resourceListURI(self, resource_name):
        return urlresolvers.reverse('api_dispatch_list',
                                    kwargs={'api_name': self.api_name,
                                            'resource_name': resource_name})

    def resourcePK(self, resource_uri):
        match = urlresolvers.resolve(resource_uri)
        return match.kwargs['pk']

    def resourceDetailURI(self, resource_name, resource_pk):
        return urlresolvers.reverse('api_dispatch_detail',
                                    kwargs={'api_name': self.api_name,
                                            'resource_name': resource_name,
                                            'pk': resource_pk})

    def fullURItoAbsoluteURI(self, uri):
        scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
        return urlparse.urlunsplit((None, None, path, query, fragment))

    def add_user(self, email=None, first_name=None, last_name=None):
        """ Add a user - all three arguments are optional """

        # Allow tests to supply their own email address
        if email is None:
            email = 'bob@example.com'
        if first_name is None:
            first_name = 'bob'
        if last_name is None:
            last_name = 'roberts'

        # Register a new user
        resp = self.c.post(reverse('registration_register'),
                           data={'email': email,
                                 'first_name': first_name, 'last_name': last_name,
                                 'organisation': 'org', 'team': 'team',
                                 'password1': 'test_password', 'password2': 'test_password',
                                 'tos': True})

        # Get the profile of our new user to access the ACTIVATION key
        profile = RegistrationProfile.objects.get(user__email=email)

        # And now activate the profile using the activation key
        resp = self.client.get(reverse('registration_activate',
                                       args=(),
                                       kwargs={'activation_key': profile.activation_key}))

        # Give all other tests access to the user and API key
        user = User.objects.get(email=email)
        api_key = user.api_key.key

        return user, api_key

    def build_headers(self, user, api_key):
        """ Build request headers for calls requiring authentication """

        headers = {"HTTP_AUTHORIZATION": "ApiKey %s:%s" % (user.username, api_key)}
        return headers

    def give_privileges(self, user, priv):
        """ Make the user staff or superuser """

        if priv.lower() == 'staff':
            user.is_staff = True
        elif priv.lower() == 'superuser':
            user.is_superuser = True
        else:
            print 'failed to set privileges (%s) for user %s' % (priv, user)

        user.save()
        return user

#------------------------------------------------------------------------------------------------------------

#@utils.override_settings(DEBUG=True)
class Test_Basic_Authentication_Functions(Test_Authentication_Base):
    """
    Tests that clients can authenticate properly.
    """

    def setUp(self):

        # Add a user and build API key header
        self.user_id, self.api_key = self.add_user()
        self.headers = self.build_headers(self.user_id, self.api_key)


    def test_no_auth_required_on_GET(self):
        """ GET requests need no authentication when anonymous viewing is enabled """

        # Don't actually use the headers in the call
        response = self.c.get(self.resourceListURI('project'))
        if settings.ANONYMOUS_VIEWING == True:
            self.assertEquals(response.status_code, 200)
        else:
            self.assertEquals(response.status_code, 401)

    def test_auth_block_a_POST(self):
        """ Authentication blocks an unauthenticated POST request """

        # Don't actually use the headers in the call
        data = {"title": "This project will never stick...",
                "description": "First project description in here.",
                "status": "published",
                "protective_marking": {"classification": "public",
                                       "descriptor": "private"
                                       }}

        response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json')
        self.assertEquals(response.status_code, 401)

    def test_auth_block_a_non_staff_POST(self):
        """ Authorization blocks a POST request by a non-staff user """

        # Use the headers of a non-staff user in the call
        data = {"title": "This project will never stick...",
                "description": "First project description in here.",
                "status": "published",
                "protective_marking": {"classification": "public",
                                       "descriptor": "private"
                                       }}

        response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **self.headers)
        self.assertEquals(response.status_code, 401)

    def test_auth_allow_staff_POST(self):
        """ Authorization allows POST by a staff user """

        user_id, api_key = self.add_user("staff_user1@projects.com")
        user = self.give_privileges(user_id, priv='staff')
        headers = self.build_headers(user_id, api_key)

        # Use the headers of the staff user in the call
        data = {"title": "This project will never stick...",
                "description": "First project description in here.",
                "status": "published",
                "related_ideas": ["xcxcxcxcxcxcxcxcxcxcxcxcxcxcx", "xcxcxcxcxcxcxcxcxcxcxcxcxcxcx"],
                "protective_marking": {"classification": "public",
                                       "descriptor": "private"
                                       }}

        response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **headers)
        self.assertEquals(response.status_code, 201)

#------------------------------------------------------------------------------------------------------------
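
# Added note: requests authenticate via a tastypie ApiKey-style header,
# HTTP_AUTHORIZATION = "ApiKey <username>:<key>", as built by build_headers()
# above; project POSTs additionally require staff privileges.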

#@utils.override_settings(DEBUG=True)
class Test_Simple_GET_Project_API(Test_Authentication_Base):

    def setUp(self):
        """ Insert documents to start with """

        # Add a user and gain access to the API key and user
        self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
        user = self.give_privileges(self.user_id, priv='staff')
        self.headers = self.build_headers(self.user_id, self.api_key)

        response = self.c.get(self.resourceListURI('project'), **self.headers)
        self.assertEquals(response.status_code, 200)

        self.pm = {"classification": "PUBLIC",
                   "classification_short": "PU",
                   "classification_rank": 0,
                   "national_caveats_primary_name": "MY EYES ONLY",
                   "descriptor": "private",
                   "codewords": ["banana1", "banana2"],
                   "codewords_short": ["b1", "b2"],
                   "national_caveats_members": ["ME"],
                   "national_caveats_rank": 3}

        docs = [{"title": "The first project.",
                 "description": "First project description in here.",
                 "status": "published",
                 "protective_marking": self.pm},
                {"title": "The second project.",
                 "description": "Second project description in here.",
                 "status": "published",
                 "protective_marking": self.pm}
                ]

        # Store the responses
        self.doc_locations = []
        for doc in docs:
            response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
            self.doc_locations.append(response['location'])
            self.assertEqual(response.status_code, 201)

    def test_get_to_check_failure_anon(self):
        """ Test to check that new status code isn't backwards breaking """

        url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
        response = self.c.get(url)
        self.assertEquals(response.status_code, 200)

    def test_get_to_check_failure_authenticated(self):
        """ Test to check that new status code isn't backwards breaking for authenticated user """

        url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
        response = self.c.get(url, **self.headers)
        self.assertEquals(response.status_code, 200)

    def test_get_to_check_failure_authenticated_admin(self):
        """ Test to check that new status code isn't backwards breaking for authenticated ADMIN user """

        user_id, api_key = self.add_user()
        user = self.give_privileges(user_id, priv='staff')
        headers = self.build_headers(user_id, api_key)

        url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
        response = self.c.get(url, **headers)
        self.assertEquals(response.status_code, 200)

    def test_get_all_projects(self):
        """ Retrieve all projects """

        response = self.c.get(self.resourceListURI('project'), **self.headers)
        self.assertEquals(response.status_code, 200)
        meta, content = self.get_meta_and_objects(response)
        self.assertEquals(meta['total_count'], 2)
        self.assertEquals(len(content), 2)

    #TODO: Sort out xml tests

    def test_get_xml_list(self):
        """ Get an xml representation
            This will ERROR rather than FAIL if it doesn't succeed. """

        response = self.c.get('/api/%s/project/?format=xml' % (self.api_name), **self.headers)
        self.assertEquals(response.status_code, 200)
        xml = parseString(response.content)

    def test_get_xml_list_fail(self):
        """ Get an xml representation - fails on content """

        response = self.c.get('/api/%s/project/?format=xml' % (self.api_name), **self.headers)
        self.assertEquals(response.status_code, 200)
        self.assertRaises(ExpatError, parseString, response.content+'