    im = mpc_arg(z, prec, rnd)
    return re, im

def mpc_cos(z, prec, rnd=round_fast):
    """Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
    sin(a)*sinh(b)*i.

    The same comments apply as for the complex exp: only real
    multiplications are performed, so no cancellation errors are
    possible. The formula is also efficient since we can compute both
    pairs (cos, sin) and (cosh, sinh) in single steps."""
    a, b = z
    if b == fzero:
        return mpf_cos(a, prec, rnd), fzero
    if a == fzero:
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(c, ch, prec, rnd)
    im = mpf_mul(s, sh, prec, rnd)
    return re, mpf_neg(im)

def mpc_sin(z, prec, rnd=round_fast):
    """Complex sine. We have sin(a+bi) = sin(a)*cosh(b) +
    cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional
    comments."""
    a, b = z
    if b == fzero:
        return mpf_sin(a, prec, rnd), fzero
    if a == fzero:
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(s, ch, prec, rnd)
    im = mpf_mul(c, sh, prec, rnd)
    return re, im

def mpc_tan(z, prec, rnd=round_fast):
    """Complex tangent. Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i
    where M = cos(2a) + cosh(2b)."""
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero:
        return mpf_tan(a, prec, rnd), fzero
    if a == fzero:
        return fzero, mpf_tanh(b, prec, rnd)
    wp = prec + 15
    a = mpf_shift(a, 1)
    b = mpf_shift(b, 1)
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    # TODO: handle cancellation when c ~= -1 and ch ~= 1
    mag = mpf_add(c, ch, wp)
    re = mpf_div(s, mag, prec, rnd)
    im = mpf_div(sh, mag, prec, rnd)
    return re, im

def mpc_cos_pi(z, prec, rnd=round_fast):
    a, b = z
    if b == fzero:
        return mpf_cos_pi(a, prec, rnd), fzero
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(c, ch, prec, rnd)
    im = mpf_mul(s, sh, prec, rnd)
    return re, mpf_neg(im)

def mpc_sin_pi(z, prec, rnd=round_fast):
    a, b = z
    if b == fzero:
        return mpf_sin_pi(a, prec, rnd), fzero
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(s, ch, prec, rnd)
    im = mpf_mul(c, sh, prec, rnd)
    return re, im

def mpc_cos_sin(z, prec, rnd=round_fast):
    a, b = z
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    if b == fzero:
        c, s = mpf_cos_sin(a, prec, rnd)
        return (c, fzero), (s, fzero)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)

def mpc_cos_sin_pi(z, prec, rnd=round_fast):
    a, b = z
    if b == fzero:
        c, s = mpf_cos_sin_pi(a, prec, rnd)
        return (c, fzero), (s, fzero)
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)

def mpc_cosh(z, prec, rnd=round_fast):
    """Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i)."""
    a, b = z
    return mpc_cos((b, mpf_neg(a)), prec, rnd)
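# A quick double-precision sanity check of the cos(a+bi) splitting used
# above, via the standard library (illustrative only; mpf_cos_sin and
# mpf_cosh_sinh do the same thing at arbitrary precision):
#
#   >>> import cmath, math
#   >>> a, b = 0.7, -1.2
#   >>> lhs = cmath.cos(complex(a, b))
#   >>> rhs = complex(math.cos(a) * math.cosh(b), -math.sin(a) * math.sinh(b))
#   >>> abs(lhs - rhs) < 1e-15
#   True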
def mpc_sinh(z, prec, rnd=round_fast):
    """Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i)."""
    a, b = z
    b, a = mpc_sin((b, a), prec, rnd)
    return a, b

def mpc_tanh(z, prec, rnd=round_fast):
    """Complex hyperbolic tangent. Computed as tanh(z) = -i*tan(z*i)."""
    a, b = z
    b, a = mpc_tan((b, a), prec, rnd)
    return a, b

# TODO: avoid loss of accuracy
def mpc_atan(z, prec, rnd=round_fast):
    a, b = z
    # atan(z) = (I/2)*(log(1-I*z) - log(1+I*z))
    # x = 1-I*z = 1 + b - I*a
    # y = 1+I*z = 1 - b + I*a
    wp = prec + 15
    x = mpf_add(fone, b, wp), mpf_neg(a)
    y = mpf_sub(fone, b, wp), a
    l1 = mpc_log(x, wp)
    l2 = mpc_log(y, wp)
    a, b = mpc_sub(l1, l2, prec, rnd)
    # (I/2) * (a+b*I) = (-b/2 + a/2*I)
    v = mpf_neg(mpf_shift(b, -1)), mpf_shift(a, -1)
    # Subtraction at infinity gives correct real part but
    # wrong imaginary part (should be zero)
    if v[1] == fnan and mpc_is_inf(z):
        v = (v[0], fzero)
    return v

beta_crossover = from_float(0.6417)
alpha_crossover = from_float(1.5)

def acos_asin(z, prec, rnd, n):
    """ complex acos for n = 0, asin for n = 1
    The algorithm is described in
    T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
    'Implementing the Complex Arcsine and Arccosine Functions
    using Exception Handling',
    ACM Trans. on Math. Software Vol. 23 (1997), p. 299

    The complex acos and asin can be defined as
    acos(z) = acos(beta) - I*sign(a)*log(alpha + sqrt(alpha**2 - 1))
    asin(z) = asin(beta) + I*sign(a)*log(alpha + sqrt(alpha**2 - 1))
    where z = a + I*b
    alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
    r = sqrt((a+1)**2 + b**2); s = sqrt((a-1)**2 + b**2)

    These expressions are rewritten in different ways in different
    regions, delimited by the two crossovers alpha_crossover and
    beta_crossover, and by abs(a) <= 1, in order to improve the
    numerical accuracy.
    """
    a, b = z
    wp = prec + 10
    # special cases with real argument
    if b == fzero:
        am = mpf_sub(fone, mpf_abs(a), wp)
        # case abs(a) <= 1
        if not am[0]:
            if n == 0:
                return mpf_acos(a, prec, rnd), fzero
            else:
                return mpf_asin(a, prec, rnd), fzero
        # cases abs(a) > 1
        else:
            # case a < -1
            if a[0]:
                pi = mpf_pi(prec, rnd)
                c = mpf_acosh(mpf_neg(a), prec, rnd)
                if n == 0:
                    return pi, mpf_neg(c)
                else:
                    return mpf_neg(mpf_shift(pi, -1)), c
            # case a > 1
            else:
                c = mpf_acosh(a, prec, rnd)
                if n == 0:
                    return fzero, c
                else:
                    pi = mpf_pi(prec, rnd)
                    return mpf_shift(pi, -1), mpf_neg(c)
    asign = bsign = 0
    if a[0]:
        a = mpf_neg(a)
        asign = 1
    if b[0]:
        b = mpf_neg(b)
        bsign = 1
    am = mpf_sub(fone, a, wp)
    ap = mpf_add(fone, a, wp)
    r = mpf_hypot(ap, b, wp)
    s = mpf_hypot(am, b, wp)
    alpha = mpf_shift(mpf_add(r, s, wp), -1)
    beta = mpf_div(a, alpha, wp)
    b2 = mpf_mul(b, b, wp)
    # case beta <= beta_crossover
    if not mpf_sub(beta_crossover, beta, wp)[0]:
        if n == 0:
            re = mpf_acos(beta, wp)
        else:
            re = mpf_asin(beta, wp)
    else:
        # to compute the real part in this region use the identity
        # asin(beta) = atan(beta/sqrt(1-beta**2))
        # beta/sqrt(1-beta**2) = a/sqrt((alpha + a)*(alpha - a))
        # alpha + a is numerically accurate; alpha - a can have
        # cancellations leading to numerical inaccuracies, so rewrite
        # it in different ways according to the region
        Ax = mpf_add(alpha, a, wp)
        # case a <= 1
        if not am[0]:
            # c = b*b/(r + (a+1)); d = (s + (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
            # case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
            c = mpf_div(b2, mpf_add(r, ap, wp), wp)
            d = mpf_add(s, am, wp)
            re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
            if n == 0:
                re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
        else:
            # c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n = 0: re = atan(b*sqrt(c + d)/2/a)
            # case n = 1: re = atan(a/(b*sqrt(c + d)/2))
            c = mpf_div(Ax, mpf_add(r, ap, wp), wp)
            d = mpf_div(Ax, mpf_sub(s, am, wp), wp)
            re = mpf_shift(mpf_add(c, d, wp), -1)
            re = mpf_mul(b, mpf_sqrt(re, wp), wp)
            if n == 0:
                re = mpf_atan(mpf_div(re, a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, re, wp), wp)
    # to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover
    # replace it with 1 + Am1 + sqrt(Am1*(alpha+1))
    # where Am1 = alpha - 1
    # if alpha <= alpha_crossover:
    if not mpf_sub(alpha_crossover, alpha, wp)[0]:
        c1 = mpf_div(b2, mpf_add(r, ap, wp), wp)
        # case a < 1
        if mpf_neg(am)[0]:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a)))
            c2 = mpf_add(s, am, wp)
            c2 = mpf_div(b2, c2, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        else:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a)))
            c2 = mpf_sub(s, am, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        # im = log(1 + Am1 + sqrt(Am1*(alpha+1)))
        im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp)
        im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp)
    else:
        # im = log(alpha + sqrt(alpha*alpha - 1))
        im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp)
        im = mpf_log(mpf_add(alpha, im, wp), wp)
    if asign:
        if n == 0:
            re = mpf_sub(mpf_pi(wp), re, wp)
        else:
            re = mpf_neg(re)
    if not bsign and n == 0:
        im = mpf_neg(im)
    if bsign and n == 1:
        im = mpf_neg(im)
    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
    return re, im
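# The two branches computed by acos_asin are tied together by the identity
# asin(z) + acos(z) = pi/2 (principal branches); a double-precision
# illustration with the standard library:
#
#   >>> import cmath, math
#   >>> z = 0.5 + 0.5j
#   >>> abs(cmath.asin(z) + cmath.acos(z) - math.pi / 2) < 1e-15
#   True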
def mpc_acos(z, prec, rnd=round_fast):
    return acos_asin(z, prec, rnd, 0)

def mpc_asin(z, prec, rnd=round_fast):
    return acos_asin(z, prec, rnd, 1)

def mpc_asinh(z, prec, rnd=round_fast):
    # asinh(z) = I * asin(-I z)
    a, b = z
    a, b = mpc_asin((b, mpf_neg(a)), prec, rnd)
    return mpf_neg(b), a

def mpc_acosh(z, prec, rnd=round_fast):
    # acosh(z) = -I * acos(z)   for Im(acos(z)) <= 0
    #            +I * acos(z)   otherwise
    a, b = mpc_acos(z, prec, rnd)
    if b[0] or b == fzero:
        return mpf_neg(b), a
    else:
        return b, mpf_neg(a)

def mpc_atanh(z, prec, rnd=round_fast):
    # atanh(z) = (log(1+z)-log(1-z))/2
    wp = prec + 15
    a = mpc_add(z, mpc_one, wp)
    b = mpc_sub(mpc_one, z, wp)
    a = mpc_log(a, wp)
    b = mpc_log(b, wp)
    v = mpc_shift(mpc_sub(a, b, wp), -1)
    # Subtraction at infinity gives correct imaginary part but
    # wrong real part (should be zero)
    if v[0] == fnan and mpc_is_inf(z):
        v = (fzero, v[1])
    return v

def mpc_fibonacci(z, prec, rnd=round_fast):
    re, im = z
    if im == fzero:
        return (mpf_fibonacci(re, prec, rnd), fzero)
    size = max(abs(re[2]+re[3]), abs(im[2]+im[3]))
    wp = prec + size + 20
    a = mpf_phi(wp)
    b = mpf_add(mpf_shift(a, 1), fnone, wp)
    u = mpc_pow((a, fzero), z, wp)
    v = mpc_cos_pi(z, wp)
    v = mpc_div(v, u, wp)
    u = mpc_sub(u, v, wp)
    u = mpc_div_mpf(u, b, prec, rnd)
    return u

def mpf_expj(x, prec, rnd='f'):
    raise ComplexResult

def mpc_expj(z, prec, rnd='f'):
    re, im = z
    if im == fzero:
        return mpf_cos_sin(re, prec, rnd)
    if re == fzero:
        return mpf_exp(mpf_neg(im), prec, rnd), fzero
    ey = mpf_exp(mpf_neg(im), prec+10)
    c, s = mpf_cos_sin(re, prec+10)
    re = mpf_mul(ey, c, prec, rnd)
    im = mpf_mul(ey, s, prec, rnd)
    return re, im

def mpf_expjpi(x, prec, rnd='f'):
    raise ComplexResult

def mpc_expjpi(z, prec, rnd='f'):
    re, im = z
    if im == fzero:
        return mpf_cos_sin_pi(re, prec, rnd)
    sign, man, exp, bc = im
    wp = prec + 10
    if man:
        wp += max(0, exp+bc)
    im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp))
    if re == fzero:
        return mpf_exp(im, prec, rnd), fzero
    ey = mpf_exp(im, prec+10)
    c, s = mpf_cos_sin_pi(re, prec+10)
    re = mpf_mul(ey, c, prec, rnd)
    im = mpf_mul(ey, s, prec, rnd)
    return re, im
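# mpc_expjpi evaluates e^(i*pi*z); for z = a + b*i this equals
# e^(-pi*b) * (cos(pi*a) + i*sin(pi*a)), which is why the code above scales
# the imaginary part by pi before exponentiating. Double-precision check
# (illustrative):
#
#   >>> import cmath, math
#   >>> a, b = 0.25, 0.5
#   >>> lhs = cmath.exp(1j * math.pi * complex(a, b))
#   >>> rhs = math.exp(-math.pi * b) * complex(math.cos(math.pi * a),
#   ...                                        math.sin(math.pi * a))
#   >>> abs(lhs - rhs) < 1e-15
#   True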
if BACKEND == 'sage':
    try:
        import sage.libs.mpmath.ext_libmp as _lbmp
        mpc_exp = _lbmp.mpc_exp
        mpc_sqrt = _lbmp.mpc_sqrt
    except (ImportError, AttributeError):
        print("Warning: Sage imports in libmpc failed")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""This module contains classes for analyzing the texts of a corpus to accumulate
statistical information about word occurrences."""

import itertools
import logging
import multiprocessing as mp
import sys
from collections import Counter

import numpy as np
import scipy.sparse as sps
from six import iteritems, string_types

from gensim import utils
from gensim.models.word2vec import Word2Vec

logger = logging.getLogger(__name__)


def _ids_to_words(ids, dictionary):
    """Convert an iterable of ids to their corresponding words using a dictionary.
    Abstract away the differences between the HashDictionary and the standard one.

    Parameters
    ----------
    ids: dict
        Dictionary of ids and their words.
    dictionary: :class:`~gensim.corpora.dictionary.Dictionary`
        Input gensim dictionary

    Returns
    -------
    set
        Corresponding words.

    Examples
    --------
    >>> from gensim.corpora.dictionary import Dictionary
    >>> from gensim.topic_coherence import text_analysis
    >>>
    >>> dictionary = Dictionary()
    >>> ids = {1: 'fake', 4: 'cats'}
    >>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}
    >>>
    >>> text_analysis._ids_to_words(ids, dictionary)
    set(['cats', 'fake'])

    """
    if not dictionary.id2token:  # may not be initialized in the standard gensim.corpora.Dictionary
        setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})

    top_words = set()
    for word_id in ids:
        word = dictionary.id2token[word_id]
        if isinstance(word, set):
            top_words = top_words.union(word)
        else:
            top_words.add(word)

    return top_words


class BaseAnalyzer(object):
    """Base class for corpus and text analyzers.

    Attributes
    ----------
    relevant_ids : dict
        Mapping
    _vocab_size : int
        Size of vocabulary.
    id2contiguous : dict
        Mapping word_id -> number.
    log_every : int
        Interval for logging.
    _num_docs : int
        Number of documents.
""" def __init__(self, relevant_ids): """ Parameters ---------- relevant_ids : dict Mapping Examples -------- >>> from gensim.topic_coherence import text_analysis >>> ids = {1: 'fake', 4: 'cats'} >>> base = text_analysis.BaseAnalyzer(ids) >>> # should return {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0 >>> print base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0 """ self.relevant_ids = relevant_ids self._vocab_size = len(self.relevant_ids) self.id2contiguous = {word_id: n for n, word_id in enumerate(self.relevant_ids)} self.log_every = 1000 self._num_docs = 0 @property def num_docs(self): return self._num_docs @num_docs.setter def num_docs(self, num): self._num_docs = num if self._num_docs % self.log_every == 0: logger.info( "%s accumulated stats from %d documents", self.__class__.__name__, self._num_docs) def analyze_text(self, text, doc_num=None): raise NotImplementedError("Base classes should implement analyze_text.") def __getitem__(self, word_or_words): if isinstance(word_or_words, string_types) or not hasattr(word_or_words, '__iter__'): return self.get_occurrences(word_or_words) else: return self.get_co_occurrences(*word_or_words) def get_occurrences(self, word_id): """Return number of docs the word occurs in, once `accumulate` has been called.""" return self._get_occurrences(self.id2contiguous[word_id]) def _get_occurrences(self, word_id): raise NotImplementedError("Base classes should implement occurrences") def get_co_occurrences(self, word_id1, word_id2): """Return number of docs the words co-occur in, once `accumulate` has been called.""" return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2]) def _get_co_occurrences(self, word_id1, word_id2): raise NotImplementedError("Base classes should implement co_occurrences") class UsesDictionary(BaseAnalyzer): """A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts. The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id mapping. Attributes ---------- relevant_words : set Set of words that occurrences should be accumulated for. 
dictionary : :class:`~gensim.corpora.dictionary.Dictionary` Dictionary based on text token2id : dict Mapping from :class:`~gensim.corpora.dictionary.Dictionary` """ def __init__(self, relevant_ids, dictionary): """ Parameters ---------- relevant_ids : dict Mapping dictionary : :class:`~gensim.corpora.dictionary.Dictionary` Dictionary based on text Examples -------- >>> from gensim.topic_coherence import text_analysis >>> from gensim.corpora.dictionary import Dictionary >>> >>> ids = {1: 'foo', 2: 'bar'} >>> dictionary = Dictionary([['foo','bar','baz'], ['foo','bar','bar','baz']]) >>> udict = text_analysis.UsesDictionary(ids, dictionary) >>> >>> print udict.relevant_words set([u'foo', u'baz']) """ super(UsesDictionary, self).__init__(relevant_ids) self.relevant_words = _ids_to_words(self.relevant_ids, dictionary) self.dictionary = dictionary self.token2id = dictionary.token2id def get_occurrences(self, word): """Return number of docs the word occurs in, once `accumulate` has been called.""" try: word_id = self.token2id[word] except KeyError: word_id = word return self._get_occurrences(self.id2contiguous[word_id]) def _word2_contiguous_id(self, word): try: word_id = self.token2id[word] except KeyError: word_id = word return self.id2contiguous[word_id] def get_co_occurrences(self, word1, word2): """Return number of docs the words co-occur in, once `accumulate` has been called.""" word_id1 = self._word2_contiguous_id(word1) word_id2 = self._word2_contiguous_id(word2) return self._get_co_occurrences(word_id1, word_id2) class InvertedIndexBased(BaseAnalyzer): """Analyzer that builds up an inverted index to accumulate stats.""" def __init__(self, *args): """ Parameters ---------- args : dict Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer` Examples -------- >>> from gensim.topic_coherence import text_analysis >>> >>> ids = {1: 'fake', 4: 'cats'} >>> ininb = text_analysis.InvertedIndexBased(ids) >>> >>> print ininb._inverted_index [set([]) set([])] """ super(InvertedIndexBased, self).__init__(*args) self._inverted_index = np.array([set() for _ in range(self._vocab_size)]) def _get_occurrences(self, word_id): return len(self._inverted_index[word_id]) def _get_co_occurrences(self, word_id1, word_id2): s1 = self._inverted_index[word_id1] s2 = self._inverted_index[word_id2] return len(s1.intersection(s2)) def index_to_dict(self): contiguous2id = {n: word_id for word_id, n in iteritems(self.id2contiguous)} return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)} class CorpusAccumulator(InvertedIndexBased): """Gather word occurrence stats from a corpus by iterating over its BoW representation.""" def analyze_text(self, text, doc_num=None): """Build an inverted index from a sequence of corpus texts.""" doc_words = frozenset(x[0] for x in text) top_ids_in_doc = self.relevant_ids.intersection(doc_words) for word_id in top_ids_in_doc: self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs) def accumulate(self, corpus): for document in corpus: self.analyze_text(document) self.num_docs += 1 return self class WindowedTextsAnalyzer(UsesDictionary): """Gather some stats about relevant terms of a corpus by iterating over windows of texts.""" def __init__(self, relevant_ids, dictionary): """ Parameters ---------- relevant_ids : set of int Relevant id dictionary : :class:`~gensim.corpora.dictionary.Dictionary` Dictionary instance with mappings for the relevant_ids. 
""" super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary) self._none_token = self._vocab_size # see _iter_texts for use of none token def accumulate(self, texts, window_size): relevant_texts = self._iter_texts(texts) windows = utils.iter_windows( relevant_texts, window_size, ignore_below_size=False, include_doc_num=True) for doc_num, virtual_document in windows: self.analyze_text(virtual_document, doc_num) self.num_docs += 1 return self def _iter_texts(self, texts): dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32 for text in texts: if self.text_is_relevant(text): yield np.array([ self.id2contiguous[self.token2id[w]] if w in self.relevant_words else self._none_token for w in text], dtype=dtype) def text_is_relevant(self, text): """Check if the text has any relevant words.""" for word in text: if word in self.relevant_words: return True return False class InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased): """Build an inverted index from a sequence of corpus texts.""" def analyze_text(self, window, doc_num=None): for word_id in window: if word_id is not self._none_token: self._inverted_index[word_id].add(self._num_docs) class WordOccurrenceAccumulator(WindowedTextsAnalyzer): """Accumulate word occurrences and co-occurrences from a sequence of corpus texts.""" def __init__(self, *args): super(WordOccurrenceAccumulator, self).__init__(*args) self._occurrences = np.zeros(self._vocab_size, dtype='uint32') self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32') self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool) # add 1 for none token self._counter = Counter() def __str__(self): return self.__class__.__name__ def accumulate(self, texts, window_size): self._co_occurrences = self._co_occurrences.tolil() self.partial_accumulate(texts, window_size) self._symmetrize() return self def partial_accumulate(self, texts, window_size): """Meant to be called several times to accumulate partial results. Notes ----- The final accumulation should be performed with the `accumulate` method as opposed to this one. This method does not ensure the co-occurrence matrix is in lil format and does not symmetrize it after accumulation. """ self._current_doc_num = -1 self._token_at_edge = None self._counter.clear() super(WordOccurrenceAccumulator, self).accumulate(texts, window_size) for combo, count in iteritems(self._counter): self._co_occurrences[combo] += count return self def analyze_text(self, window, doc_num=None): self._slide_window(window, doc_num) mask = self._uniq_words[:-1] # to exclude none token if mask.any(): self._occurrences[mask] += 1 self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2)) def _slide_window(self, window, doc_num): if doc_num != self._current_doc_num: self._uniq_words[:] = False self._uniq_words[np.unique(window)] = True self._current_doc_num = doc_num else: self._uniq_words[self._token_at_edge] = False self._uniq_words[window[-1]] = True self._token_at_edge = window[0] def _symmetrize(self): """Word pairs may have been encountered in (i, j) and (j, i) order. Notes ----- Rather than enforcing a particular ordering during the update process, we choose to symmetrize the co-occurrence matrix after accumulation has completed. 
""" co_occ = self._co_occurrences co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts self._co_occurrences = \ co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32') def _get_occurrences(self, word_id): return self._occurrences[word_id] def _get_co_occurrences(self, word_id1, word_id2): return self._co_occurrences[word_id1, word_id2] def merge(self, other): self._occurrences += other._occurrences self._co_occurrences += other._co_occurrences self._num_docs += other._num_docs class PatchedWordOccurrenceAccumulator(WordOccurrenceAccumulator): """Monkey patched for multiprocessing worker usage, to move some of the logic to the master process.""" def _iter_texts(self, texts): return texts # master process will handle this class ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer): """Accumulate word occurrences in parallel. Attributes ---------- processes : int Number of processes to use; must be at least two. args : Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`). kwargs : Can include `batch_size`, which is the number of docs to send to a worker at a time. If not included, it defaults to 64. """ def __init__(self, processes, *args, **kwargs): super(ParallelWordOccurrenceAccumulator, self).__init__(*args) if processes < 2: raise ValueError( "Must have at least 2 processes to run in parallel; got %d" % processes) self.processes = processes self.batch_size = kwargs.get('batch_size', 64) def __str__(self): return "%s(processes=%s, batch_size=%s)" % ( self.__class__.__name__, self.processes, self.batch_size) def accumulate(self, texts, window_size): workers, input_q, output_q = self.start_workers(window_size) try: self.queue_all_texts(input_q, texts, window_size) interrupted = False except KeyboardInterrupt: logger.warn("stats accumulation interrupted; <= %d documents processed", self._num_docs) interrupted = True accumulators = self.terminate_workers(input_q, output_q, workers, interrupted) return self.merge_accumulators(accumulators) def start_workers(self, window_size): """Set up an input and output queue and start processes for each worker. Notes ----- The input queue is used to transmit batches of documents to the workers. The output queue is used by workers to transmit the WordOccurrenceAccumulator instances. Parameters ---------- window_size : int Returns ------- (list of lists) Tuple of (list of workers, input queue, output queue). """ input_q = mp.Queue(maxsize=self.processes) output_q = mp.Queue() workers = [] for _ in range(self.processes): accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary) worker = AccumulatingWorker(input_q, output_q, accumulator, window_size) worker.start() workers.append(worker) return workers, input_q, output_q def yield_batches(self, texts): """Return a generator over the given texts that yields batches of `batch_size` texts at a time.""" batch = [] for text in self._iter_texts(texts): batch.append(text) if len(batch) == self.batch_size: yield batch batch = [] if batch: yield batch def queue_all_texts(self, q, texts, window_size): """Sequentially place batches of texts on the given queue until `texts` is consumed. The texts are filtered so that only those with at least one relevant token are queued. 
""" for batch_num, batch in enumerate(self.yield_batches(texts)): q.put(batch, block=True) before = self._num_docs / self.log_every self._num_docs += sum(len(doc) - window_size + 1 for doc in batch) if before < (self._num_docs / self.log_every): logger.info( "%d batches submitted to accumulate stats from %d documents (%d virtual)", (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs) def terminate_workers(self, input_q, output_q, workers, interrupted=False): """Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each. Warnings -------- We do not use join here because it has been shown to have some issues in Python 2.7 (and even in later versions). This method also closes both the input and output queue. If `interrupted` is False (normal execution), a None value is placed on the input queue for each worker. The workers are looking for this sentinel value and interpret it as a signal to terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are programmed to recover from this and continue on to transmit their results before terminating. So in this instance, the sentinel values are not queued, but the rest of the execution continues as usual. """ if not interrupted: for _ in workers: input_q.put(None, block=True) accumulators = [] while len(accumulators) != len(workers): accumulators.append(output_q.get()) logger.info("%d accumulators retrieved from output queue", len(accumulators)) for worker in workers: if worker.is_alive(): worker.terminate() input_q.close() output_q.close() return accumulators def merge_accumulators(self, accumulators): """Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed by all the individual accumulators. """ accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary) for other_accumulator in accumulators: accumulator.merge(other_accumulator) # Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized. # This is by design, to avoid unnecessary matrix additions/conversions during accumulation. 
accumulator._symmetrize() logger.info("accumulated word occurrence stats for %d virtual documents", accumulator.num_docs) return accumulator class AccumulatingWorker(mp.Process): """Accumulate stats from texts fed in from queue.""" def __init__(self, input_q, output_q, accumulator, window_size): super(AccumulatingWorker, self).__init__() self.input_q = input_q self.output_q = output_q self.accumulator = accumulator self.accumulator.log_every = sys.maxsize # avoid logging in workers self.window_size = window_size def run(self): try: self._run() except KeyboardInterrupt: logger.info( "%s interrupted after processing %d documents", self.__class__.__name__, self.accumulator.num_docs) except Exception: logger.exception("worker encountered unexpected exception") finally: self.reply_to_master() def _run(self): batch_num = -1 n_docs = 0 while True: batch_num += 1 docs = self.input_q.get(block=True) if docs is None: # sentinel value logger.debug("observed sentinel value; terminating") break self.accumulator.partial_accumulate(docs, self.window_size) n_docs += len(docs) logger.debug( "completed batch %d; %d documents processed (%d virtual)", batch_num, n_docs, self.accumulator.num_docs) logger.debug( "finished all batches; %d documents processed (%d virtual)", n_docs, self.accumulator.num_docs) def reply_to_master(self): logger.info("serializing accumulator to return to master...") self.output_q.put(self.accumulator, block=False) logger.info("accumulator serialized") class WordVectorsAccumulator(UsesDictionary): """Accumulate context vectors for words using word vector embeddings. Attributes ---------- model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`) If None, a new Word2Vec model is trained on the given text corpus. Otherwise, it should be a pre-trained Word2Vec context vectors. model_kwargs: if model is None, these keyword arguments will be passed through to the Word2Vec constructor. """ def __init__(self, relevant_ids, dictionary, model=None, **model_kwargs): super(WordVectorsAccumulator, self).__init__(relevant_ids, dictionary) self.model = model self.model_kwargs = model_kwargs def not_in_vocab(self, words): uniq_words = set(utils.flatten(words)) return set(word for word in uniq_words if word not in self.model.vocab) def get_occurrences(self, word): """Return number of docs the word occurs in, once `accumulate` has been called.""" try: self.token2id[word] # is this a token or an id? 
        except KeyError:
            word = self.dictionary.id2token[word]
        return self.model.vocab[word].count

    def get_co_occurrences(self, word1, word2):
        """Return number of docs the words co-occur in, once `accumulate` has been called."""
        raise NotImplementedError("Word2Vec model does not support co-occurrence counting")

    def accumulate(self, texts, window_size):
        if self.model is not None:
            logger.debug("model is already trained; no accumulation necessary")
            return self

        kwargs = self.model_kwargs.copy()
        if window_size is not None:
            kwargs['window'] = window_size
        kwargs['min_count'] = kwargs.get('min_count', 1)
        kwargs['sg'] = kwargs.get('sg', 1)
        kwargs['hs'] = kwargs.get('hs', 0)

        self.model = Word2Vec(**kwargs)
        self.model.build_vocab(texts)
        self.model.train(texts, total_examples=self.model.corpus_count, epochs=self.model.iter)
        self.model = self.model.wv  # retain KeyedVectors

        return self

    def ids_similarity(self, ids1, ids2):
        words1 = self._words_with_embeddings(ids1)
        words2 = self._words_with_embeddings(ids2)
        return self.model.n_similarity(words1, words2)

    def _words_with_embeddings(self, ids):
        if not hasattr(ids, '__iter__'):
            ids = [ids]

        words = [self.dictionary.id2token[word_id] for word_id in ids]
        return [word for word in words if word in self.model.vocab]
from __future__ import unicode_literals

import logging
logger = logging.getLogger(__name__)

from django.contrib.auth.models import User, AnonymousUser
from django.apps import apps

from profiles import constants


def access_levels(owner_userprofile, viewer_userprofile):
    """A shortcut function for efficiency in places like the profile, where
    it is useful to do the checks for all the access levels and return a
    set of them, instead of manually checking each one.

    Accepts either UserProfile or User objects."""
    valid_access_levels = set([constants.PUBLIC_ACCESS])

    # Sometimes the viewer will be anonymous; should return
    # ASAP in these instances
    if isinstance(viewer_userprofile, AnonymousUser):
        return valid_access_levels

    # Dynamically load access to this profiles model
    # This is so we don't have a circular import when we use access functions
    # inside of models.py
    UserProfile_model = apps.get_model(app_label='profiles', model_name='UserProfile')

    if isinstance(viewer_userprofile, User):
        viewer_userprofile = UserProfile_model.get_profile(viewer_userprofile)

    if isinstance(owner_userprofile, User):
        owner_userprofile = UserProfile_model.get_profile(owner_userprofile)

    # the only valid access value for non-logged in users is the above defined
    # public access level
    if not viewer_userprofile:
        logger.debug(valid_access_levels)
        return valid_access_levels

    # registered level added since viewer user profile exists
    valid_access_levels.add(constants.REGISTERED_ACCESS)

    # member access level added if viewer is a member
    if viewer_userprofile.is_member:
        valid_access_levels.add(constants.MEMBERS_ACCESS)

    # admin access level added if viewer is an admin
    if viewer_userprofile.is_admin:
        valid_access_levels.add(constants.ADMIN_ACCESS)

    # private access level added if owner is same as viewer
    if owner_userprofile and viewer_userprofile.pk == owner_userprofile.pk:
        valid_access_levels.add(constants.PRIVATE_ACCESS)

    logger.debug(valid_access_levels)
    return valid_access_levels
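# Illustrative use (hypothetical `owner` and `viewer` UserProfile objects,
# where the viewer is a logged-in member but not the owner):
#
#   >>> levels = access_levels(owner, viewer)
#   >>> levels == {constants.PUBLIC_ACCESS,
#   ...            constants.REGISTERED_ACCESS,
#   ...            constants.MEMBERS_ACCESS}
#   True
#   >>> can_access(owner, viewer, constants.PRIVATE_ACCESS)
#   False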
def can_access(owner_userprofile, viewer_userprofile, access_level):
    """Given the profile of the owner of a given piece of content and the
    access level set for the content, return True or False for whether or
    not a given viewer can see it."""
    # public access--always true
    if access_level == constants.PUBLIC_ACCESS:
        return True

    # if it's not public and the user is not logged in (aka no profile),
    # no access
    if not viewer_userprofile:
        return False

    # we have a viewer profile, so someone is logged in; can return true
    # if our access level is registered level
    if access_level == constants.REGISTERED_ACCESS:
        return True

    # viewer and owner are the same, so can access
    # this also covers the case of constants.PRIVATE_ACCESS
    if owner_userprofile and owner_userprofile.pk == viewer_userprofile.pk:
        return True

    # members only access is met
    if viewer_userprofile.is_member and access_level == constants.MEMBERS_ACCESS:
        return True

    # admin only access is met
    if viewer_userprofile.is_admin and access_level == constants.ADMIN_ACCESS:
        return True

    # explicitly return False in all other cases
    # TODO: might want to log this, to see what situations aren't being caught
    # by the above
    return False
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User

from rest_framework import status
from rest_framework.test import APITestCase, force_authenticate

from projects.models import Project
from ideas.views import IdeaViewSet
from ideas.models import Idea


class _BaseIdeaTest(APITestCase):
    def setUp(self):
        self.user = User.objects.create_user('bob', password='foobar')
        self.project = Project(owner=self.user, title='testing',
                               description='Super test', status='IDEA')
        self.project.save()
        self.client.login(username='bob', password='foobar')
        self.IDEA_DATA = {'owner': self.user,
                          'title': 'really descriptive title',
                          'project': self.project,
                          'description': 'test idea',
                          'votes': 1}
        self.idea = Idea(**self.IDEA_DATA)
        self.idea.save()

    def tearDown(self):
        self.user.delete()
        self.idea.delete()
        self.project.delete()


class IdeaTests(_BaseIdeaTest):
    def test_idea_detail_view(self):
        d = {'pk': self.idea.pk}
        url = reverse('idea-detail', kwargs=d)
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_that_authenticated_user_can_patch_ideas(self):
        url = reverse('idea-detail', kwargs={'pk': self.idea.pk})
        response = self.client.patch(url, data={'description': 'Look, a new description'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['description'], 'Look, a new description')

    def test_that_unauthenticated_user_cannot_patch_ideas(self):
        url = reverse('idea-detail', kwargs={'pk': self.idea.pk})
        self.client.logout()
        response = self.client.patch(url, data={'description': 'A'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_that_not_owner_cant_edit_ideas(self):
        user2 = User.objects.create_user('jimmy', password='foobar')
        self.client.logout()
        self.client.login(username='jimmy', password='foobar')
        url = reverse('idea-detail', kwargs={'pk': self.idea.pk})
        response = self.client.patch(url, data={'description': 'B'})
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_authenticated_user_can_make_idea(self):
        url = reverse('idea-list')
        data = {'project': reverse('project-detail', kwargs={'pk': self.project.pk}),
                'description': "descriptive description",
                'votes': 1,
                'title': 'second title'}
        response = self.client.post(url, data=data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_idea_detail_view_for_expected_params(self):
        url = reverse('idea-detail', kwargs={'pk': self.idea.pk})
        response = self.client.get(url)
        self.assertContains(response, 'url')
        self.assertContains(response, 'description')
        self.assertContains(response, 'votes')
self.assertContains(response, 'username') self.assertContains(response, 'user_link') self.assertContains(response, 'created') self.assertContains(response, 'edited') self.assertContains(response, 'project') self.assertContains(response, 'title') def test_idea_list_view_for_expected_params(self): url = reverse('idea-list') response = self.client.get(url) self.assertContains(response,'url') self.assertContains(response, 'project') self.assertContains(response,'description') self.assertContains(response,'votes') self.assertContains(response,'username') self.assertContains(response,'user_link') self.assertContains(response, 'title') # stackcollapse.py - format perf samples with one line per distinct call stack # # This script's output has two space-separated fields. The first is a semicolon # separated stack including the program name (from the "comm" field) and the # function names from the call stack. The second is a count: # # swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2 # # The file is sorted according to the first field. # # Input may be created and processed using: # # perf record -a -g -F 99 sleep 60 # perf script report stackcollapse > out.stacks-folded # # (perf script record stackcollapse works too). # # Written by Paolo Bonzini # Based on Brendan Gregg's stackcollapse-perf.pl script. import os import sys from collections import defaultdict from optparse import OptionParser, make_option sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from EventClass import * # command line parsing option_list = [ # formatting options for the bottom entry of the stack make_option("--include-tid", dest="include_tid", action="store_true", default=False, help="include thread id in stack"), make_option("--include-pid", dest="include_pid", action="store_true", default=False, help="include process id in stack"), make_option("--no-comm", dest="include_comm", action="store_false", default=True, help="do not separate stacks according to comm"), make_option("--tidy-java", dest="tidy_java", action="store_true", default=False, help="beautify Java signatures"), make_option("--kernel", dest="annotate_kernel", action="store_true", default=False, help="annotate kernel functions with _[k]") ] parser = OptionParser(option_list=option_list) (opts, args) = parser.parse_args() if len(args) != 0: parser.error("unexpected command line argument") if opts.include_tid and not opts.include_comm: parser.error("requesting tid but not comm is invalid") if opts.include_pid and not opts.include_comm: parser.error("requesting pid but not comm is invalid") # event handlers lines = defaultdict(lambda: 0) def process_event(param_dict): def tidy_function_name(sym, dso): if sym is None: sym = '[unknown]' sym = sym.replace(';', ':') if opts.tidy_java: # the original stackcollapse-perf.pl script gives the # example of converting this: # Lorg/mozilla/javascript/MemberBox;.(Ljava/lang/reflect/Method;)V # to this: # org/mozilla/javascript/MemberBox:.init sym = sym.replace('<', '') sym = sym.replace('>', '') if sym[0] == 'L' and sym.find('/'): sym = sym[1:] try: sym = sym[:sym.index('(')] except ValueError: pass if opts.annotate_kernel and dso == '[kernel.kallsyms]': return sym + '_[k]' else: return sym stack = list() if 'callchain' in param_dict: for entry in param_dict['callchain']: entry.setdefault('sym', dict()) entry['sym'].setdefault('name', None) entry.setdefault('dso', None) 
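            # Each callchain entry arriving here is a dict shaped roughly like
            #     {'sym': {'name': 'start_kernel'}, 'dso': '[kernel.kallsyms]'}
            # with either field possibly absent, which is what the setdefault
            # calls above normalize before the name is tidied and appended.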
stack.append(tidy_function_name(entry['sym']['name'], entry['dso'])) else: param_dict.setdefault('symbol', None) param_dict.setdefault('dso', None) stack.append(tidy_function_name(param_dict['symbol'], param_dict['dso'])) if opts.include_comm: comm = param_dict["comm"].replace(' ', '_') sep = "-" if opts.include_pid: comm = comm + sep + str(param_dict['sample']['pid']) sep = "/" if opts.include_tid: comm = comm + sep + str(param_dict['sample']['tid']) stack.append(comm) stack_string = ';'.join(reversed(stack)) lines[stack_string] = lines[stack_string] + 1 def trace_end(): list = lines.keys() list.sort() for stack in list: print "%s %d" % (stack, lines[stack]) # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. 
""" from oslo.config import cfg from nova import baserpc from nova.db import base from nova import notifier from nova.objects import base as objects_base from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova.openstack.common.rpc import dispatcher as rpc_dispatcher CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') LOG = logging.getLogger(__name__) class Manager(base.Base, periodic_task.PeriodicTasks): # Set RPC API version to 1.0 by default. RPC_API_VERSION = '1.0' def __init__(self, host=None, db_driver=None, service_name='undefined'): if not host: host = CONF.host self.host = host self.backdoor_port = None self.service_name = service_name self.notifier = notifier.get_notifier(self.service_name, self.host) super(Manager, self).__init__(db_driver) def create_rpc_dispatcher(self, backdoor_port=None, additional_apis=None): '''Get the rpc dispatcher for this manager. If a manager would like to set an rpc API version, or support more than one class as the target of rpc messages, override this method. ''' apis = [] if additional_apis: apis.extend(additional_apis) base_rpc = baserpc.BaseRPCAPI(self.service_name, backdoor_port) apis.extend([self, base_rpc]) serializer = objects_base.NovaObjectSerializer() return rpc_dispatcher.RpcDispatcher(apis, serializer) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Hook to do additional manager initialization when one requests the service be started. This is called before any service record is created. Child classes should override this method. """ pass def pre_start_hook(self): """Hook to provide the manager the ability to do additional start-up work before any RPC queues/consumers are created. This is called after other initialization has succeeded and a service record is created. Child classes should override this method. """ pass def post_start_hook(self): """Hook to provide the manager the ability to do additional start-up work immediately after a service creates RPC consumers and starts 'running'. Child classes should override this method. """ pass # # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. 
# """ This file contains implementation of inetconf interface for physical router configuration manager """ from lxml import etree from ncclient import manager import copy import time import datetime class PhysicalRouterConfig(object): # mapping from contrail family names to junos _FAMILY_MAP = { 'route-target': '', 'inet-vpn': '', 'inet6-vpn': '', 'e-vpn': '' } def __init__(self, management_ip, user_creds, vendor, product, vnc_managed, logger=None): self.management_ip = management_ip self.user_creds = user_creds self.vendor = vendor self.product = product self.vnc_managed = vnc_managed self.reset_bgp_config() self._logger = logger self.commit_stats = { 'netconf_enabled':False, 'netconf_enabled_status':'', 'last_commit_time': '', 'last_commit_duration': '', 'commit_status_message': '', 'total_commits_sent_since_up': 0, } self.bgp_config_sent = False # end __init__ def update(self, management_ip, user_creds, vendor, product, vnc_managed): self.management_ip = management_ip self.user_creds = user_creds self.vendor = vendor self.product = product self.vnc_managed = vnc_managed # end update def get_commit_stats(self): return self.commit_stats #end get_commit_stats def send_netconf(self, new_config, default_operation="merge", operation="replace"): if (self.vendor is None or self.product is None or self.vendor.lower() != "juniper" or self.product.lower() != "mx"): self._logger.info("auto configuraion of physical router is not supported \ on the configured vendor family, ip: %s, not pushing netconf message" % (self.management_ip)) self.commit_stats['netconf_enabled'] = False self.commit_stats['netconf_enabled_status'] = "netconf configuraion is not supported on this vendor/product family" return if (self.vnc_managed is None or self.vnc_managed == False): self._logger.info("vnc managed property must be set for a physical router to get auto \ configured, ip: %s, not pushing netconf message" % (self.management_ip)) self.commit_stats['netconf_enabled'] = False self.commit_stats['netconf_enabled_status'] = "netconf auto configuraion is not enabled on this physical router" return self.commit_stats['netconf_enabled'] = True self.commit_stats['netconf_enabled_status'] = '' start_time = None try: with manager.connect(host=self.management_ip, port=22, username=self.user_creds['username'], password=self.user_creds['password'], unknown_host_cb=lambda x, y: True) as m: add_config = etree.Element( "config", nsmap={"xc": "urn:ietf:params:xml:ns:netconf:base:1.0"}) config = etree.SubElement(add_config, "configuration") config_group = etree.SubElement(config, "groups", operation=operation) contrail_group = etree.SubElement(config_group, "name") contrail_group.text = "__contrail__" if isinstance(new_config, list): for nc in new_config: config_group.append(nc) else: config_group.append(new_config) if operation == "delete": apply_groups = etree.SubElement(config, "apply-groups", operation=operation) else: apply_groups = etree.SubElement(config, "apply-groups") apply_groups.text = "__contrail__" self._logger.info("\nsend netconf message: %s\n" % (etree.tostring(add_config, pretty_print=True))) m.edit_config( target='candidate', config=etree.tostring(add_config), test_option='test-then-set', default_operation=default_operation) self.commit_stats['total_commits_sent_since_up'] += 1 start_time = time.time() m.commit() end_time = time.time() self.commit_stats['commit_status_message'] = 'success' self.commit_stats['last_commit_time'] = datetime.datetime.fromtimestamp(end_time).strftime('%Y-%m-%d %H:%M:%S') 
    def add_dynamic_tunnels(self, tunnel_source_ip, ip_fabric_nets, bgp_router_ips):
        self.tunnel_config = etree.Element("routing-options")
        dynamic_tunnels = etree.SubElement(self.tunnel_config, "dynamic-tunnels")
        dynamic_tunnel = etree.SubElement(dynamic_tunnels, "dynamic-tunnel")
        etree.SubElement(dynamic_tunnel, "name").text = "__contrail__"
        etree.SubElement(dynamic_tunnel, "source-address").text = tunnel_source_ip
        etree.SubElement(dynamic_tunnel, "gre")
        if ip_fabric_nets is not None:
            for subnet in ip_fabric_nets.get("subnet", []):
                dest_network = etree.SubElement(dynamic_tunnel, "destination-networks")
                etree.SubElement(dest_network, "name").text = \
                    subnet['ip_prefix'] + '/' + str(subnet['ip_prefix_len'])
        for bgp_router_ip in bgp_router_ips:
            dest_network = etree.SubElement(dynamic_tunnel, "destination-networks")
            etree.SubElement(dest_network, "name").text = bgp_router_ip + '/32'
    # end add_dynamic_tunnels

    '''
    ri_name: routing instance name to be configured on mx
    import/export targets: routing instance import, export targets
    prefixes: for l3 public vrf static routes, bug#1395938
    gateways: for l2 evpn, bug#1395944
    router_external: this indicates the routing instance configured is for
        the public network
    interfaces: logical interfaces to be part of vrf
    fip_map: contrail instance ip to floating-ip map, used for snat & floating ip support
    network_id: this is used for configuring irb interfaces
    '''
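    # Illustrative call with hypothetical values, matching the parameter
    # notes above (a public l3 VRF with one static prefix and one interface):
    #
    #   self.add_routing_instance(
    #       'blue-vrf',
    #       import_targets={'target:64512:1'},
    #       export_targets={'target:64512:1'},
    #       prefixes=['10.0.1.0/24'],
    #       router_external=True,
    #       interfaces=['ge-0/0/0.10'],
    #       network_id=10)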
etree.SubElement(route_config, "name").text = prefix etree.SubElement(route_config, "discard") auto_export = "" ri_opt.append(etree.fromstring(auto_export)) else: etree.SubElement(ri, "instance-type").text = "virtual-switch" if fip_map is not None: if ri_opt is None: ri_opt = etree.SubElement(ri, "routing-options") static_config = etree.SubElement(ri_opt, "static") route_config = etree.SubElement(static_config, "route") etree.SubElement(route_config, "name").text = "0.0.0.0/0" etree.SubElement(route_config, "next-hop").text = interfaces[0] if_element = etree.SubElement(ri, "interface") etree.SubElement(if_element, "name").text = interfaces[0] public_vrf_ips = {} for pip in fip_map.values(): if pip["vrf_name"] not in public_vrf_ips: public_vrf_ips[pip["vrf_name"]] = set() public_vrf_ips[pip["vrf_name"]].add(pip["floating_ip"]) for public_vrf, fips in public_vrf_ips.items(): ri_public = etree.SubElement(ri_config, "instance") etree.SubElement(ri_public, "name").text = public_vrf ri_opt = etree.SubElement(ri_public, "routing-options") static_config = etree.SubElement(ri_opt, "static") if_element = etree.SubElement(ri_public, "interface") etree.SubElement(if_element, "name").text = interfaces[1] for fip in fips: route_config = etree.SubElement(static_config, "route") etree.SubElement(route_config, "name").text = fip + "/32" etree.SubElement(route_config, "next-hop").text = interfaces[1] # add policies for export route targets ps = etree.SubElement(policy_config, "policy-statement") etree.SubElement(ps, "name").text = ri_name + "-export" term = etree.SubElement(ps, "term") etree.SubElement(term, "name").text= "t1" then = etree.SubElement(term, "then") for route_target in export_targets: comm = etree.SubElement(then, "community") etree.SubElement(comm, "add") etree.SubElement(comm, "community-name").text = route_target.replace(':', '_') if fip_map is not None: #for nat instance etree.SubElement(then, "reject") else: etree.SubElement(then, "accept") # add policies for import route targets ps = etree.SubElement(policy_config, "policy-statement") etree.SubElement(ps, "name").text = ri_name + "-import" term = etree.SubElement(ps, "term") etree.SubElement(term, "name").text= "t1" from_ = etree.SubElement(term, "from") for route_target in import_targets: target_name = route_target.replace(':', '_') etree.SubElement(from_, "community").text = target_name then = etree.SubElement(term, "then") etree.SubElement(then, "accept") then = etree.SubElement(ps, "then") etree.SubElement(then, "reject") # add firewall config for public VRF forwarding_options_config = self.forwarding_options_config firewall_config = self.firewall_config if router_external: if self.forwarding_options_config is None: forwarding_options_config = etree.Element("forwarding-options") fo = etree.SubElement(forwarding_options_config, "family") inet = etree.SubElement(fo, "inet") f = etree.SubElement(inet, "filter") etree.SubElement(f, "input").text = "redirect_to_public_vrf_filter" firewall_config = self.firewall_config or etree.Element("firewall") fc = etree.SubElement(firewall_config, "family") inet = etree.SubElement(fc, "inet") f = etree.SubElement(inet, "filter") etree.SubElement(f, "name").text = "redirect_to_public_vrf_filter" self.inet_forwarding_filter = f term = etree.SubElement(f, "term") etree.SubElement(term, "name").text= "default-term" then_ = etree.SubElement(term, "then") etree.SubElement(then_, "accept") term = etree.Element("term") etree.SubElement(term, "name").text= "term-" + ri_name[:59] if prefixes: from_ = 
etree.SubElement(term, "from") for prefix in prefixes: etree.SubElement(from_, "destination-address").text = prefix then_ = etree.SubElement(term, "then") etree.SubElement(then_, "routing-instance").text = ri_name #insert after 'name' element but before the last term self.inet_forwarding_filter.insert(1, term) if fip_map is not None: firewall_config = self.firewall_config or etree.Element("firewall") fc = etree.SubElement(firewall_config, "family") inet = etree.SubElement(fc, "inet") f = etree.SubElement(inet, "filter") etree.SubElement(f, "name").text = "redirect_to_" + ri_name[:46] + "_vrf" term = etree.SubElement(f, "term") etree.SubElement(term, "name").text= "term-" + ri_name[:59] from_ = etree.SubElement(term, "from") for fip_user_ip in fip_map.keys(): etree.SubElement(from_, "source-address").text = fip_user_ip then_ = etree.SubElement(term, "then") etree.SubElement(then_, "routing-instance").text = ri_name term = etree.SubElement(f, "term") etree.SubElement(term, "name").text= "default-term" then_ = etree.SubElement(term, "then") etree.SubElement(then_, "accept") interfaces_config = self.interfaces_config or etree.Element("interfaces") irb_intf = etree.SubElement(interfaces_config, "interface") etree.SubElement(irb_intf, "name").text = "irb" intf_unit = etree.SubElement(irb_intf, "unit") etree.SubElement(intf_unit, "name").text = str(network_id) family = etree.SubElement(intf_unit, "family") inet = etree.SubElement(family, "inet") f = etree.SubElement(inet, "filter") iput = etree.SubElement(f, "input") etree.SubElement(iput, "filter-name").text = "redirect_to_" + ri_name[:46] + "_vrf" # add L2 EVPN and BD config bd_config = None interfaces_config = self.interfaces_config proto_config = self.proto_config if (router_external==False and vni is not None and self.is_family_configured(self.bgp_params, "e-vpn")): etree.SubElement(ri, "vtep-source-interface").text = "lo0.0" bd_config = etree.SubElement(ri, "bridge-domains") bd= etree.SubElement(bd_config, "domain") etree.SubElement(bd, "name").text = "bd-" + str(vni) etree.SubElement(bd, "vlan-id").text = 'none' vxlan = etree.SubElement(bd, "vxlan") etree.SubElement(vxlan, "vni").text = str(vni) for interface in interfaces: if_element = etree.SubElement(bd, "interface") etree.SubElement(if_element, "name").text = interface etree.SubElement(bd, "routing-interface").text = "irb." 
+ str(network_id) #network_id is unique, hence irb evpn_proto_config = etree.SubElement(ri, "protocols") evpn = etree.SubElement(evpn_proto_config, "evpn") etree.SubElement(evpn, "encapsulation").text = "vxlan" etree.SubElement(evpn, "extended-vni-list").text = "all" interfaces_config = self.interfaces_config or etree.Element("interfaces") irb_intf = etree.SubElement(interfaces_config, "interface") etree.SubElement(irb_intf, "name").text = "irb" etree.SubElement(irb_intf, "gratuitous-arp-reply") if gateways is not None: intf_unit = etree.SubElement(irb_intf, "unit") etree.SubElement(intf_unit, "name").text = str(network_id) family = etree.SubElement(intf_unit, "family") inet = etree.SubElement(family, "inet") for gateway in gateways: addr = etree.SubElement(inet, "address") etree.SubElement(addr, "name").text = gateway lo_intf = etree.SubElement(interfaces_config, "interface") etree.SubElement(lo_intf, "name").text = "lo0" intf_unit = etree.SubElement(lo_intf, "unit") etree.SubElement(intf_unit, "name").text = "0" family = etree.SubElement(intf_unit, "family") inet = etree.SubElement(family, "inet") addr = etree.SubElement(inet, "address") etree.SubElement(addr, "name").text = self.bgp_params['address'] + "/32" etree.SubElement(addr, "primary") etree.SubElement(addr, "preferred") for interface in interfaces: intf = etree.SubElement(interfaces_config, "interface") intfparts = interface.split(".") etree.SubElement(intf, "name").text = intfparts[0] etree.SubElement(intf, "encapsulation").text = "ethernet-bridge" intf_unit = etree.SubElement(intf, "unit") etree.SubElement(intf_unit, "name").text = intfparts[1] family = etree.SubElement(intf_unit, "family") etree.SubElement(family, "bridge") proto_config = self.proto_config or etree.Element("protocols") mpls = etree.SubElement(proto_config, "mpls") intf = etree.SubElement(mpls, "interface") etree.SubElement(intf, "name").text = "all" #fip services config services_config = self.services_config if fip_map is not None: services_config = self.services_config or etree.Element("services") service_name = 'sv-' + ri_name #mx has limitation for service-set and nat-rule name length, allowed max 63 chars service_name = service_name[:23] service_set = etree.SubElement(services_config, "service-set") etree.SubElement(service_set, "name").text = service_name nat_rule = etree.SubElement(service_set, "nat-rules") etree.SubElement(nat_rule, "name").text = service_name + "-sn-rule" nat_rule = etree.SubElement(service_set, "nat-rules") etree.SubElement(nat_rule, "name").text = service_name + "-dn-rule" next_hop_service = etree.SubElement(service_set, "next-hop-service") etree.SubElement(next_hop_service , "inside-service-interface").text = interfaces[0] etree.SubElement(next_hop_service , "outside-service-interface").text = interfaces[1] nat = etree.SubElement(services_config, "nat") snat_rule = etree.SubElement(nat, "rule") etree.SubElement(snat_rule, "name").text = service_name + "-sn-rule" etree.SubElement(snat_rule, "match-direction").text = "input" dnat_rule = etree.SubElement(nat, "rule") etree.SubElement(dnat_rule, "name").text = service_name + "-dn-rule" etree.SubElement(dnat_rule, "match-direction").text = "output" for pip, fip_vn in fip_map.items(): fip = fip_vn["floating_ip"] term = etree.SubElement(snat_rule, "term") etree.SubElement(term, "name").text = "term_" + pip.replace('.', '_') from_ = etree.SubElement(term, "from") src_addr = etree.SubElement(from_, "source-address") etree.SubElement(src_addr, "name").text = pip + "/32" # private ip then_ = 
etree.SubElement(term, "then") translated = etree.SubElement(then_, "translated") etree.SubElement(translated , "source-prefix").text = fip + "/32" # public ip translation_type = etree.SubElement(translated, "translation-type") etree.SubElement(translation_type, "basic-nat44") term = etree.SubElement(dnat_rule, "term") etree.SubElement(term, "name").text = "term_" + fip.replace('.', '_') from_ = etree.SubElement(term, "from") src_addr = etree.SubElement(from_, "destination-address") etree.SubElement(src_addr, "name").text = fip + "/32" #public ip then_ = etree.SubElement(term, "then") translated = etree.SubElement(then_, "translated") etree.SubElement(translated , "destination-prefix").text = pip + "/32" #source ip translation_type = etree.SubElement(translated, "translation-type") etree.SubElement(translation_type, "dnat-44") interfaces_config = self.interfaces_config or etree.Element("interfaces") si_intf = etree.SubElement(interfaces_config, "interface") intfparts = interfaces[0].split(".") etree.SubElement(si_intf, "name").text = intfparts[0] intf_unit = etree.SubElement(si_intf, "unit") etree.SubElement(intf_unit, "name").text = interfaces[0].split(".")[1] family = etree.SubElement(intf_unit, "family") etree.SubElement(family, "inet") etree.SubElement(intf_unit, "service-domain").text = "inside" intf_unit = etree.SubElement(si_intf, "unit") etree.SubElement(intf_unit, "name").text = interfaces[1].split(".")[1] family = etree.SubElement(intf_unit, "family") etree.SubElement(family, "inet") etree.SubElement(intf_unit, "service-domain").text = "outside" self.forwarding_options_config = forwarding_options_config self.firewall_config = firewall_config self.policy_config = policy_config self.proto_config = proto_config self.interfaces_config = interfaces_config self.services_config = services_config self.route_targets |= import_targets | export_targets self.ri_config = ri_config # end add_routing_instance def set_global_routing_options(self, bgp_params): if bgp_params['address'] is not None: self.global_routing_options_config = etree.Element("routing-options") etree.SubElement(self.global_routing_options_config, "router-id").text = bgp_params['address'] #end set_global_routing_options def is_family_configured(self, params, family_name): if params is None or params.get('address_families') is None: return False families = params['address_families'].get('family', []) if family_name in families: return True return False def _add_family_etree(self, parent, params): if params.get('address_families') is None: return family_etree = etree.SubElement(parent, "family") for family in params['address_families'].get('family', []): if family in self._FAMILY_MAP: family_subtree = etree.fromstring(self._FAMILY_MAP[family]) family_etree.append(family_subtree) else: etree.SubElement(family_etree, family) # end _add_family_etree def add_bgp_auth_config(self, bgp_config, bgp_params): if bgp_params.get('auth_data') is None: return keys = bgp_params['auth_data'].get('key_items', []) if len(keys) > 0: etree.SubElement(bgp_config, "authentication-key").text = keys[0].get('key') def add_bgp_hold_time_config(self, bgp_config, bgp_params): if bgp_params.get('hold_time') is None: return etree.SubElement(bgp_config, "hold-time").text = str(bgp_params.get('hold_time')) def set_bgp_config(self, params): self.bgp_params = params if (self.vnc_managed is None or self.vnc_managed == False): if self.bgp_config_sent: # user must have unset the vnc managed property, so temporaly set it # for deleting the existing config 
self.vnc_managed = True self.delete_bgp_config() self.vnc_managed = False return return # end set_bgp_config def _get_bgp_config_xml(self, external=False): if self.bgp_params is None: return None bgp_config = etree.Element("group", operation="replace") if external: etree.SubElement(bgp_config, "name").text = "__contrail_external__" etree.SubElement(bgp_config, "type").text = "external" else: etree.SubElement(bgp_config, "name").text = "__contrail__" etree.SubElement(bgp_config, "type").text = "internal" etree.SubElement(bgp_config, "multihop") local_address = etree.SubElement(bgp_config, "local-address") local_address.text = self.bgp_params['address'] self._add_family_etree(bgp_config, self.bgp_params) self.add_bgp_auth_config(bgp_config, self.bgp_params) self.add_bgp_hold_time_config(bgp_config, self.bgp_params) etree.SubElement(bgp_config, "keep").text = "all" return bgp_config # end _get_bgp_config_xml def reset_bgp_config(self): self.routing_instances = {} self.bgp_params = None self.ri_config = None self.tunnel_config = None self.interfaces_config = None self.services_config = None self.policy_config = None self.firewall_config = None self.inet_forwarding_filter = None self.forwarding_options_config = None self.global_routing_options_config = None self.proto_config = None self.route_targets = set() self.bgp_peers = {} self.external_peers = {} # ene reset_bgp_config def delete_bgp_config(self): if not self.bgp_config_sent: return self.reset_bgp_config() self.send_netconf([], default_operation="none", operation="delete") self.bgp_config_sent = False # end delete_config def add_bgp_peer(self, router, params, attr, external): peer_data = {} peer_data['params'] = params peer_data['attr'] = attr if external: self.external_peers[router] = peer_data else: self.bgp_peers[router] = peer_data self.send_bgp_config() # end add_peer def delete_bgp_peer(self, router): if router in self.bgp_peers: del self.bgp_peers[router] elif router in self.external_peers: del self.external_peers[rotuer] else: return self.send_bgp_config() # end delete_bgp_peer def _get_neighbor_config_xml(self, bgp_config, peers): for peer, peer_data in peers.items(): params = peer_data.get('params', {}) attr = peer_data.get('attr', {}) nbr = etree.SubElement(bgp_config, "neighbor") etree.SubElement(nbr, "name").text = peer bgp_sessions = attr.get('session') if bgp_sessions: # for now assume only one session session_attrs = bgp_sessions[0].get('attributes', []) for session_attr in session_attrs: # For not, only consider the attribute if bgp-router is # not specified if session_attr.get('bgp_router') is None: self._add_family_etree(nbr, session_attr) self.add_bgp_auth_config(nbr, session_attr) break if params.get('autonomous_system') is not None: etree.SubElement(nbr, "peer-as").text = str(params.get('autonomous_system')) # end _get_neighbor_config_xml def send_bgp_config(self): bgp_config = self._get_bgp_config_xml() if bgp_config is None: return proto_config = etree.Element("protocols") bgp = etree.SubElement(proto_config, "bgp") bgp.append(bgp_config) self._get_neighbor_config_xml(bgp_config, self.bgp_peers) if self.external_peers is not None: ext_grp_config = self._get_bgp_config_xml(True) bgp.append(ext_grp_config) self._get_neighbor_config_xml(ext_grp_config, self.external_peers) routing_options_config = etree.Element("routing-options") etree.SubElement( routing_options_config, "route-distinguisher-id").text = self.bgp_params['identifier'] etree.SubElement(routing_options_config, "autonomous-system").text = \ 
str(self.bgp_params.get('autonomous_system')) config_list = [proto_config, routing_options_config] if self.ri_config is not None: config_list.append(self.ri_config) for route_target in self.route_targets: comm = etree.SubElement(self.policy_config, "community") etree.SubElement(comm, 'name').text = route_target.replace(':', '_') etree.SubElement(comm, 'members').text = route_target if self.tunnel_config is not None: config_list.append(self.tunnel_config) if self.interfaces_config is not None: config_list.append(self.interfaces_config) if self.services_config is not None: config_list.append(self.services_config) if self.policy_config is not None: config_list.append(self.policy_config) if self.firewall_config is not None: config_list.append(self.firewall_config) if self.forwarding_options_config is not None: config_list.append(self.forwarding_options_config) if self.global_routing_options_config is not None: config_list.append(self.global_routing_options_config) if self.proto_config is not None: config_list.append(self.proto_config) self.send_netconf(config_list) self.bgp_config_sent = True # end send_bgp_config # end PhysicalRouterConfig ############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## from zope.interface import Interface, Attribute class ICollection(Interface): def clear(): """Remove all of the items from the collection.""" def __nonzero__(): """Check if the collection is non-empty. Return a true value if the collection is non-empty and a false value otherwise. """ class IReadSequence(Interface): def __getitem__(index): """Return the value at the given index. An IndexError is raised if the index cannot be found. """ def __getslice__(index1, index2): """Return a subsequence from the original sequence. The subsequence includes the items from index1 up to, but not including, index2. """ class IKeyed(ICollection): def has_key(key): """Check whether the object has an item with the given key. Return a true value if the key is present, else a false value. """ def keys(min=None, max=None, excludemin=False, excludemax=False): """Return an IReadSequence containing the keys in the collection. The type of the IReadSequence is not specified. It could be a list or a tuple or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a min is specified, then output is constrained to keys greater than or equal to the given min, and, if excludemin is specified and true, is further constrained to keys strictly greater than min. A min value of None is ignored. If min is None or not specified, and excludemin is true, the smallest key is excluded. If a max is specified, then output is constrained to keys less than or equal to the given max, and, if excludemax is specified and true, is further constrained to keys strictly less than max. A max value of None is ignored. If max is None or not specified, and excludemax is true, the largest key is excluded.
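For example, an illustrative sketch using the OOBTree flavor (the
same range arguments apply to every module)::

  from BTrees.OOBTree import OOBTree
  t = OOBTree()
  for k in 'abcde':
      t[k] = k.upper()
  list(t.keys(min='b', max='d'))                   # ['b', 'c', 'd']
  list(t.keys(min='b', max='d', excludemin=True))  # ['c', 'd']
  list(t.keys(min='b', max='d', excludemax=True))  # ['b', 'c']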
""" def maxKey(key=None): """Return the maximum key. If a key argument if provided and not None, return the largest key that is less than or equal to the argument. Raise an exception if no such key exists. """ def minKey(key=None): """Return the minimum key. If a key argument if provided and not None, return the smallest key that is greater than or equal to the argument. Raise an exception if no such key exists. """ class ISetMutable(IKeyed): def insert(key): """Add the key (value) to the set. If the key was already in the set, return 0, otherwise return 1. """ def remove(key): """Remove the key from the set. Raises KeyError if key is not in the set. """ def update(seq): """Add the items from the given sequence to the set.""" class ISized(Interface): """An object that supports __len__.""" def __len__(): """Return the number of items in the container.""" class IKeySequence(IKeyed, ISized): def __getitem__(index): """Return the key in the given index position. This allows iteration with for loops and use in functions, like map and list, that read sequences. """ class ISet(IKeySequence, ISetMutable): pass class ITreeSet(IKeyed, ISetMutable): pass class IMinimalDictionary(ISized, IKeyed): def get(key, default): """Get the value associated with the given key. Return the default if has_key(key) is false. """ def __getitem__(key): """Get the value associated with the given key. Raise KeyError if has_key(key) is false. """ def __setitem__(key, value): """Set the value associated with the given key.""" def __delitem__(key): """Delete the value associated with the given key. Raise KeyError if has_key(key) is false. """ def values(min=None, max=None, excludemin=False, excludemax=False): """Return an IReadSequence containing the values in the collection. The type of the IReadSequence is not specified. It could be a list or a tuple or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a min is specified, then output is constrained to values whose keys are greater than or equal to the given min, and, if excludemin is specified and true, is further constrained to values whose keys are strictly greater than min. A min value of None is ignored. If min is None or not specified, and excludemin is true, the value corresponding to the smallest key is excluded. If a max is specified, then output is constrained to values whose keys are less than or equal to the given max, and, if excludemax is specified and true, is further constrained to values whose keys are strictly less than max. A max value of None is ignored. If max is None or not specified, and excludemax is true, the value corresponding to the largest key is excluded. """ def items(min=None, max=None, excludemin=False, excludemax=False): """Return an IReadSequence containing the items in the collection. An item is a 2-tuple, a (key, value) pair. The type of the IReadSequence is not specified. It could be a list or a tuple or some other type. All arguments are optional, and may be specified as keyword arguments, or by position. If a min is specified, then output is constrained to items whose keys are greater than or equal to the given min, and, if excludemin is specified and true, is further constrained to items whose keys are strictly greater than min. A min value of None is ignored. If min is None or not specified, and excludemin is true, the item with the smallest key is excluded. 
If a max is specified, then output is constrained to items whose keys are less than or equal to the given max, and, if excludemax is specified and true, is further constrained to items whose keys are strictly less than max. A max value of None is ignored. If max is None or not specified, and excludemax is true, the item with the largest key is excluded. """ class IDictionaryIsh(IMinimalDictionary): def update(collection): """Add the items from the given collection object to the collection. The input collection must be a sequence of (key, value) 2-tuples, or an object with an 'items' method that returns a sequence of (key, value) pairs. """ def byValue(minValue): """Return a sequence of (value, key) pairs, sorted by value. Values < minValue are omitted and other values are "normalized" by the minimum value. This normalization may be a noop, but, for integer values, the normalization is division. """ def setdefault(key, d): """D.setdefault(k, d) -> D.get(k, d), also set D[k]=d if k not in D. Return the value like get() except that if key is missing, d is both returned and inserted into the dictionary as the value of k. Note that, unlike as for Python's dict.setdefault(), d is not optional. Python defaults d to None, but that doesn't make sense for mappings that can't have None as a value (for example, an IIBTree can have only integers as values). """ def pop(key, d): """D.pop(k[, d]) -> v, remove key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. """ class IBTree(IDictionaryIsh): def insert(key, value): """Insert a key and value into the collection. If the key was already in the collection, then there is no change and 0 is returned. If the key was not already in the collection, then the item is added and 1 is returned. This method is here to allow one to generate random keys and to insert and test whether the key was there in one operation. A standard idiom for generating new keys will be:: key = generate_key() while not t.insert(key, value): key=generate_key() """ class IMerge(Interface): """Object with methods for merging sets, buckets, and trees. These methods are supplied in modules that define collection classes with particular key and value types. The operations apply only to collections from the same module. For example, the IIBTree.union can only be used with IIBTree.IIBTree, IIBTree.IIBucket, IIBTree.IISet, and IIBTree.IITreeSet. The implementing module has a value type. The IOBTree and OOBTree modules have object value type. The IIBTree and OIBTree modules have integer value types. Other modules may be defined in the future that have other value types. The individual types are classified into set (Set and TreeSet) and mapping (Bucket and BTree) types. """ def difference(c1, c2): """Return the keys or items in c1 for which there is no key in c2. If c1 is None, then None is returned. If c2 is None, then c1 is returned. If neither c1 nor c2 is None, the output is a Set if c1 is a Set or TreeSet, and is a Bucket if c1 is a Bucket or BTree. """ def union(c1, c2): """Compute the Union of c1 and c2. If c1 is None, then c2 is returned, otherwise, if c2 is None, then c1 is returned. The output is a Set containing keys from the input collections. """ def intersection(c1, c2): """Compute the intersection of c1 and c2. If c1 is None, then c2 is returned, otherwise, if c2 is None, then c1 is returned. The output is a Set containing matching keys from the input collections. 
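For example, an illustrative sketch with the integer flavors from
BTrees.IIBTree, exercising difference, union, and intersection as
described above::

  from BTrees.IIBTree import IISet, difference, union, intersection
  a = IISet([1, 2, 3, 4])
  b = IISet([3, 4, 5])
  list(difference(a, b))    # [1, 2]
  list(union(a, b))         # [1, 2, 3, 4, 5]
  list(intersection(a, b))  # [3, 4]
  union(None, b) is b       # True; a None input passes through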
""" class IBTreeModule(Interface): """These are available in all modules (IOBTree, OIBTree, OOBTree, IIBTree, IFBTree, LFBTree, LOBTree, OLBTree, and LLBTree). """ BTree = Attribute( """The IBTree for this module. Also available as [prefix]BTree, as in IOBTree.""") Bucket = Attribute( """The leaf-node data buckets used by the BTree. (IBucket is not currently defined in this file, but is essentially IDictionaryIsh, with the exception of __nonzero__, as of this writing.) Also available as [prefix]Bucket, as in IOBucket.""") TreeSet = Attribute( """The ITreeSet for this module. Also available as [prefix]TreeSet, as in IOTreeSet.""") Set = Attribute( """The ISet for this module: the leaf-node data buckets used by the TreeSet. Also available as [prefix]BTree, as in IOSet.""") class IIMerge(IMerge): """Merge collections with integer value type. A primary intent is to support operations with no or integer values, which are used as "scores" to rate indiviual keys. That is, in this context, a BTree or Bucket is viewed as a set with scored keys, using integer scores. """ def weightedUnion(c1, c2, weight1=1, weight2=1): """Compute the weighted union of c1 and c2. If c1 and c2 are None, the output is (0, None). If c1 is None and c2 is not None, the output is (weight2, c2). If c1 is not None and c2 is None, the output is (weight1, c1). Else, and hereafter, c1 is not None and c2 is not None. If c1 and c2 are both sets, the output is 1 and the (unweighted) union of the sets. Else the output is 1 and a Bucket whose keys are the union of c1 and c2's keys, and whose values are:: v1*weight1 + v2*weight2 where: v1 is 0 if the key is not in c1 1 if the key is in c1 and c1 is a set c1[key] if the key is in c1 and c1 is a mapping v2 is 0 if the key is not in c2 1 if the key is in c2 and c2 is a set c2[key] if the key is in c2 and c2 is a mapping Note that c1 and c2 must be collections. """ def weightedIntersection(c1, c2, weight1=1, weight2=1): """Compute the weighted intersection of c1 and c2. If c1 and c2 are None, the output is (0, None). If c1 is None and c2 is not None, the output is (weight2, c2). If c1 is not None and c2 is None, the output is (weight1, c1). Else, and hereafter, c1 is not None and c2 is not None. If c1 and c2 are both sets, the output is the sum of the weights and the (unweighted) intersection of the sets. Else the output is 1 and a Bucket whose keys are the intersection of c1 and c2's keys, and whose values are:: v1*weight1 + v2*weight2 where: v1 is 1 if c1 is a set c1[key] if c1 is a mapping v2 is 1 if c2 is a set c2[key] if c2 is a mapping Note that c1 and c2 must be collections. """ class IMergeIntegerKey(IMerge): """IMerge-able objects with integer keys. Concretely, this means the types in IOBTree and IIBTree. """ def multiunion(seq): """Return union of (zero or more) integer sets, as an integer set. seq is a sequence of objects each convertible to an integer set. These objects are convertible to an integer set: + An integer, which is added to the union. + A Set or TreeSet from the same module (for example, an IIBTree.TreeSet for IIBTree.multiunion()). The elements of the set are added to the union. + A Bucket or BTree from the same module (for example, an IOBTree.IOBTree for IOBTree.multiunion()). The keys of the mapping are added to the union. The union is returned as a Set from the same module (for example, IIBTree.multiunion() returns an IIBTree.IISet). The point to this method is that it can run much faster than doing a sequence of two-input union() calls. 
Under the covers, all the integers in all the inputs are sorted via a single linear-time radix sort, then duplicates are removed in a second linear-time pass. """ class IBTreeFamily(Interface): """the 64-bit or 32-bit family""" IO = Attribute('The IIntegerObjectBTreeModule for this family') OI = Attribute('The IObjectIntegerBTreeModule for this family') II = Attribute('The IIntegerIntegerBTreeModule for this family') IF = Attribute('The IIntegerFloatBTreeModule for this family') OO = Attribute('The IObjectObjectBTreeModule for this family') maxint = Attribute('The maximum integer storable in this family') minint = Attribute('The minimum integer storable in this family') class IIntegerObjectBTreeModule(IBTreeModule, IMerge): """keys, or set values, are integers; values are objects. describes IOBTree and LOBTree""" family = Attribute('The IBTreeFamily of this module') class IObjectIntegerBTreeModule(IBTreeModule, IIMerge): """keys, or set values, are objects; values are integers. Object keys (and set values) must sort reliably (for instance, *not* on object id)! Homogeneous key types recommended. describes OIBTree and OLBTree""" family = Attribute('The IBTreeFamily of this module') class IIntegerIntegerBTreeModule(IBTreeModule, IIMerge, IMergeIntegerKey): """keys, or set values, are integers; values are also integers. describes IIBTree and LLBTree""" family = Attribute('The IBTreeFamily of this module') class IObjectObjectBTreeModule(IBTreeModule, IMerge): """keys, or set values, are objects; values are also objects. Object keys (and set values) must sort reliably (for instance, *not* on object id)! Homogeneous key types recommended. describes OOBTree""" # Note that there's no ``family`` attribute; all families include # the OO flavor of BTrees. class IIntegerFloatBTreeModule(IBTreeModule, IMerge): """keys, or set values, are integers; values are floats. describes IFBTree and LFBTree""" family = Attribute('The IBTreeFamily of this module') try: from ZODB.POSException import BTreesConflictError except ImportError: class BTreesConflictError(ValueError): @property def reason(self): return self.args[-1] ############################################################### # IMPORTANT NOTE # # Getting the length of a BTree, TreeSet, or output of keys, # values, or items of same is expensive. If you need to get the # length, you need to maintain this separately. # # Eventually, I need to express this through the interfaces.
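# A common pattern, then, is to maintain the count in a separate
# counter object that is updated alongside the tree. An illustrative
# sketch, assuming the BTrees.Length module that ships with the
# BTrees package:
#
#   from BTrees.OOBTree import OOBTree
#   from BTrees.Length import Length
#   tree, size = OOBTree(), Length()
#   tree['key'] = 'value'
#   size.change(1)          # the caller keeps the count in step
#   size()                  # -> 1, without walking the whole tree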
# ################################################################ from .models import GallerySection, Image, GALLERY_SETTINGS, Thumber from django.contrib import admin from django import forms from django.shortcuts import render from django.http import HttpResponseRedirect from django.urls import reverse try: from tinymce import widgets as tinymce_widgets except ImportError: tinymce_widgets = None class GalleryAdminForm(forms.ModelForm): class Meta: model = GallerySection if tinymce_widgets: widgets = { 'description': tinymce_widgets.AdminTinyMCE, } exclude = () class ImageAdminForm(forms.ModelForm): class Meta(object): model = Image widgets = { 'description': forms.TextInput } exclude = () class ImageInline(admin.StackedInline): model = Image form = ImageAdminForm extra = 1 min_num = 0 class ThumbnailForm(forms.Form): _selected_action = forms.CharField(widget=forms.MultipleHiddenInput) sizes = GALLERY_SETTINGS.get("sizes").get("thumb") width = forms.IntegerField(initial=sizes.get('width')) height = forms.IntegerField(initial=sizes.get('height')) class GalleryAdmin(admin.ModelAdmin): inlines = [ImageInline] prepopulated_fields = { "slug": ("title", ) } fieldsets = ( (None, { 'fields': ('title', 'slug', 'description') }), ("options", { 'fields': ('position', 'listed') }), ) actions = ['as_html'] form = GalleryAdminForm list_display = ('text_title', 'position') list_editable = ('position', ) ordering = ('position', 'title') def as_html(self, request, queryset): form = None thumber = None if 'apply' in request.POST: form = ThumbnailForm(request.POST) if form.is_valid(): thumber = Thumber(form.cleaned_data['width'], form.cleaned_data['height']) if not form: form = ThumbnailForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)}) return render(request, 'gallery/admin_gallery_as_html.html', { 'title': "Gallery as HTML", 'gallery_form': form, 'thumber': thumber, 'galleries': queryset, 'location': request.get_full_path, }) admin.site.register(GallerySection, GalleryAdmin) class MoveGalleryForm(forms.Form): _selected_action = forms.CharField(widget=forms.MultipleHiddenInput) gallery = forms.ModelChoiceField(GallerySection.objects, required=False) class ImageAdmin(admin.ModelAdmin): list_display = ('thumb', 'image', 'gallery', 'description') list_filter = ('gallery',) actions = ['change_gallery', 'as_html'] def change_gallery(self, request, queryset): form = None if 'apply' in request.POST: form = MoveGalleryForm(request.POST) if form.is_valid(): gallery = form.cleaned_data['gallery'] queryset.update(gallery=gallery) if gallery: self.message_user(request, "Moved images to gallery: {}.".format(gallery.title)) else: self.message_user(request, "Removed images from gallery.") return HttpResponseRedirect(request.get_full_path()) if not form: form = MoveGalleryForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)}) return render(request, 'gallery/admin_change_gallery.html', { 'title': 'Change Image Gallery', 'images': queryset, 'gallery_form': form, }) def as_html(self, request, queryset): form = None thumber = None if 'apply' in request.POST: form = ThumbnailForm(request.POST) if form.is_valid(): thumber = Thumber(form.cleaned_data['width'], form.cleaned_data['height']) if not form: form = ThumbnailForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)}) return render(request, 'gallery/admin_images_as_html.html', { 'title': "Images as HTML", 'gallery_form': form, 'thumber': thumber, 'images': queryset, 'location': 
request.get_full_path, }) def thumb(self, obj): if obj.thumbnail: return '<img src="{}" />'.format(obj.thumbnail) return obj.image thumb.allow_tags = True admin.site.register(Image, ImageAdmin) # Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import requests import six from cinder import test from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.scaleio import driver class CustomResponseMode(object): """A context manager to define a custom set of per-request response modes. Example: with CustomResponseMode(self, **{ 'some/api/path': RESPONSE_MODE.Valid, 'another/api/path': RESPONSE_MODE.BadStatus, 'last/api/path': MockResponse('some data', status_code=403), }): self.assertRaises(SomeException, self.driver.api_call, data) """ def __init__(self, test_instance, **kwargs): self.test_instance = test_instance self.custom_responses = kwargs self.current_responses = None def __enter__(self): self.current_responses = self.test_instance.HTTPS_MOCK_RESPONSES https_responses = copy.deepcopy( self.test_instance.HTTPS_MOCK_RESPONSES ) current_mode = self.test_instance.current_https_response_mode for call, new_mode in self.custom_responses.items(): if isinstance(new_mode, mocks.MockHTTPSResponse): https_responses[current_mode][call] = new_mode else: https_responses[current_mode][call] = \ self.test_instance.get_https_response(call, new_mode) self.test_instance.HTTPS_MOCK_RESPONSES = https_responses def __exit__(self, exc_type, exc_val, exc_tb): self.test_instance.HTTPS_MOCK_RESPONSES = self.current_responses class TestScaleIODriver(test.TestCase): """Base ``TestCase`` subclass for the ``ScaleIODriver``""" RESPONSE_MODE = type(str('ResponseMode'), (object, ), dict( Valid='0', Invalid='1', BadStatus='2', ValidVariant='3', )) __RESPONSE_MODE_NAMES = { '0': 'Valid', '1': 'Invalid', '2': 'BadStatus', '3': 'ValidVariant', } BAD_STATUS_RESPONSE = mocks.MockHTTPSResponse( { 'errorCode': 500, 'message': 'BadStatus Response Test', }, 500 ) OLD_VOLUME_NOT_FOUND_ERROR = 78 VOLUME_NOT_FOUND_ERROR = 79 HTTPS_MOCK_RESPONSES = {} __COMMON_HTTPS_MOCK_RESPONSES = { RESPONSE_MODE.Valid: { 'login': 'login_token', }, RESPONSE_MODE.BadStatus: { 'login': mocks.MockHTTPSResponse( { 'errorCode': 403, 'message': 'Bad Login Response Test', }, 403 ), }, } __https_response_mode = RESPONSE_MODE.Valid log = None STORAGE_POOL_ID = six.text_type('1') STORAGE_POOL_NAME = 'SP1' PROT_DOMAIN_ID = six.text_type('1') PROT_DOMAIN_NAME = 'PD1' STORAGE_POOLS = ['{}:{}'.format(PROT_DOMAIN_NAME, STORAGE_POOL_NAME)] def setUp(self): """Set up a test case environment. Creates a ``ScaleIODriver`` instance and mocks the ``requests.get/post`` methods to return ``MockHTTPSResponse``'s instead.
""" super(TestScaleIODriver, self).setUp() self.configuration = conf.Configuration(driver.scaleio_opts, conf.SHARED_CONF_GROUP) self._set_overrides() self.driver = mocks.ScaleIODriver(configuration=self.configuration) self.mock_object(requests, 'get', self.do_request) self.mock_object(requests, 'post', self.do_request) def _set_overrides(self): # Override the defaults to fake values self.override_config('san_ip', override='127.0.0.1', group=conf.SHARED_CONF_GROUP) self.override_config('sio_rest_server_port', override='8888', group=conf.SHARED_CONF_GROUP) self.override_config('san_login', override='test', group=conf.SHARED_CONF_GROUP) self.override_config('san_password', override='pass', group=conf.SHARED_CONF_GROUP) self.override_config('sio_storage_pool_id', override=self.STORAGE_POOL_ID, group=conf.SHARED_CONF_GROUP) self.override_config('sio_protection_domain_id', override=self.PROT_DOMAIN_ID, group=conf.SHARED_CONF_GROUP) self.override_config('sio_storage_pools', override='PD1:SP1', group=conf.SHARED_CONF_GROUP) self.override_config('max_over_subscription_ratio', override=5.0, group=conf.SHARED_CONF_GROUP) self.override_config('sio_server_api_version', override='2.0.0', group=conf.SHARED_CONF_GROUP) def do_request(self, url, *args, **kwargs): """Do a fake GET/POST API request. Splits `url` on '/api/' to get the what API call is, then returns the value of `self.HTTPS_MOCK_RESPONSES[][]` converting to a `MockHTTPSResponse` if necessary. :raises test.TestingException: If the current mode/api_call does not exist. :returns MockHTTPSResponse: """ return self.get_https_response(url.split('/api/')[1]) def set_https_response_mode(self, mode=RESPONSE_MODE.Valid): """Set the HTTPS response mode. RESPONSE_MODE.Valid: Respond with valid data RESPONSE_MODE.Invalid: Respond with invalid data RESPONSE_MODE.BadStatus: Response with not-OK status code. 
""" self.__https_response_mode = mode def get_https_response(self, api_path, mode=None): if mode is None: mode = self.__https_response_mode try: response = self.HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: try: response = self.__COMMON_HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: raise test.TestingException( 'Mock API Endpoint not implemented: [{}]{}'.format( self.__RESPONSE_MODE_NAMES[mode], api_path ) ) if not isinstance(response, mocks.MockHTTPSResponse): return mocks.MockHTTPSResponse(response, 200) return response @property def current_https_response_mode(self): return self.__https_response_mode def https_response_mode_name(self, mode): return self.__RESPONSE_MODE_NAMES[mode] def custom_response_mode(self, **kwargs): return CustomResponseMode(self, **kwargs) from django import template from django.contrib.admin.utils import quote from django.core.urlresolvers import Resolver404, get_script_prefix, resolve from django.utils.http import urlencode from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse register = template.Library() @register.filter def admin_urlname(value, arg): return 'admin:%s_%s_%s' % (value.app_label, value.model_name, arg) @register.filter def admin_urlquote(value): return quote(value) @register.simple_tag(takes_context=True) def add_preserved_filters(context, url, popup=False, to_field=None): opts = context.get('opts') preserved_filters = context.get('preserved_filters') parsed_url = list(urlparse(url)) parsed_qs = dict(parse_qsl(parsed_url[4])) merged_qs = dict() if opts and preserved_filters: preserved_filters = dict(parse_qsl(preserved_filters)) match_url = '/%s' % url.partition(get_script_prefix())[2] try: match = resolve(match_url) except Resolver404: pass else: current_url = '%s:%s' % (match.app_name, match.url_name) changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name) if changelist_url == current_url and '_changelist_filters' in preserved_filters: preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters'])) merged_qs.update(preserved_filters) if popup: from django.contrib.admin.options import IS_POPUP_VAR merged_qs[IS_POPUP_VAR] = 1 if to_field: from django.contrib.admin.options import TO_FIELD_VAR merged_qs[TO_FIELD_VAR] = to_field merged_qs.update(parsed_qs) parsed_url[4] = urlencode(merged_qs) return urlunparse(parsed_url) #!/usr/bin/python # Copyright (c) 2014, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
import re import lldb import fblldbbase as fb def objc_getClass(className): command = '(void*)objc_getClass("{}")'.format(className) value = fb.evaluateExpression(command) return value def object_getClass(object): command = '(void*)object_getClass({})'.format(object) value = fb.evaluateExpression(command) return value def class_getName(klass): command = '(const char*)class_getName((Class){})'.format(klass) value = fb.evaluateExpressionValue(command).GetSummary().strip('"') return value def class_getSuperclass(klass): command = '(void*)class_getSuperclass((Class){})'.format(klass) value = fb.evaluateExpression(command) return value def class_isMetaClass(klass): command = 'class_isMetaClass((Class){})'.format(klass) return fb.evaluateBooleanExpression(command) def class_getInstanceMethod(klass, selector): command = '(void*)class_getInstanceMethod((Class){}, @selector({}))'.format(klass, selector) value = fb.evaluateExpression(command) return value def currentArch(): targetTriple = lldb.debugger.GetSelectedTarget().GetTriple() arch = targetTriple.split('-')[0] if arch == 'x86_64h': arch = 'x86_64' return arch def functionPreambleExpressionForSelf(): import re arch = currentArch() expressionForSelf = None if arch == 'i386': expressionForSelf = '*(id*)($esp+4)' elif arch == 'x86_64': expressionForSelf = '(id)$rdi' elif arch == 'arm64': expressionForSelf = '(id)$x0' elif re.match(r'^armv.*$', arch): expressionForSelf = '(id)$r0' return expressionForSelf def functionPreambleExpressionForObjectParameterAtIndex(parameterIndex): arch = currentArch() expresssion = None if arch == 'i386': expresssion = '*(id*)($esp + ' + str(12 + parameterIndex * 4) + ')' elif arch == 'x86_64': if parameterIndex > 3: raise Exception("Current implementation can not return object at index greater than 3 for x86_64") registersList = ['rdx', 'rcx', 'r8', 'r9'] expresssion = '(id)$' + registersList[parameterIndex] elif arch == 'arm64': if parameterIndex > 5: raise Exception("Current implementation can not return object at index greater than 5 for arm64") expresssion = '(id)$x' + str(parameterIndex + 2) elif re.match(r'^armv.*$', arch): if parameterIndex > 1: raise Exception("Current implementation can not return object at index greater than 1 for arm32") expresssion = '(id)$r' + str(parameterIndex + 2) return expresssion def isMacintoshArch(): arch = currentArch() if not arch == 'x86_64': return False nsClassName = 'NSApplication' command = '(void*)objc_getClass("{}")'.format(nsClassName) return (fb.evaluateBooleanExpression(command + '!= nil')) def isIOSSimulator(): return fb.evaluateExpressionValue('(id)[[UIDevice currentDevice] model]').GetObjectDescription().lower().find('simulator') >= 0 def isIOSDevice(): return not isMacintoshArch() and not isIOSSimulator() #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 4, delta, theta, alpha low, alpha high, beta low, batch size = 5 and multiclass data set (BALANCED) with signal only data @author: yaric """ import experiment as ex import config from time import time n_hidden = 4 batch_size = 5 max_cls_samples = 7 experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h_mc_signal_%d' % (n_hidden, batch_size, max_cls_samples) # will be used as parent dir for analyzer results # The sample records identifiers signal_ids = ['IO_10_2', 'KS_10_2', 'RO_10_2'] class_lbs = ['IO', 'KS', 'RO'] noise_ids = ['noise'] # Setup analyzer configuration analyzer_config = ex.defaultAnalyzerConfig() analyzer_config['batch_size'] = batch_size 
analyzer_config['learning_rate'] = 0.1 analyzer_config['n_hidden'] = n_hidden analyzer_config['training_epochs'] = 50000 analyzer_config['encoder'] = 'cA' analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l' start = time() # # Run analyzer # print("\nStart analysis with parameters:\n%s\n" % analyzer_config) print("Start analysis for signal records: %s" % signal_ids) ex.runEEGAnalyzerWithIDs(ids_list=signal_ids, experiment_name=experiment_name, a_config=analyzer_config) # # Run classifiers # signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name) out_suffix = experiment_name print("Run classifiers over analyzed records. \nSignal dir: %s" % (signal_dir)) ex.runSignalsOnlyClassifier(signal_dir=signal_dir, signal_records=signal_ids, out_suffix=out_suffix, signal_class_labels=class_lbs, max_cls_samples=max_cls_samples) print("\n\nExperiment %s took %.2f seconds.\n" % (experiment_name, time() - start)) from django.contrib.admin import widgets as admin_widgets from django.forms.utils import flatatt from django.template.loader import get_template from django.utils.encoding import force_text from django.utils.html import conditional_escape from judge.widgets.mixins import CompressorWidgetMixin __all__ = ['PagedownWidget', 'AdminPagedownWidget', 'MathJaxPagedownWidget', 'MathJaxAdminPagedownWidget', 'HeavyPreviewPageDownWidget', 'HeavyPreviewAdminPageDownWidget'] try: from pagedown.widgets import PagedownWidget as OldPagedownWidget except ImportError: PagedownWidget = None AdminPagedownWidget = None MathJaxPagedownWidget = None MathJaxAdminPagedownWidget = None HeavyPreviewPageDownWidget = None HeavyPreviewAdminPageDownWidget = None else: class PagedownWidget(CompressorWidgetMixin, OldPagedownWidget): # The goal here is to compress all the pagedown JS into one file. # We do not want any further compression down the chain, because # 1. we would be creating multiple large JS files to download, and # 2. compressing everything together is not a problem here, because all # the pagedown JS files are always used together.
compress_js = True def __init__(self, *args, **kwargs): kwargs.setdefault('css', ('pagedown_widget.css',)) super(PagedownWidget, self).__init__(*args, **kwargs) class AdminPagedownWidget(PagedownWidget, admin_widgets.AdminTextareaWidget): class Media: css = {'all': [ 'content-description.css', 'admin/css/pagedown.css', ]} js = ['admin/js/pagedown.js'] class MathJaxPagedownWidget(PagedownWidget): class Media: js = [ 'mathjax_config.js', 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-AMS-MML_HTMLorMML', 'pagedown_math.js', ] class MathJaxAdminPagedownWidget(AdminPagedownWidget, MathJaxPagedownWidget): pass class HeavyPreviewPageDownWidget(PagedownWidget): def __init__(self, *args, **kwargs): kwargs.setdefault('template', 'pagedown.html') self.preview_url = kwargs.pop('preview') self.preview_timeout = kwargs.pop('preview_timeout', None) self.hide_preview_button = kwargs.pop('hide_preview_button', False) super(HeavyPreviewPageDownWidget, self).__init__(*args, **kwargs) def render(self, name, value, attrs=None, renderer=None): if value is None: value = '' final_attrs = self.build_attrs(attrs, {'name': name}) if 'class' not in final_attrs: final_attrs['class'] = '' final_attrs['class'] += ' wmd-input' return get_template(self.template).render(self.get_template_context(final_attrs, value)) def get_template_context(self, attrs, value): return { 'attrs': flatatt(attrs), 'body': conditional_escape(force_text(value)), 'id': attrs['id'], 'show_preview': self.show_preview, 'preview_url': self.preview_url, 'preview_timeout': self.preview_timeout, 'extra_classes': 'dmmd-no-button' if self.hide_preview_button else None, } class Media: css = {'all': ['dmmd-preview.css']} js = ['dmmd-preview.js'] class HeavyPreviewAdminPageDownWidget(AdminPagedownWidget, HeavyPreviewPageDownWidget): class Media: css = {'all': [ 'pygment-github.css', 'table.css', 'ranks.css', ]} #!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutIteration(Koan): def test_iterators_are_a_type(self): it = iter(range(1,6)) fib = 0 for num in it: fib += num self.assertEqual(__ , fib) def test_iterating_with_next(self): stages = iter(['alpha','beta','gamma']) try: self.assertEqual(__, next(stages)) next(stages) self.assertEqual(__, next(stages)) next(stages) except StopIteration as ex: err_msg = 'Ran out of iterations' self.assertRegexpMatches(err_msg, __) # ------------------------------------------------------------------ def add_ten(self, item): return item + 10 def test_map_transforms_elements_of_a_list(self): seq = [1, 2, 3] mapped_seq = list() mapping = map(self.add_ten, seq) self.assertNotEqual(list, mapping.__class__) self.assertEqual(__, mapping.__class__) # In Python 3 built in iterator funcs return iterable view objects # instead of lists for item in mapping: mapped_seq.append(item) self.assertEqual(__, mapped_seq) # Note, iterator methods actually return objects of iter type in # python 3. In python 2 map() would give you a list. 
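# An illustrative aside on the note above: in Python 3, map() and
# filter() return lazy, single-use iterators, so materialize them with
# list() if the values are needed more than once.
squares = map(lambda n: n * n, [1, 2, 3])
print(squares.__class__)  # <class 'map'>
print(list(squares))      # [1, 4, 9]
print(list(squares))      # [] -- the iterator is already exhausted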
def test_filter_selects_certain_items_from_a_list(self): def is_even(item): return (item % 2) == 0 seq = [1, 2, 3, 4, 5, 6] even_numbers = list() for item in filter(is_even, seq): even_numbers.append(item) self.assertEqual(__, even_numbers) def test_just_return_first_item_found(self): def is_big_name(item): return len(item) > 4 names = ["Jim", "Bill", "Clarence", "Doug", "Eli"] name = None iterator = filter(is_big_name, names) try: name = next(iterator) except StopIteration: msg = 'Ran out of big names' self.assertEqual(__, name) # ------------------------------------------------------------------ def add(self,accum,item): return accum + item def multiply(self,accum,item): return accum * item def test_reduce_will_blow_your_mind(self): import functools # As of Python 3 reduce() has been demoted from a builtin function # to the functools module. result = functools.reduce(self.add, [2, 3, 4]) self.assertEqual(__, result.__class__) # Reduce() syntax is same as Python 2 self.assertEqual(__, result) result2 = functools.reduce(self.multiply, [2, 3, 4], 1) self.assertEqual(__, result2) # Extra Credit: # Describe in your own words what reduce does. # ------------------------------------------------------------------ def test_use_pass_for_iterations_with_no_body(self): for num in range(1,5): pass self.assertEqual(__, num) # ------------------------------------------------------------------ def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self): # Ranges are an iterable sequence result = map(self.add_ten, range(1,4)) self.assertEqual(__, list(result)) try: file = open("example_file.txt") try: def make_upcase(line): return line.strip().upper() upcase_lines = map(make_upcase, file.readlines()) self.assertEqual(__, list(upcase_lines)) finally: # Arg, this is ugly. # We will figure out how to fix this later. file.close() except IOError: # should never happen self.fail() #!/usr/bin/env python # Copyright 2015 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import requests import socket from urlparse import urlparse def CheckServiceAddress(address): hostname = urlparse(address).hostname service_address = socket.gethostbyname(hostname) print service_address def GetServerResponse(address): print 'Send request to:', address response = requests.get(address) print response print response.content def Main(): parser = argparse.ArgumentParser() parser.add_argument('address') args = parser.parse_args() CheckServiceAddress(args.address) GetServerResponse(args.address) if __name__ == "__main__": Main() """Provides a common base for Apache proxies""" import re import os import subprocess import mock import zope.interface from letsencrypt import configuration from letsencrypt import errors as le_errors from letsencrypt_apache import configurator from letsencrypt_compatibility_test import errors from letsencrypt_compatibility_test import interfaces from letsencrypt_compatibility_test import util from letsencrypt_compatibility_test.configurators import common as configurators_common APACHE_VERSION_REGEX = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE) APACHE_COMMANDS = ["apachectl", "a2enmod", "a2dismod"] class Proxy(configurators_common.Proxy): # pylint: disable=too-many-instance-attributes """A common base for Apache test configurators""" zope.interface.implements(interfaces.IConfiguratorProxy) def __init__(self, args): """Initializes the plugin with the given command line args""" super(Proxy, self).__init__(args) self.le_config.apache_le_vhost_ext = "-le-ssl.conf" self._setup_mock() self.modules = self.server_root = self.test_conf = self.version = None self._apache_configurator = self._all_names = self._test_names = None def _setup_mock(self): """Replaces specific modules with mock.MagicMock""" mock_subprocess = mock.MagicMock() mock_subprocess.check_call = self.check_call mock_subprocess.Popen = self.popen mock.patch( "letsencrypt_apache.configurator.subprocess", mock_subprocess).start() mock.patch( "letsencrypt_apache.parser.subprocess", mock_subprocess).start() mock.patch( "letsencrypt.le_util.subprocess", mock_subprocess).start() mock.patch( "letsencrypt_apache.configurator.le_util.exe_exists", _is_apache_command).start() patch = mock.patch( "letsencrypt_apache.configurator.display_ops.select_vhost") mock_display = patch.start() mock_display.side_effect = le_errors.PluginError( "Unable to determine vhost") def check_call(self, command, *args, **kwargs): """If command is an Apache command, command is executed in the running docker image. Otherwise, subprocess.check_call is used. """ if _is_apache_command(command): command = _modify_command(command) return super(Proxy, self).check_call(command, *args, **kwargs) else: return subprocess.check_call(command, *args, **kwargs) def popen(self, command, *args, **kwargs): """If command is an Apache command, command is executed in the running docker image. Otherwise, subprocess.Popen is used. 
""" if _is_apache_command(command): command = _modify_command(command) return super(Proxy, self).popen(command, *args, **kwargs) else: return subprocess.Popen(command, *args, **kwargs) def __getattr__(self, name): """Wraps the Apache Configurator methods""" method = getattr(self._apache_configurator, name, None) if callable(method): return method else: raise AttributeError() def load_config(self): """Loads the next configuration for the plugin to test""" if hasattr(self.le_config, "apache_init_script"): try: self.check_call([self.le_config.apache_init_script, "stop"]) except errors.Error: raise errors.Error( "Failed to stop previous apache config from running") config = super(Proxy, self).load_config() self.modules = _get_modules(config) self.version = _get_version(config) self._all_names, self._test_names = _get_names(config) server_root = _get_server_root(config) with open(os.path.join(config, "config_file")) as f: config_file = os.path.join(server_root, f.readline().rstrip()) self.test_conf = _create_test_conf(server_root, config_file) self.preprocess_config(server_root) self._prepare_configurator(server_root, config_file) try: self.check_call("apachectl -d {0} -f {1} -k start".format( server_root, config_file)) except errors.Error: raise errors.Error( "Apache failed to load {0} before tests started".format( config)) return config def preprocess_config(self, server_root): # pylint: disable=anomalous-backslash-in-string, no-self-use """Prepares the configuration for use in the Docker""" find = subprocess.Popen( ["find", server_root, "-type", "f"], stdout=subprocess.PIPE) subprocess.check_call([ "xargs", "sed", "-e", "s/DocumentRoot.*/DocumentRoot " "\/usr\/local\/apache2\/htdocs/I", "-e", "s/SSLPassPhraseDialog.*/SSLPassPhraseDialog builtin/I", "-e", "s/TypesConfig.*/TypesConfig " "\/usr\/local\/apache2\/conf\/mime.types/I", "-e", "s/LoadModule/#LoadModule/I", "-e", "s/SSLCertificateFile.*/SSLCertificateFile " "\/usr\/local\/apache2\/conf\/empty_cert.pem/I", "-e", "s/SSLCertificateKeyFile.*/SSLCertificateKeyFile " "\/usr\/local\/apache2\/conf\/rsa1024_key2.pem/I", "-i"], stdin=find.stdout) def _prepare_configurator(self, server_root, config_file): """Prepares the Apache plugin for testing""" self.le_config.apache_server_root = server_root self.le_config.apache_ctl = "apachectl -d {0} -f {1}".format( server_root, config_file) self.le_config.apache_enmod = "a2enmod.sh {0}".format(server_root) self.le_config.apache_dismod = "a2dismod.sh {0}".format(server_root) self.le_config.apache_init_script = self.le_config.apache_ctl + " -k" self._apache_configurator = configurator.ApacheConfigurator( config=configuration.NamespaceConfig(self.le_config), name="apache") self._apache_configurator.prepare() def cleanup_from_tests(self): """Performs any necessary cleanup from running plugin tests""" super(Proxy, self).cleanup_from_tests() mock.patch.stopall() def get_all_names_answer(self): """Returns the set of domain names that the plugin should find""" if self._all_names: return self._all_names else: raise errors.Error("No configuration file loaded") def get_testable_domain_names(self): """Returns the set of domain names that can be tested against""" if self._test_names: return self._test_names else: return {"example.com"} def deploy_cert(self, domain, cert_path, key_path, chain_path=None): """Installs cert""" cert_path, key_path, chain_path = self.copy_certs_and_keys( cert_path, key_path, chain_path) self._apache_configurator.deploy_cert( domain, cert_path, key_path, chain_path) def 
_is_apache_command(command): """Returns true if command is an Apache command""" if isinstance(command, list): command = command[0] for apache_command in APACHE_COMMANDS: if command.startswith(apache_command): return True return False def _modify_command(command): """Modifies command so configtest works inside the docker image""" if isinstance(command, list): for i in xrange(len(command)): if command[i] == "configtest": command[i] = "-t" else: command = command.replace("configtest", "-t") return command def _create_test_conf(server_root, apache_config): """Creates a test config file and adds it to the Apache config""" test_conf = os.path.join(server_root, "test.conf") open(test_conf, "w").close() subprocess.check_call( ["sed", "-i", "1iInclude test.conf", apache_config]) return test_conf def _get_server_root(config): """Returns the server root directory in config""" subdirs = [ name for name in os.listdir(config) if os.path.isdir(os.path.join(config, name))] if len(subdirs) != 1: raise errors.Error("Malformed configuration directory {0}".format(config)) return os.path.join(config, subdirs[0].rstrip()) def _get_names(config): """Returns all and testable domain names in config""" all_names = set() non_ip_names = set() with open(os.path.join(config, "vhosts")) as f: for line in f: # If parsing a specific vhost if line[0].isspace(): words = line.split() if words[0] == "alias": all_names.add(words[1]) non_ip_names.add(words[1]) # If for port 80 and not IP vhost elif words[1] == "80" and not util.IP_REGEX.match(words[3]): all_names.add(words[3]) non_ip_names.add(words[3]) elif "NameVirtualHost" not in line: words = line.split() if (words[0].endswith("*") or words[0].endswith("80") and not util.IP_REGEX.match(words[1]) and words[1].find(".") != -1): all_names.add(words[1]) return all_names, non_ip_names def _get_modules(config): """Returns the list of modules found in module_list""" modules = [] with open(os.path.join(config, "modules")) as f: for line in f: # Modules list is indented, everything else is headers/footers if line[0].isspace(): words = line.split() # Modules redundantly end in "_module" which we can discard modules.append(words[0][:-7]) return modules def _get_version(config): """Return version of Apache Server. Version is returned as tuple. (i.e. 2.4.7 = (2, 4, 7)). Code taken from the Apache plugin. """ with open(os.path.join(config, "version")) as f: # Should be on first line of input matches = APACHE_VERSION_REGEX.findall(f.readline()) if len(matches) != 1: raise errors.Error("Unable to find Apache version") return tuple([int(i) for i in matches[0].split(".")]) import socket class TcpClient(object): def __init__(self, port, host): """ Constructor for TCP Client :param port: the port that the client is going to try and access on the server :param host: the host of the server """ self.port = port self.host = host self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self): """ Connects to the server and initiates interactions :return: None """ self.sock.connect((self.host, self.port)) self.interact_with_server() def interact_with_server(self): """ Handles interaction with the server :return: None """ message_size = 1024 block_message = 'You are not allowed to use this server!'
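# Read the server's greeting first; anything other than the block
# message means this client may send a sentence and read the echo.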
server_message = self.sock.recv(message_size) print server_message if server_message != block_message: client_message = raw_input('Please enter a sentence:') print 'You entered ', client_message self.sock.send(client_message) server_message = self.sock.recv(message_size) print 'And received ', server_message self.sock.close() else: print 'You have been blocked' self.sock.close() #!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: oneandone_load_balancer short_description: Configure 1&1 load balancer. description: - Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0 version_added: "2.5" options: state: description: - Define a load balancer state to create, remove, or update. required: false default: 'present' choices: [ "present", "absent", "update" ] auth_token: description: - Authenticating API token provided by 1&1. required: true load_balancer: description: - The identifier (id or name) of the load balancer used with update state. required: true api_url: description: - Custom API URL. Overrides the ONEANDONE_API_URL environment variable. required: false name: description: - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 required: true health_check_test: description: - Type of the health check. At the moment, HTTP is not allowed. choices: [ "NONE", "TCP", "HTTP", "ICMP" ] required: true health_check_interval: description: - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 required: true health_check_path: description: - URL to call for checking. Required for HTTP health check. maxLength=1000 required: false health_check_parse: description: - Regular expression to check. Required for HTTP health check. maxLength=64 required: false persistence: description: - Persistence. required: true type: bool persistence_time: description: - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 required: true method: description: - Balancing procedure. choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] required: true datacenter: description: - ID or country code of the datacenter where the load balancer will be created. default: US choices: [ "US", "ES", "DE", "GB" ] required: false rules: description: - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, port_balancer, and port_server parameters, in addition to source parameter, which is optional. required: true description: description: - Description of the load balancer. maxLength=256 required: false add_server_ips: description: - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with update state.
required: false remove_server_ips: description: - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. required: false add_rules: description: - A list of rules that will be added to an existing load balancer. Its syntax is the same as the one used for the rules parameter. Used in combination with update state. required: false remove_rules: description: - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. required: false wait: description: - wait for the instance to be in state 'running' before returning required: false default: "yes" type: bool wait_timeout: description: - how long before wait gives up, in seconds default: 600 wait_interval: description: - Defines the number of seconds to wait when using the _wait_for methods default: 5 requirements: - "1and1" - "python >= 2.6" author: - Amel Ajdinovic (@aajdinov) - Ethan Devenport (@edevenport) ''' EXAMPLES = ''' # Provisioning example. Create and destroy a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key name: ansible load balancer description: Testing creation of load balancer with ansible health_check_test: TCP health_check_interval: 40 persistence: true persistence_time: 1200 method: ROUND_ROBIN datacenter: US rules: - protocol: TCP port_balancer: 80 port_server: 80 source: 0.0.0.0 wait: true wait_timeout: 500 - oneandone_load_balancer: auth_token: oneandone_private_api_key name: ansible load balancer wait: true wait_timeout: 500 state: absent # Update a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer name: ansible load balancer updated description: Testing the update of a load balancer with ansible wait: true wait_timeout: 500 state: update # Add server to a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Adding server to a load balancer with ansible add_server_ips: - server identifier (id or name) wait: true wait_timeout: 500 state: update # Remove server from a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Removing server from a load balancer with ansible remove_server_ips: - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) wait: true wait_timeout: 500 state: update # Add rules to a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Adding rules to a load balancer with ansible add_rules: - protocol: TCP port_balancer: 70 port_server: 70 source: 0.0.0.0 - protocol: TCP port_balancer: 60 port_server: 60 source: 0.0.0.0 wait: true wait_timeout: 500 state: update # Remove rules from a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Removing rules from a load balancer with ansible remove_rules: - rule_id #1 - rule_id #2 - ...
wait: true wait_timeout: 500 state: update ''' RETURN = ''' load_balancer: description: Information about the load balancer that was processed type: dict sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' returned: always ''' import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.oneandone import ( get_load_balancer, get_server, get_datacenter, OneAndOneResources, wait_for_resource_creation_completion ) HAS_ONEANDONE_SDK = True try: import oneandone.client except ImportError: HAS_ONEANDONE_SDK = False DATACENTERS = ['US', 'ES', 'DE', 'GB'] HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] def _check_mode(module, result): if module.check_mode: module.exit_json( changed=result ) def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): """ Assigns servers to a load balancer. """ try: attach_servers = [] for server_id in server_ids: server = get_server(oneandone_conn, server_id, True) # guard against servers without any assigned IPs; next() would # otherwise return None and the ['id'] lookup would raise first_ip = next(iter(server['ips'] or []), None) if first_ip is None: module.fail_json( msg='Server %s has no IP addresses to attach.' % server_id) attach_server = oneandone.client.AttachServer( server_id=server['id'], server_ip_id=first_ip['id'] ) attach_servers.append(attach_server) if module.check_mode: if attach_servers: return True return False load_balancer = oneandone_conn.attach_load_balancer_server( load_balancer_id=load_balancer_id, server_ips=attach_servers) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): """ Unassigns a server/IP from a load balancer. """ try: if module.check_mode: lb_server = oneandone_conn.get_load_balancer_server( load_balancer_id=load_balancer_id, server_ip_id=server_ip_id) if lb_server: return True return False load_balancer = oneandone_conn.remove_load_balancer_server( load_balancer_id=load_balancer_id, server_ip_id=server_ip_id) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): """ Adds new rules to a load_balancer. """ try: load_balancer_rules = [] for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( protocol=rule['protocol'], port_balancer=rule['port_balancer'], port_server=rule['port_server'], source=rule['source']) load_balancer_rules.append(load_balancer_rule) if module.check_mode: lb_id = get_load_balancer(oneandone_conn, load_balancer_id) if (load_balancer_rules and lb_id): return True return False load_balancer = oneandone_conn.add_load_balancer_rule( load_balancer_id=load_balancer_id, load_balancer_rules=load_balancer_rules ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): """ Removes a rule from a load_balancer. """ try: if module.check_mode: rule = oneandone_conn.get_load_balancer_rule( load_balancer_id=load_balancer_id, rule_id=rule_id) if rule: return True return False load_balancer = oneandone_conn.remove_load_balancer_rule( load_balancer_id=load_balancer_id, rule_id=rule_id ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex))
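# Note: in check mode each helper above returns the would-be "changed" flag
# instead of calling the API; update_load_balancer below passes that flag to
# _check_mode, which exits the module early with the predicted result.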
def update_load_balancer(module, oneandone_conn): """ Updates a load_balancer based on input arguments. Load balancer rules and server IPs can be added to or removed from the load balancer. Load balancer name, description, health_check_test, health_check_interval, persistence, persistence_time, and method can be updated as well. module : AnsibleModule object oneandone_conn: authenticated oneandone object """ load_balancer_id = module.params.get('load_balancer') name = module.params.get('name') description = module.params.get('description') health_check_test = module.params.get('health_check_test') health_check_interval = module.params.get('health_check_interval') health_check_path = module.params.get('health_check_path') health_check_parse = module.params.get('health_check_parse') persistence = module.params.get('persistence') persistence_time = module.params.get('persistence_time') method = module.params.get('method') add_server_ips = module.params.get('add_server_ips') remove_server_ips = module.params.get('remove_server_ips') add_rules = module.params.get('add_rules') remove_rules = module.params.get('remove_rules') changed = False load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) if load_balancer is None: _check_mode(module, False) # outside check mode a missing load balancer is fatal; fail with a # clear message instead of raising on load_balancer['id'] below module.fail_json( msg='load balancer %s not found.' % load_balancer_id) if (name or description or health_check_test or health_check_interval or health_check_path or health_check_parse or persistence or persistence_time or method): _check_mode(module, True) load_balancer = oneandone_conn.modify_load_balancer( load_balancer_id=load_balancer['id'], name=name, description=description, health_check_test=health_check_test, health_check_interval=health_check_interval, health_check_path=health_check_path, health_check_parse=health_check_parse, persistence=persistence, persistence_time=persistence_time, method=method) changed = True if add_server_ips: if module.check_mode: _check_mode(module, _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)) load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) changed = True if remove_server_ips: chk_changed = False for server_ip_id in remove_server_ips: if module.check_mode: chk_changed |= _remove_load_balancer_server(module, oneandone_conn, load_balancer['id'], server_ip_id) _remove_load_balancer_server(module, oneandone_conn, load_balancer['id'], server_ip_id) _check_mode(module, chk_changed) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) changed = True if add_rules: load_balancer = _add_load_balancer_rules(module, oneandone_conn, load_balancer['id'], add_rules) _check_mode(module, load_balancer) changed = True if remove_rules: chk_changed = False for rule_id in remove_rules: if module.check_mode: chk_changed |= _remove_load_balancer_rule(module, oneandone_conn, load_balancer['id'], rule_id) _remove_load_balancer_rule(module, oneandone_conn, load_balancer['id'], rule_id) _check_mode(module, chk_changed) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) changed = True try: return (changed, load_balancer) except Exception as ex: module.fail_json(msg=str(ex)) def create_load_balancer(module, oneandone_conn): """ Create a new load_balancer.
module : AnsibleModule object oneandone_conn: authenticated oneandone object """ try: name = module.params.get('name') description = module.params.get('description') health_check_test = module.params.get('health_check_test') health_check_interval = module.params.get('health_check_interval') health_check_path = module.params.get('health_check_path') health_check_parse = module.params.get('health_check_parse') persistence = module.params.get('persistence') persistence_time = module.params.get('persistence_time') method = module.params.get('method') datacenter = module.params.get('datacenter') rules = module.params.get('rules') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') wait_interval = module.params.get('wait_interval') load_balancer_rules = [] datacenter_id = None if datacenter is not None: datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: module.fail_json( msg='datacenter %s not found.' % datacenter) for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( protocol=rule['protocol'], port_balancer=rule['port_balancer'], port_server=rule['port_server'], source=rule['source']) load_balancer_rules.append(load_balancer_rule) _check_mode(module, True) load_balancer_obj = oneandone.client.LoadBalancer( health_check_path=health_check_path, health_check_parse=health_check_parse, name=name, description=description, health_check_test=health_check_test, health_check_interval=health_check_interval, persistence=persistence, persistence_time=persistence_time, method=method, datacenter_id=datacenter_id ) load_balancer = oneandone_conn.create_load_balancer( load_balancer=load_balancer_obj, load_balancer_rules=load_balancer_rules ) if wait: wait_for_resource_creation_completion(oneandone_conn, OneAndOneResources.load_balancer, load_balancer['id'], wait_timeout, wait_interval) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh changed = True if load_balancer else False _check_mode(module, False) return (changed, load_balancer) except Exception as ex: module.fail_json(msg=str(ex)) def remove_load_balancer(module, oneandone_conn): """ Removes a load_balancer. 
module : AnsibleModule object oneandone_conn: authenticated oneandone object """ try: lb_id = module.params.get('name') load_balancer_id = get_load_balancer(oneandone_conn, lb_id) if module.check_mode: if load_balancer_id is None: _check_mode(module, False) _check_mode(module, True) load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) changed = True if load_balancer else False return (changed, { 'id': load_balancer['id'], 'name': load_balancer['name'] }) except Exception as ex: module.fail_json(msg=str(ex)) def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( type='str', default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', default=os.environ.get('ONEANDONE_API_URL')), load_balancer=dict(type='str'), name=dict(type='str'), description=dict(type='str'), health_check_test=dict( choices=HEALTH_CHECK_TESTS), health_check_interval=dict(type='str'), health_check_path=dict(type='str'), health_check_parse=dict(type='str'), persistence=dict(type='bool'), persistence_time=dict(type='str'), method=dict( choices=METHODS), datacenter=dict( choices=DATACENTERS), rules=dict(type='list', default=[]), add_server_ips=dict(type='list', default=[]), remove_server_ips=dict(type='list', default=[]), add_rules=dict(type='list', default=[]), remove_rules=dict(type='list', default=[]), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), wait_interval=dict(type='int', default=5), state=dict(type='str', default='present', choices=['present', 'absent', 'update']), ), supports_check_mode=True ) if not HAS_ONEANDONE_SDK: module.fail_json(msg='1and1 required for this module') if not module.params.get('auth_token'): module.fail_json( msg='auth_token parameter is required.') if not module.params.get('api_url'): oneandone_conn = oneandone.client.OneAndOneService( api_token=module.params.get('auth_token')) else: oneandone_conn = oneandone.client.OneAndOneService( api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) state = module.params.get('state') if state == 'absent': if not module.params.get('name'): module.fail_json( msg="'name' parameter is required for deleting a load balancer.") try: (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) elif state == 'update': if not module.params.get('load_balancer'): module.fail_json( msg="'load_balancer' parameter is required for updating a load balancer.") try: (changed, load_balancer) = update_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) elif state == 'present': for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', 'persistence_time', 'method', 'rules'): if not module.params.get(param): module.fail_json( msg="%s parameter is required for new load balancers." 
% param) try: (changed, load_balancer) = create_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) module.exit_json(changed=changed, load_balancer=load_balancer) if __name__ == '__main__': main() import sys from abc import ABCMeta, abstractmethod import requests from oauthlib.oauth2 import BackendApplicationClient from requests_oauthlib import OAuth2Session from .const import CLOUD_URLS class Session(object): """ abstract class as base for sessions """ __metaclass__ = ABCMeta def __init__(self): self._session = None @property def session(self): """ property to access the session (will be created on first access) """ if self._session is None: self.init_session() return self._session @abstractmethod def init_session(self): """ will automatically be called, when the session property is accessed for the first time """ pass @abstractmethod def is_configured(self): """ must return True, when the session is ready to use """ pass class LocalSession(Session): """ local session that directly communicates with the LaMetric device without using the Cloud-API (note: you need to register once using CloudAuth before the local authentication can be used) """ def __init__(self): Session.__init__(self) def init_session(self): """ init the local session """ self._session = requests.Session() def is_configured(self): """ local session is always configured """ return True class CloudSession(Session): """ cloud session that uses authentication via OAuth2 with the LaMetric Cloud """ def __init__( self, client_id=None, client_secret=None ): Session.__init__(self) # either use given credentials or get them from env variables self.set_credentials(client_id, client_secret) def set_credentials(self, client_id=None, client_secret=None): """ set given credentials and reset the session """ self._client_id = client_id self._client_secret = client_secret # make sure to reset session due to credential change self._session = None def is_configured(self): """ returns True, if cloud session is configured """ return self._session is not None def init_session(self, get_token=True): """ init a new oauth2 session that is required to access the cloud :param bool get_token: if True, a token will be obtained, after the session has been created """ if (self._client_id is None) or (self._client_secret is None): sys.exit( "Please make sure to set the client id and client secret " "via the constructor, the environment variables or the config " "file; otherwise, the LaMetric cloud cannot be accessed. " "Abort!" ) self._session = OAuth2Session( client=BackendApplicationClient(client_id=self._client_id) ) if get_token is True: # get oauth token self.get_token() def get_token(self): """ get current oauth token """ self.token = self._session.fetch_token( token_url=CLOUD_URLS["get_token"][1], client_id=self._client_id, client_secret=self._client_secret ) """ A custom AdminSite for AdminViewPermissionsTest.test_login_has_permission(). """ from __future__ import unicode_literals from django.contrib import admin from django.contrib.auth import get_permission_codename from django.contrib.auth.forms import AuthenticationForm from . 
import admin as base_admin, models PERMISSION_NAME = 'admin_views.%s' % get_permission_codename('change', models.Article._meta) class PermissionAdminAuthenticationForm(AuthenticationForm): def confirm_login_allowed(self, user): from django import forms if not user.is_active or not (user.is_staff or user.has_perm(PERMISSION_NAME)): raise forms.ValidationError('permission denied') class HasPermissionAdmin(admin.AdminSite): login_form = PermissionAdminAuthenticationForm def has_permission(self, request): return ( request.user.is_active and (request.user.is_staff or request.user.has_perm(PERMISSION_NAME)) ) site = HasPermissionAdmin(name="has_permission_admin") site.register(models.Article, base_admin.ArticleAdmin) """ Codec for the Punycode encoding, as specified in RFC 3492 Written by Martin v. Löwis. """ import codecs ##################### Encoding ##################################### def segregate(str): """3.1 Basic code point segregation""" base = bytearray() extended = set() for c in str: if ord(c) < 128: base.append(ord(c)) else: extended.add(c) extended = sorted(extended) return bytes(base), extended def selective_len(str, max): """Return the length of str, considering only characters below max.""" res = 0 for c in str: if ord(c) < max: res += 1 return res def selective_find(str, char, index, pos): """Return a pair (index, pos), indicating the next occurrence of char in str. index is the position of the character considering only ordinals up to and including char, and pos is the position in the full string. index/pos is the starting position in the full string.""" l = len(str) while 1: pos += 1 if pos == l: return (-1, -1) c = str[pos] if c == char: return index+1, pos elif c < char: index += 1 def insertion_unsort(str, extended): """3.2 Insertion unsort coding""" oldchar = 0x80 result = [] oldindex = -1 for c in extended: index = pos = -1 char = ord(c) curlen = selective_len(str, char) delta = (curlen+1) * (char - oldchar) while 1: index,pos = selective_find(str,c,index,pos) if index == -1: break delta += index - oldindex result.append(delta-1) oldindex = index delta = 0 oldchar = char return result def T(j, bias): # Punycode parameters: tmin = 1, tmax = 26, base = 36 res = 36 * (j + 1) - bias if res < 1: return 1 if res > 26: return 26 return res digits = b"abcdefghijklmnopqrstuvwxyz0123456789" def generate_generalized_integer(N, bias): """3.3 Generalized variable-length integers""" result = bytearray() j = 0 while 1: t = T(j, bias) if N < t: result.append(digits[N]) return bytes(result) result.append(digits[t + ((N - t) % (36 - t))]) N = (N - t) // (36 - t) j += 1 def adapt(delta, first, numchars): if first: delta //= 700 else: delta //= 2 delta += delta // numchars # ((base - tmin) * tmax) // 2 == 455 divisions = 0 while delta > 455: delta = delta // 35 # base - tmin divisions += 36 bias = divisions + (36 * delta // (delta + 38)) return bias def generate_integers(baselen, deltas): """3.4 Bias adaptation""" # Punycode parameters: initial bias = 72, damp = 700, skew = 38 result = bytearray() bias = 72 for points, delta in enumerate(deltas): s = generate_generalized_integer(delta, bias) result.extend(s) bias = adapt(delta, points==0, baselen+points+1) return bytes(result) def punycode_encode(text): base, extended = segregate(text) deltas = insertion_unsort(text, extended) extended = generate_integers(len(base), deltas) if base: return base + b"-" + extended return extended
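# A quick round-trip sketch (illustrative only, not part of the codec module):
# >>> punycode_encode("bücher")
# b'bcher-kva'
# >>> punycode_decode(b"bcher-kva", "strict")
# 'bücher'
# Everything before the last hyphen is the literal ASCII "base"; the tail
# encodes the insertion positions and code points of the non-ASCII characters
# as generalized variable-length integers.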
##################### Decoding ##################################### def decode_generalized_number(extended, extpos, bias, errors): """3.3 Generalized variable-length integers""" result = 0 w = 1 j = 0 while 1: try: char = ord(extended[extpos]) except IndexError: if errors == "strict": raise UnicodeError("incomplete punycode string") return extpos + 1, None extpos += 1 if 0x41 <= char <= 0x5A: # A-Z digit = char - 0x41 elif 0x30 <= char <= 0x39: digit = char - 22 # 0x30-26 elif errors == "strict": # extpos was already advanced, so report the character just read raise UnicodeError("Invalid extended code point '%s'" % extended[extpos-1]) else: return extpos, None t = T(j, bias) result += digit * w if digit < t: return extpos, result w = w * (36 - t) j += 1 def insertion_sort(base, extended, errors): """3.2 Insertion sort coding""" char = 0x80 pos = -1 bias = 72 extpos = 0 while extpos < len(extended): newpos, delta = decode_generalized_number(extended, extpos, bias, errors) if delta is None: # There was an error in decoding. We can't continue because # synchronization is lost. return base pos += delta+1 char += pos // (len(base) + 1) if char > 0x10FFFF: if errors == "strict": raise UnicodeError("Invalid character U+%x" % char) char = ord('?') pos = pos % (len(base) + 1) base = base[:pos] + chr(char) + base[pos:] bias = adapt(delta, (extpos == 0), len(base)) extpos = newpos return base def punycode_decode(text, errors): if isinstance(text, str): text = text.encode("ascii") if isinstance(text, memoryview): text = bytes(text) pos = text.rfind(b"-") if pos == -1: base = "" extended = str(text, "ascii").upper() else: base = str(text[:pos], "ascii", errors) extended = str(text[pos+1:], "ascii").upper() return insertion_sort(base, extended, errors) ### Codec APIs class Codec(codecs.Codec): def encode(self, input, errors='strict'): res = punycode_encode(input) return res, len(input) def decode(self, input, errors='strict'): if errors not in ('strict', 'replace', 'ignore'): raise UnicodeError("Unsupported error handling "+errors) res = punycode_decode(input, errors) return res, len(input) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return punycode_encode(input) class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): if self.errors not in ('strict', 'replace', 'ignore'): raise UnicodeError("Unsupported error handling "+self.errors) return punycode_decode(input, self.errors) class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='punycode', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, ) # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
import pytest from selenium.webdriver import Safari @pytest.fixture def driver_class(): return Safari @pytest.fixture def driver_kwargs(): return {} @pytest.fixture def driver(driver_class, driver_kwargs): driver = driver_class(**driver_kwargs) yield driver driver.quit() #!/usr/bin/env python import math from ecl_controller import PyECLRollController, PyECLPitchController, \ PyECLYawController from mtecs import PyMTecs, PyLimitOverride import sys import numpy as np class FixedWingController: """Controls a fixed wing aircraft""" def __init__(self, params, control_total_energy_divider, mode): """ :param control_total_energy_divider: run the total energy controller every xth time """ self.mode = mode # since we run on python2, make sure that all params are floats for p in params: params[p] = float(params[p]) self.params = params # Attitude Control self.c_roll = PyECLRollController() self.c_pitch = PyECLPitchController() self.c_yaw = PyECLYawController() att_tc = params["att_tc"] k_p = params["k_p"] k_ff = params["k_ff"] k_i = params["k_i"] i_max = params["i_max"] self.c_roll.set_time_constant(att_tc) self.c_roll.set_k_p(k_p) self.c_roll.set_k_i(k_i) self.c_roll.set_integrator_max(i_max) self.c_roll.set_k_ff(k_ff) self.c_pitch.set_time_constant(att_tc) self.c_pitch.set_k_p(k_p) self.c_pitch.set_k_i(k_i) self.c_pitch.set_integrator_max(i_max) self.c_pitch.set_k_ff(k_ff) self.c_pitch.set_max_rate_pos(params["pitch_max_rate_pos"]) self.c_pitch.set_max_rate_neg(params["pitch_max_rate_neg"]) self.c_pitch.set_roll_ff(params["pitch_roll_ff"]) self.c_yaw.set_time_constant(att_tc) self.c_yaw.set_k_p(k_p) self.c_yaw.set_k_i(k_i) self.c_yaw.set_integrator_max(i_max) self.c_yaw.set_k_ff(k_ff) self.c_yaw.set_coordinated_min_speed(params["coordinated_min_speed"]) self.c_yaw.set_coordinated_method(params["coordinated_method"]) # Altitude and speed control (total energy control) # XXX load values from params argument where it makes sense c_te_params = { "MT_ENABLED": 1, "MT_THR_FF": params["mtecs_throttle_ff"], "MT_THR_P": params["mtecs_throttle_p"], "MT_THR_I": params["mtecs_throttle_i"], "MT_THR_OFF": params["throttle_default"], "MT_PIT_FF": params["mtecs_pitch_ff"], "MT_PIT_P": params["mtecs_pitch_p"], "MT_PIT_I": params["mtecs_pitch_i"], "MT_PIT_OFF": 0.0, "MT_THR_MIN": 0.0, "MT_THR_MAX": 1.0, "MT_PIT_MIN": -45.0, "MT_PIT_MAX": 20.0, "MT_ALT_LP": params["mtecs_altitude_lowpass_cutoff"], "MT_FPA_LP": params["mtecs_flightpathangle_lowpass_cutoff"], "MT_FPA_P": params["mtecs_fpa_p"], "MT_FPA_D": 0.0, "MT_FPA_D_LP": 1.0, "MT_FPA_MIN": -20.0, "MT_FPA_MAX": 30.0, "MT_A_LP": params["mtecs_airspeed_lowpass_cutoff"], "MT_AD_LP": params["mtecs_airspeed_derivative_lowpass_cutoff"], "MT_ACC_P": params["mtecs_acc_p"], "MT_ACC_D": 0.0, "MT_ACC_D_LP": 0.5, "MT_ACC_MIN": -40.0, "MT_ACC_MAX": 40.0, "MT_TKF_THR_MIN": 1.0, "MT_TKF_THR_MAX": 1.0, "MT_TKF_PIT_MIN": 0.0, "MT_TKF_PIT_MAX": 45.0, "MT_USP_THR_MIN": 1.0, "MT_USP_THR_MAX": 1.0, "MT_USP_PIT_MIN": -45.0, "MT_USP_PIT_MAX": 0.0, "MT_LND_THR_MIN": 0.0, "MT_LND_THR_MAX": 0.0, "MT_LND_PIT_MIN": -5.0, "MT_LND_PIT_MAX": 15.0, "MT_THR_I_MAX": 10.0, "MT_PIT_I_MAX": 10.0, "FW_AIRSPD_MIN": params["airspeed_min"], } self.c_te = PyMTecs(c_te_params) self.control_count = 0 self.control_total_energy_divider = control_total_energy_divider def get_ground_speed_vec(self, state): """Returns a vector of the ground speed""" return np.array([state["speed_body_u"], state["speed_body_v"], state["speed_body_w"],]) def call_mtecs(self, state, setpoint): """helper function to pass the right
arguments to mtecs""" flightpathangle = 0.0 ground_speed = self.get_ground_speed_vec(state) ground_speed_length = np.linalg.norm(ground_speed) if ground_speed_length > sys.float_info.epsilon: flightpathangle = -np.arcsin(ground_speed[2]/ground_speed_length) limitoverride = PyLimitOverride() # if (_vehicle_status.engine_failure || _vehicle_status.engine_failure_cmd) { # /* Force the slow downwards spiral */ # limitOverride.enablePitchMinOverride(-1.0f); # limitOverride.enablePitchMaxOverride(5.0f); # } else if (climbout_mode) { # limitOverride.enablePitchMinOverride(M_RAD_TO_DEG_F * climbout_pitch_min_rad); # } else { # limitOverride.disablePitchMinOverride(); # } limitoverride.disablePitchMinOverride(); # if (pitch_max_special) { # /* Use the maximum pitch from the argument */ # limitOverride.enablePitchMaxOverride(M_RAD_TO_DEG_F * pitch_max_rad); # } else { # /* use pitch max set by MT param */ # limitOverride.disablePitchMaxOverride(); # } limitoverride.disablePitchMaxOverride(); self.c_te.updateAltitudeSpeed(flightpathangle, state["altitude"], setpoint["altitude"], state["airspeed"], setpoint["velocity"], self.c_te.mtecs_mode_normal, limitoverride); return flightpathangle def control(self, **kwargs): """ Input: output: control signal normed [-1 1] """ y = kwargs["state"] r = kwargs["setpoint"] params = kwargs["parameters"] control_data = { "roll_setpoint": r["roll"], "pitch_setpoint": r["pitch"], "yaw_setpoint": r["yaw"], "roll_rate_setpoint": r["roll_rate"], "pitch_rate_setpoint": r["pitch_rate"], "yaw_rate_setpoint": r["yaw_rate"], "airspeed_min": params["airspeed_min"], "airspeed_max": params["airspeed_max"], "altitude_setpoint": r["altitude"], "velocity_setpoint": r["velocity"], } for k,v in y.items(): control_data[k] = v if self.mode == "position": if self.control_count % self.control_total_energy_divider == 0: flightpathangle = self.call_mtecs(y, r) control_data["pitch_setpoint"] = self.c_te.getPitchSetpoint() throttle = self.c_te.getThrottleSetpoint() # save other relevant data control_data["airspeed_filtered"] = self.c_te.getAirspeedLowpassState() control_data["altitude_filtered"] = self.c_te.getAltitudeLowpassState() control_data["flightpathangle"] = flightpathangle control_data["flightpathangle_filtered"] = self.c_te.getFlightPathAngleLowpassState() control_data["airspeed_derivative_filtered"] = self.c_te.getAirspeedDerivativeLowpassState() else: throttle = self.params["throttle_default"] control_data["throttle_setpoint"] = throttle control_data["roll_rate_setpoint"] = self.c_roll.control_attitude(control_data) control_data["pitch_rate_setpoint"] = self.c_pitch.control_attitude(control_data) # control_data["yaw_rate_setpoint"] = self.c_yaw.control_attitude(control_data) control_data["yaw_rate_setpoint"] = 0.0 # XXX # print("control data", control_data) aileron = self.c_roll.control_bodyrate(control_data) elevator = self.c_pitch.control_bodyrate(control_data) rudder = self.c_yaw.control_bodyrate(control_data) u = [aileron, elevator, rudder, throttle] # print("u", u) return [u, control_data] # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os from metrics import Metric from telemetry.value import list_of_scalar_values from telemetry.value import scalar class MediaMetric(Metric): """MediaMetric class injects and calls JS responsible for recording metrics. 
Default media metrics are collected for every media element in the page, such as decoded_frame_count, dropped_frame_count, decoded_video_bytes, and decoded_audio_bytes. """ def __init__(self, tab): super(MediaMetric, self).__init__() with open(os.path.join(os.path.dirname(__file__), 'media.js')) as f: js = f.read() tab.ExecuteJavaScript(js) self._results = None self._skip_basic_metrics = False def Start(self, page, tab): """Create the media metrics for all media elements in the document.""" if hasattr(page, 'skip_basic_metrics'): self._skip_basic_metrics = page.skip_basic_metrics tab.ExecuteJavaScript('window.__createMediaMetricsForDocument()') def Stop(self, page, tab): self._results = tab.EvaluateJavaScript('window.__getAllMetrics()') # Optional |exclude_metrics| args are not in base class Metric. # pylint: disable=W0221 def AddResults(self, tab, results, exclude_metrics=None): """Reports all recorded metrics as Telemetry perf results.""" exclude_metrics = exclude_metrics or [] trace_names = [] for media_metric in self._results: trace_names.append(self._AddResultsForMediaElement(media_metric, results, exclude_metrics)) return '_'.join(trace_names) or tab.url def _AddResultsForMediaElement(self, media_metric, results, exclude_metrics): """Reports metrics for one media element. Media metrics contain an ID identifying the media element and values: media_metric = { 'id': 'video_1', 'metrics': { 'time_to_play': 120, 'decoded_bytes': 13233, ... } } """ def AddOneResult(metric, unit): if metric in exclude_metrics: return metrics = media_metric['metrics'] for m in metrics: if m.startswith(metric): special_label = m[len(metric):] trace_name = '%s.%s%s' % (metric, trace, special_label) if isinstance(metrics[m], list): results.AddValue(list_of_scalar_values.ListOfScalarValues( results.current_page, trace_name, unit, values=[float(v) for v in metrics[m]], important=True)) else: results.AddValue(scalar.ScalarValue( results.current_page, trace_name, unit, value=float(metrics[m]), important=True)) trace = media_metric['id'] if not trace: logging.error('Metrics ID is missing in results.') return if not self._skip_basic_metrics: AddOneResult('buffering_time', 'ms') AddOneResult('decoded_audio_bytes', 'bytes') AddOneResult('decoded_video_bytes', 'bytes') AddOneResult('decoded_frame_count', 'frames') AddOneResult('dropped_frame_count', 'frames') AddOneResult('time_to_play', 'ms') AddOneResult('avg_loop_time', 'ms') AddOneResult('seek', 'ms') return trace __all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper'] from _yaml import CParser, CEmitter from .constructor import * from .serializer import * from .representer import * from .resolver import * class CBaseLoader(CParser, BaseConstructor, BaseResolver): def __init__(self, stream): CParser.__init__(self, stream) BaseConstructor.__init__(self) BaseResolver.__init__(self) class CSafeLoader(CParser, SafeConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) SafeConstructor.__init__(self) Resolver.__init__(self) class CLoader(CParser, Constructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) Constructor.__init__(self) Resolver.__init__(self) class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): def __init__(self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): CEmitter.__init__(self, stream, 
canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style) Resolver.__init__(self) class CSafeDumper(CEmitter, SafeRepresenter, Resolver): def __init__(self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) SafeRepresenter.__init__(self, default_style=default_style, default_flow_style=default_flow_style) Resolver.__init__(self) class CDumper(CEmitter, Serializer, Representer, Resolver): def __init__(self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style) Resolver.__init__(self) #!/usr/bin/env python # # Generated Tue Jun 29 16:14:16 2004 by generateDS.py. # import sys from xml.dom import minidom from xml.sax import handler, make_parser import xmlbehavior as supermod class xml_behaviorSub(supermod.xml_behavior): def __init__(self, base_impl_url='', behaviors=None): supermod.xml_behavior.__init__(self, base_impl_url, behaviors) def get_class_dictionary(self): return self.classDictionary # # Make a dictionary whose keys are class names and whose # values are the behaviors for that class. 
def make_class_dictionary(self, cleanupNameFunc): self.classDictionary = {} self.behaviors.make_class_dictionary(self.classDictionary, cleanupNameFunc) supermod.xml_behavior.subclass = xml_behaviorSub # end class xml_behaviorSub class behaviorsSub(supermod.behaviors): def __init__(self, behavior=None): supermod.behaviors.__init__(self, behavior) def make_class_dictionary(self, classDictionary, cleanupNameFunc): for behavior in self.behavior: behavior.make_class_dictionary(classDictionary, cleanupNameFunc) supermod.behaviors.subclass = behaviorsSub # end class behaviorsSub class behaviorSub(supermod.behavior): def __init__(self, klass='', name='', return_type='', args=None, impl_url=''): supermod.behavior.__init__(self, klass, name, return_type, args, impl_url) def make_class_dictionary(self, classDictionary, cleanupNameFunc): className = cleanupNameFunc(self.klass) if className not in classDictionary: classDictionary[className] = [] classDictionary[className].append(self) supermod.behavior.subclass = behaviorSub # end class behaviorSub class argsSub(supermod.args): def __init__(self, arg=None): supermod.args.__init__(self, arg) supermod.args.subclass = argsSub # end class argsSub class argSub(supermod.arg): def __init__(self, name='', data_type=''): supermod.arg.__init__(self, name, data_type) supermod.arg.subclass = argSub # end class argSub class ancillariesSub(supermod.ancillaries): def __init__(self, ancillary=None): supermod.ancillaries.__init__(self, ancillary) # # XMLBehaviors # supermod.ancillaries.subclass = ancillariesSub # end class ancillariesSub class ancillarySub(supermod.ancillary): def __init__(self, klass='', role='', return_type='', name='', args=None, impl_url=''): supermod.ancillary.__init__(self, klass, role, return_type, name, args, impl_url) supermod.ancillary.subclass = ancillarySub # end class ancillarySub def saxParse(inFileName): parser = make_parser() documentHandler = supermod.SaxXml_behaviorHandler() parser.setDocumentHandler(documentHandler) parser.parse('file:%s' % inFileName) rootObj = documentHandler.getRoot() #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def saxParseString(inString): parser = make_parser() documentHandler = supermod.SaxContentHandler() parser.setDocumentHandler(documentHandler) parser.feed(inString) parser.close() rootObj = documentHandler.getRoot() #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def parse(inFilename): doc = minidom.parse(inFilename) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) doc = None return rootObj def parseString(inString): doc = minidom.parseString(inString) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) doc = None #sys.stdout.write('\n') #rootObj.export(sys.stdout, 0) return rootObj def parseLiteral(inFilename): doc = minidom.parse(inFilename) rootNode = doc.childNodes[0] rootObj = supermod.xml_behavior.factory() rootObj.build(rootNode) #sys.stdout.write('from xmlbehavior_sub import *\n\n') #sys.stdout.write('rootObj = xml_behavior(\n') #rootObj.exportLiteral(sys.stdout, 0) #sys.stdout.write(')\n') doc = None return rootObj USAGE_TEXT = """ Usage: python ???.py """ def usage(): print USAGE_TEXT sys.exit(-1) def main(): args = sys.argv[1:] if len(args) != 1: usage() infilename = args[0] root = parse(infilename) sys.stdout.write('\n') root.export(sys.stdout, 0) if __name__ == '__main__': main() #import pdb 
#pdb.run('main()') # Display a process of packets and processed time. # It helps us to investigate networking or a network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only things related to the specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint events related to this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry # and a list which stacks receive events receive_hunk_list = []; # a list which includes a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which are freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrupts associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be shown if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list']
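# Walk the stacked NET_RX events in order; netif_receive_skb entries may have
# been annotated later with the consuming process (skb_copy_datagram_iovec) or
# a free record (kfree_skb/consume_skb), which selects the branch printed below.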
for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg == 'rx': show_rx = 1 elif arg.find('dev=', 0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print "debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a corresponding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information related to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return
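# A free after dev_hard_start_xmit ends a packet's life cycle; stamping
# free_t below lets print_transmit report the Qdisc->netdevice and
# netdevice->free latencies.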
time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import atexit import contextlib import sys from .ansitowin32 import AnsiToWin32 orig_stdout = None orig_stderr = None wrapped_stdout = None wrapped_stderr = None atexit_done = False def reset_all(): if AnsiToWin32 is not None: # Issue #74: objects might become None at exit AnsiToWin32(orig_stdout).reset_all() def init(autoreset=False, convert=None, strip=None, wrap=True): if not wrap and any([autoreset, convert, strip]): raise ValueError('wrap=False conflicts with any other arg=True') global wrapped_stdout, wrapped_stderr global orig_stdout, orig_stderr orig_stdout = sys.stdout orig_stderr = sys.stderr if sys.stdout is None: wrapped_stdout = None else: sys.stdout = wrapped_stdout = \ wrap_stream(orig_stdout, convert, strip, autoreset, wrap) if sys.stderr is None: wrapped_stderr = None else: sys.stderr = wrapped_stderr = \ wrap_stream(orig_stderr, convert, strip, autoreset, wrap) global atexit_done if not atexit_done: atexit.register(reset_all) atexit_done = True def deinit(): if orig_stdout is not None: sys.stdout = orig_stdout if orig_stderr is not None: sys.stderr = orig_stderr @contextlib.contextmanager def colorama_text(*args, **kwargs): init(*args, **kwargs) try: yield finally: deinit() def reinit(): if wrapped_stdout is not None: sys.stdout = wrapped_stdout if wrapped_stderr is not None: sys.stderr = wrapped_stderr def wrap_stream(stream, convert, strip, autoreset, wrap): if wrap: wrapper = AnsiToWin32(stream, convert=convert, strip=strip, autoreset=autoreset) if wrapper.should_wrap(): stream = wrapper.stream return stream #@+leo-ver=5-thin #@+node:2014fall.20141212095015.1775: * @file wsgi.py # coding=utf-8 # The encoding declaration above only takes effect on the first or second line of the file ################# (1) Module import section # Import the cherrypy module; to use cherrypy on the OpenShift platform it must be installed via setup.py #@@language python #@@tabwidth -4 #@+<> #@+node:2014fall.20141212095015.1776: ** <> (wsgi) import cherrypy # Import Python's built-in os module; since os ships with Python, it needs no setup.py installation import os # Import the random module import random # Import the gear module import gear ################# (2) Global variable section # Determine the directory holding this program file; under Windows it ends with a trailing backslash _curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # Set the data storage directories for cloud and local execution if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # the program is running in the cloud download_root_dir = os.environ['OPENSHIFT_DATA_DIR'] data_dir = os.environ['OPENSHIFT_DATA_DIR'] else: # the program is running locally download_root_dir = _curdir + "/local_data/" data_dir = _curdir + "/local_data/" '''The following code shows local use of input() and a for loop; to run the program on OpenShift it must, besides adopting the CherryPy web framework, be converted to HTML output # input() returns data of type string toprint = input("What should be printed?") # To use the string obtained from input() as an integer, convert it with int() repeat_no = int(input("How many times should it be printed?")) for i in
range(repeat_no): print(toprint) ''' #@-<> #@+others #@+node:2014fall.20141212095015.1777: ** class Hello ################# (3) Class definition section # From here on, the CherryPy web framework program structure is used # The Hello class is designed below; using object means Hello inherits all of object's features, including its methods and attributes class Hello(object): # Startup configuration for the Hello class _cp_config = { 'tools.encode.encoding': 'utf-8', 'tools.sessions.on' : True, 'tools.sessions.storage_type' : 'file', #'tools.sessions.locking' : 'explicit', # sessions are stored as files in the tmp directory under data_dir 'tools.sessions.storage_path' : data_dir+'/tmp', # session lifetime is set to 60 minutes 'tools.sessions.timeout' : 60 } #@+others #@+node:2014fall.20141212095015.2004: *3* __init__ def __init__(self): # Create the directories required when an instance is initialized if not os.path.isdir(data_dir+'/tmp'): os.mkdir(data_dir+'/tmp') if not os.path.isdir(data_dir+"/downloads"): os.mkdir(data_dir+"/downloads") if not os.path.isdir(data_dir+"/images"): os.mkdir(data_dir+"/images") #@+node:2014fall.20141212095015.1778: *3* index_orig # cherrypy.expose, written with a leading @, is a decorator marking the member method that follows as directly executable by users through a URL @cherrypy.expose # index is the built-in (default) method among CherryPy class member methods; when a user does not specify a method, the system runs index first # Methods taking self are member methods of the class; Python passes the object's state between member methods through this self def index_orig(self, toprint="Hello World!"): return toprint #@+node:2014fall.20141212095015.1779: *3* hello @cherrypy.expose def hello(self, toprint="Hello World!"): return toprint #@+node:2014fall.20141215194146.1791: *3* index @cherrypy.expose def index(self): outstring = '''

Final Exam Practice 1

40223226

張政皓

drawgear2 (draw two gears)
''' return outstring #@+node:2015.20150330144929.1713: *3* twoDgear @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def twoDgear(self, N=20, M=5, P=15): outstring = '''
Number of teeth:
Module:
Pressure angle:
''' return outstring #@+node:2015.20150331094055.1733: *3* threeDgear @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def threeDgear(self, N=20, M=5, P=15): outstring = '''
Number of teeth:
Module:
Pressure angle:
''' return outstring #@+node:2015.20150330144929.1762: *3* do2Dgear @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def do2Dgear(self, N=20, M=5, P=15): outstring = ''' ''' return outstring #@+node:2015.20150331094055.1735: *3* do3Dgear @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def do3Dgear(self, N=20, M=5, P=15): outstring = ''' ''' return outstring #@+node:2015.20150330144929.1765: *3* mygeartest @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def mygeartest(self, N=20, M=5, P=15): outstring = ''' ''' return outstring #@+node:2015.20150621222226.1: *3* drawspur @cherrypy.expose # N1 and N2 are the tooth counts of the upper and lower gears, M the module, P the pressure angle def drawspur(self,N1=15,N2=24, M=4, P=20,midx=400): outstring = '''
Teeth of upper gear:
Teeth of lower gear:

Tooth counts must be integers between 15 and 80


Back
''' return outstring #@+node:amd.20150415215023.1: *3* mygeartest2 @cherrypy.expose # N1 and N2 are the tooth counts of the two gears, M the module, P the pressure angle def mygeartest2(self, N1=15, N2=24, M=4, P=15): outstring = '''
Back
''' return outstring #@+node:2015.20150331094055.1737: *3* my3Dgeartest @cherrypy.expose # N is the number of teeth, M the module, P the pressure angle def my3Dgeartest(self, N=20, M=5, P=15): outstring = ''' ''' return outstring #@+node:2014fall.20141215194146.1793: *3* doCheck @cherrypy.expose def doCheck(self, guess=None): # If the user runs doCheck directly, redirect back to the root method if guess is None: raise cherrypy.HTTPRedirect("/") # Fetch the answer from the session, handling the case where doCheck is run directly and no session value can be read try: theanswer = int(cherrypy.session.get('answer')) except: raise cherrypy.HTTPRedirect("/") # guess obtained through the form is of type string try: theguess = int(guess) except: return "error " + self.guessform() # Each run of doCheck increments the count by one cherrypy.session['count'] += 1 # Compare the answer with the guessed number if theanswer < theguess: return "big " + self.guessform() elif theanswer > theguess: return "small " + self.guessform() else: # Guessed correctly; fetch the cumulative guess count from the session thecount = cherrypy.session.get('count') return "exact: guess again" #@+node:2014fall.20141215194146.1789: *3* guessform def guessform(self): # Print the hypertext form for the user's input outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''
Please enter the integer you are guessing:
''' return outstring #@-others #@-others ################# (4) Program startup section # Configure static directories or static files relative to the program's directory application_conf = {'/static':{ 'tools.staticdir.on': True, # the static directory must be created manually under the program's working directory 'tools.staticdir.dir': _curdir+"/static"}, '/downloads':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/downloads"}, '/images':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/images"} } root = Hello() root.gear = gear.Gear() cherrypy.server.socket_port = 8080 cherrypy.server.socket_host = '127.0.0.1' if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # running on OpenShift application = cherrypy.Application(root, config=application_conf) else: # running locally cherrypy.quickstart(root, config=application_conf) #@-leo #!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the common.py file.""" import gyp.common import unittest import sys class TestTopologicallySorted(unittest.TestCase): def test_Valid(self): """Test that sorting works on a valid graph with one possible order.""" graph = { 'a': ['b', 'c'], 'b': [], 'c': ['d'], 'd': ['b'], } def GetEdge(node): return tuple(graph[node]) self.assertEqual( gyp.common.TopologicallySorted(graph.keys(), GetEdge), ['a', 'c', 'd', 'b']) def test_Cycle(self): """Test that an exception is thrown on a cyclic graph.""" graph = { 'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a'], } def GetEdge(node): return tuple(graph[node]) self.assertRaises( gyp.common.CycleError, gyp.common.TopologicallySorted, graph.keys(), GetEdge) class TestGetFlavor(unittest.TestCase): """Test that gyp.common.GetFlavor works as intended""" original_platform = '' def setUp(self): self.original_platform = sys.platform def tearDown(self): sys.platform = self.original_platform def assertFlavor(self, expected, argument, param): sys.platform = argument self.assertEqual(expected, gyp.common.GetFlavor(param)) def test_platform_default(self): self.assertFlavor('freebsd', 'freebsd9' , {}) self.assertFlavor('freebsd', 'freebsd10', {}) self.assertFlavor('openbsd', 'openbsd5' , {}) self.assertFlavor('solaris', 'sunos5' , {}) self.assertFlavor('solaris', 'sunos' , {}) self.assertFlavor('linux' , 'linux2' , {}) self.assertFlavor('linux' , 'linux3' , {}) def test_param(self): self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'}) if __name__ == '__main__': unittest.main() # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3.
See license.txt # ERPNext - web based ERP (http://erpnext.com) # For license information, please see license.txt from __future__ import unicode_literals import frappe, unittest from frappe.utils import flt class TestMaterialRequest(unittest.TestCase): def setUp(self): frappe.defaults.set_global_default("auto_accounting_for_stock", 0) def test_make_purchase_order(self): from erpnext.stock.doctype.material_request.material_request import make_purchase_order mr = frappe.copy_doc(test_records[0]).insert() self.assertRaises(frappe.ValidationError, make_purchase_order, mr.name) mr = frappe.get_doc("Material Request", mr.name) mr.submit() po = make_purchase_order(mr.name) self.assertEquals(po.doctype, "Purchase Order") self.assertEquals(len(po.get("po_details")), len(mr.get("indent_details"))) def test_make_supplier_quotation(self): from erpnext.stock.doctype.material_request.material_request import make_supplier_quotation mr = frappe.copy_doc(test_records[0]).insert() self.assertRaises(frappe.ValidationError, make_supplier_quotation, mr.name) mr = frappe.get_doc("Material Request", mr.name) mr.submit() sq = make_supplier_quotation(mr.name) self.assertEquals(sq.doctype, "Supplier Quotation") self.assertEquals(len(sq.get("quotation_items")), len(mr.get("indent_details"))) def test_make_stock_entry(self): from erpnext.stock.doctype.material_request.material_request import make_stock_entry mr = frappe.copy_doc(test_records[0]).insert() self.assertRaises(frappe.ValidationError, make_stock_entry, mr.name) mr = frappe.get_doc("Material Request", mr.name) mr.material_request_type = "Transfer" mr.submit() se = make_stock_entry(mr.name) self.assertEquals(se.doctype, "Stock Entry") self.assertEquals(len(se.get("mtn_details")), len(mr.get("indent_details"))) def _insert_stock_entry(self, qty1, qty2): se = frappe.get_doc({ "company": "_Test Company", "doctype": "Stock Entry", "posting_date": "2013-03-01", "posting_time": "00:00:00", "purpose": "Material Receipt", "fiscal_year": "_Test Fiscal Year 2013", "mtn_details": [ { "conversion_factor": 1.0, "doctype": "Stock Entry Detail", "item_code": "_Test Item Home Desktop 100", "parentfield": "mtn_details", "incoming_rate": 100, "qty": qty1, "stock_uom": "_Test UOM 1", "transfer_qty": qty1, "uom": "_Test UOM 1", "t_warehouse": "_Test Warehouse 1 - _TC", }, { "conversion_factor": 1.0, "doctype": "Stock Entry Detail", "item_code": "_Test Item Home Desktop 200", "parentfield": "mtn_details", "incoming_rate": 100, "qty": qty2, "stock_uom": "_Test UOM 1", "transfer_qty": qty2, "uom": "_Test UOM 1", "t_warehouse": "_Test Warehouse 1 - _TC", } ] }) se.insert() se.submit() def test_completed_qty_for_purchase(self): existing_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") existing_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") # submit material request of type Purchase mr = frappe.copy_doc(test_records[0]) mr.insert() mr.submit() # check if per complete is None self.assertEquals(mr.per_ordered, None) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0) # map a purchase order from erpnext.stock.doctype.material_request.material_request import make_purchase_order po_doc = make_purchase_order(mr.name) po_doc.supplier = "_Test Supplier" po_doc.transaction_date = "2013-07-07" po_doc.get("po_details")[0].qty = 27.0 po_doc.get("po_details")[1].qty = 1.5 po_doc.get("po_details")[0].schedule_date = 
"2013-07-09" po_doc.get("po_details")[1].schedule_date = "2013-07-09" # check for stopped status of Material Request po = frappe.copy_doc(po_doc) po.insert() po.load_from_db() mr.update_status('Stopped') self.assertRaises(frappe.InvalidStatusError, po.submit) frappe.db.set(po, "docstatus", 1) self.assertRaises(frappe.InvalidStatusError, po.cancel) # resubmit and check for per complete mr.load_from_db() mr.update_status('Submitted') po = frappe.copy_doc(po_doc) po.insert() po.submit() # check if per complete is as expected mr.load_from_db() self.assertEquals(mr.per_ordered, 50) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 27.0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 1.5) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 27.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 1.5) po.cancel() # check if per complete is as expected mr.load_from_db() self.assertEquals(mr.per_ordered, None) self.assertEquals(mr.get("indent_details")[0].ordered_qty, None) self.assertEquals(mr.get("indent_details")[1].ordered_qty, None) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 54.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 3.0) def test_completed_qty_for_transfer(self): existing_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") existing_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") # submit material request of type Purchase mr = frappe.copy_doc(test_records[0]) mr.material_request_type = "Transfer" mr.insert() mr.submit() # check if per complete is None self.assertEquals(mr.per_ordered, None) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 54.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 3.0) from erpnext.stock.doctype.material_request.material_request import make_stock_entry # map a stock entry se_doc = make_stock_entry(mr.name) se_doc.update({ "posting_date": "2013-03-01", "posting_time": "01:00", "fiscal_year": "_Test Fiscal Year 2013", }) se_doc.get("mtn_details")[0].update({ "qty": 27.0, "transfer_qty": 27.0, "s_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) se_doc.get("mtn_details")[1].update({ "qty": 1.5, "transfer_qty": 1.5, "s_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) # make available the qty in _Test Warehouse 1 before transfer self._insert_stock_entry(27.0, 1.5) # check for stopped status of Material Request se = frappe.copy_doc(se_doc) se.insert() mr.update_status('Stopped') self.assertRaises(frappe.InvalidStatusError, se.submit) mr.update_status('Submitted') 
se.ignore_validate_update_after_submit = True se.submit() mr.update_status('Stopped') self.assertRaises(frappe.InvalidStatusError, se.cancel) mr.update_status('Submitted') se = frappe.copy_doc(se_doc) se.insert() se.submit() # check if per complete is as expected mr.load_from_db() self.assertEquals(mr.per_ordered, 50) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 27.0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 1.5) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 27.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 1.5) # check if per complete is as expected for Stock Entry cancelled se.cancel() mr.load_from_db() self.assertEquals(mr.per_ordered, 0) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 54.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 3.0) def test_completed_qty_for_over_transfer(self): existing_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") existing_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") # submit a material request of type Transfer mr = frappe.copy_doc(test_records[0]) mr.material_request_type = "Transfer" mr.insert() mr.submit() # check if per complete is None self.assertEquals(mr.per_ordered, None) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0) # map a stock entry from erpnext.stock.doctype.material_request.material_request import make_stock_entry se_doc = make_stock_entry(mr.name) se_doc.update({ "posting_date": "2013-03-01", "posting_time": "00:00", "fiscal_year": "_Test Fiscal Year 2013", }) se_doc.get("mtn_details")[0].update({ "qty": 60.0, "transfer_qty": 60.0, "s_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) se_doc.get("mtn_details")[1].update({ "qty": 3.0, "transfer_qty": 3.0, "s_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) # make available the qty in _Test Warehouse 1 before transfer self._insert_stock_entry(60.0, 3.0) # check for stopped status of Material Request se = frappe.copy_doc(se_doc) se.insert() mr.update_status('Stopped') self.assertRaises(frappe.InvalidStatusError, se.submit) self.assertRaises(frappe.InvalidStatusError, se.cancel) mr.update_status('Submitted') se = frappe.copy_doc(se_doc) se.insert() se.submit() # check if per complete is as expected mr.load_from_db() self.assertEquals(mr.per_ordered, 100) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 60.0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 3.0) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1)
self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2) # check if per complete is as expected for Stock Entry cancelled se.cancel() mr.load_from_db() self.assertEquals(mr.per_ordered, 0) self.assertEquals(mr.get("indent_details")[0].ordered_qty, 0) self.assertEquals(mr.get("indent_details")[1].ordered_qty, 0) current_requested_qty_item1 = self._get_requested_qty("_Test Item Home Desktop 100", "_Test Warehouse - _TC") current_requested_qty_item2 = self._get_requested_qty("_Test Item Home Desktop 200", "_Test Warehouse - _TC") self.assertEquals(current_requested_qty_item1, existing_requested_qty_item1 + 54.0) self.assertEquals(current_requested_qty_item2, existing_requested_qty_item2 + 3.0) def test_incorrect_mapping_of_stock_entry(self): # submit a material request of type Transfer mr = frappe.copy_doc(test_records[0]) mr.material_request_type = "Transfer" mr.insert() mr.submit() # map a stock entry from erpnext.stock.doctype.material_request.material_request import make_stock_entry se_doc = make_stock_entry(mr.name) se_doc.update({ "posting_date": "2013-03-01", "posting_time": "00:00", "fiscal_year": "_Test Fiscal Year 2013", }) se_doc.get("mtn_details")[0].update({ "qty": 60.0, "transfer_qty": 60.0, "s_warehouse": "_Test Warehouse - _TC", "t_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) se_doc.get("mtn_details")[1].update({ "qty": 3.0, "transfer_qty": 3.0, "s_warehouse": "_Test Warehouse 1 - _TC", "incoming_rate": 1.0 }) # the first row's warehouses do not match the Material Request, so insert must fail se = frappe.copy_doc(se_doc) self.assertRaises(frappe.MappingMismatchError, se.insert) def test_warehouse_company_validation(self): from erpnext.stock.utils import InvalidWarehouseCompany mr = frappe.copy_doc(test_records[0]) mr.company = "_Test Company 1" self.assertRaises(InvalidWarehouseCompany, mr.insert) def _get_requested_qty(self, item_code, warehouse): return flt(frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse}, "indented_qty")) test_dependencies = ["Currency Exchange"] test_records = frappe.get_test_records('Material Request') """ Acceptance tests for Studio's Settings pages """ import re from .base_studio_test import StudioCourseTest from ...pages.studio.settings_certificates import CertificatesPage from ...pages.studio.settings_advanced import AdvancedSettingsPage class CertificatesTest(StudioCourseTest): """ Tests for the settings/certificates page. """ def setUp(self): # pylint: disable=arguments-differ super(CertificatesTest, self).setUp(is_staff=True) self.certificates_page = CertificatesPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self.advanced_settings_page = AdvancedSettingsPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self.course_advanced_settings = dict() def make_signatory_data(self, prefix='First'): """ Makes a signatory dict that can be used in the tests to create certificates """ return { 'name': '{prefix} Signatory Name'.format(prefix=prefix), 'title': '{prefix} Signatory Title'.format(prefix=prefix), 'organization': '{prefix} Signatory Organization'.format(prefix=prefix), } def create_and_verify_certificate(self, course_title_override, existing_certs, signatories): """ Creates a new certificate and verifies that it was properly created.
""" self.assertEqual(existing_certs, len(self.certificates_page.certificates)) if existing_certs == 0: self.certificates_page.wait_for_first_certificate_button() self.certificates_page.click_first_certificate_button() else: self.certificates_page.wait_for_add_certificate_button() self.certificates_page.click_add_certificate_button() certificate = self.certificates_page.certificates[existing_certs] # Set the certificate properties certificate.course_title = course_title_override # add signatories added_signatories = 0 for idx, signatory in enumerate(signatories): certificate.signatories[idx].name = signatory['name'] certificate.signatories[idx].title = signatory['title'] certificate.signatories[idx].organization = signatory['organization'] certificate.signatories[idx].upload_signature_image('Signature-{}.png'.format(idx)) added_signatories += 1 if len(signatories) > added_signatories: certificate.click_add_signatory_button() # Save the certificate self.assertEqual(certificate.get_text('.action-primary'), "Create") certificate.click_create_certificate_button() self.assertIn(course_title_override, certificate.course_title) return certificate def test_no_certificates_by_default(self): """ Scenario: Ensure that message telling me to create a new certificate is shown when no certificate exist. Given I have a course without certificates When I go to the Certificates page in Studio Then I see "You have not created any certificates yet." message and a link with text "Set up your certificate" """ self.certificates_page.visit() self.assertTrue(self.certificates_page.no_certificates_message_shown) self.assertIn( "You have not created any certificates yet.", self.certificates_page.no_certificates_message_text ) self.assertIn( "Set up your certificate", self.certificates_page.new_certificate_link_text ) def test_can_create_and_edit_certficate(self): """ Scenario: Ensure that the certificates can be created and edited correctly. Given I have a course without certificates When I click button 'Add your first Certificate' And I set new the course title override and signatory and click the button 'Create' Then I see the new certificate is added and has correct data When I edit the certificate And I change the name and click the button 'Save' Then I see the certificate is saved successfully and has the new name """ self.certificates_page.visit() self.certificates_page.wait_for_first_certificate_button() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [self.make_signatory_data('first'), self.make_signatory_data('second')] ) # Edit the certificate certificate.click_edit_certificate_button() certificate.course_title = "Updated Course Title Override 2" self.assertEqual(certificate.get_text('.action-primary'), "Save") certificate.click_save_certificate_button() self.assertIn("Updated Course Title Override 2", certificate.course_title) def test_can_delete_certificate(self): """ Scenario: Ensure that the user can delete certificate. 
Given I have a course with 1 certificate And I go to the Certificates page When I delete the Certificate with name "New Certificate" Then I see that there is no certificate When I refresh the page Then I see that the certificate has been deleted """ self.certificates_page.visit() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [self.make_signatory_data('first'), self.make_signatory_data('second')] ) certificate.wait_for_certificate_delete_button() self.assertEqual(len(self.certificates_page.certificates), 1) # Delete the certificate we just created certificate.click_delete_certificate_button() self.certificates_page.click_confirmation_prompt_primary_button() # Reload the page and confirm there are no certificates self.certificates_page.visit() self.assertEqual(len(self.certificates_page.certificates), 0) def test_can_create_and_edit_signatories_of_certificate(self): """ Scenario: Ensure that the certificates can be created with signatories and edited correctly. Given I have a course without certificates When I click the button 'Add your first Certificate' And I set the new course title override and signatory and click the button 'Create' Then I see the new certificate is added and has one signatory inside it When I click the 'Edit' button of the signatory panel And I set the name and click the 'Save' icon Then I see the signatory name updated to the newly set name When I refresh the certificates page Then I can see the course has one certificate with the new signatory name When I click the 'Edit' button of the signatory panel And click the 'Close' button Then I can see no change in the signatory detail """ self.certificates_page.visit() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [self.make_signatory_data('first')] ) self.assertEqual(len(self.certificates_page.certificates), 1) # Edit the signatory in the certificate signatory = certificate.signatories[0] signatory.edit() signatory.name = 'Updated signatory name' signatory.title = 'Update signatory title' signatory.organization = 'Updated signatory organization' signatory.save() self.assertEqual(len(self.certificates_page.certificates), 1) # Refresh the page so it has the updated certificate object. self.certificates_page.refresh() signatory = self.certificates_page.certificates[0].signatories[0] self.assertIn("Updated signatory name", signatory.name) self.assertIn("Update signatory title", signatory.title) self.assertIn("Updated signatory organization", signatory.organization) signatory.edit() signatory.close() self.assertIn("Updated signatory name", signatory.name) def test_can_cancel_creation_of_certificate(self): """ Scenario: Ensure that creation of a certificate can be canceled correctly.
Given I have a course without certificates When I click the button 'Add your first Certificate' And I set the name of the certificate and click the button 'Cancel' Then I see that there are no certificates in the course """ self.certificates_page.visit() self.certificates_page.click_first_certificate_button() certificate = self.certificates_page.certificates[0] certificate.course_title = "Title Override" certificate.click_cancel_edit_certificate() self.assertEqual(len(self.certificates_page.certificates), 0) def test_line_breaks_in_signatory_title(self): """ Scenario: Ensure that line breaks are properly reflected in the certificate Given I have a certificate with signatories When I add a signatory title with a new line character Then I see a line break in the certificate title """ self.certificates_page.visit() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [ { 'name': 'Signatory Name', 'title': 'Signatory title with new line character \n', 'organization': 'Signatory Organization', } ] ) certificate.wait_for_certificate_delete_button() # Make sure certificate is created self.assertEqual(len(self.certificates_page.certificates), 1) signatory_title = self.certificates_page.get_first_signatory_title() self.assertNotEqual([], re.findall(r'<br\s*/?>', signatory_title)) def test_course_number_in_certificate_details_view(self): """ Scenario: Ensure that Course Number is displayed in the certificate details view Given I have a certificate When I visit the certificate details page in Studio Then I see Course Number next to Course Name """ self.certificates_page.visit() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [self.make_signatory_data('first')] ) certificate.wait_for_certificate_delete_button() # Make sure certificate is created self.assertEqual(len(self.certificates_page.certificates), 1) course_number = self.certificates_page.get_course_number() self.assertEqual(self.course_info['number'], course_number) def test_course_number_override_in_certificate_details_view(self): """ Scenario: Ensure that Course Number Override is displayed in the certificate details view Given I have a certificate When I visit the certificate details page in Studio Then I see Course Number Override next to Course Name """ self.course_advanced_settings.update( {'Course Number Display String': 'Course Number Override String'} ) self.certificates_page.visit() certificate = self.create_and_verify_certificate( "Course Title Override", 0, [self.make_signatory_data('first')] ) certificate.wait_for_certificate_delete_button() # Make sure certificate is created self.assertEqual(len(self.certificates_page.certificates), 1) # set up course number override in Advanced Settings Page self.advanced_settings_page.visit() self.advanced_settings_page.set_values(self.course_advanced_settings) self.advanced_settings_page.wait_for_ajax() self.certificates_page.visit() course_number_override = self.certificates_page.get_course_number_override() self.assertEqual(self.course_advanced_settings['Course Number Display String'], course_number_override) # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'example12.ui' # # Created: Sun Jan 18 18:25:33 2015 # by: PyQt4 UI code generator 4.9.1 # # WARNING! All changes made in this file will be lost!
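# A typical way to drive this generated class (illustrative sketch only, not
# part of the generated output; it assumes PyQt4 is installed and that this
# module is importable under the hypothetical name example12):
#
#     import sys
#     from PyQt4 import QtGui
#     from example12 import Ui_MainWindow
#
#     app = QtGui.QApplication(sys.argv)
#     window = QtGui.QMainWindow()
#     ui = Ui_MainWindow()
#     ui.setupUi(window)
#     window.show()
#     sys.exit(app.exec_())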
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(534, 613) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.mdiArea = QtGui.QMdiArea(self.centralwidget) self.mdiArea.setGeometry(QtCore.QRect(20, 20, 441, 421)) self.mdiArea.setObjectName(_fromUtf8("mdiArea")) self.subwindow = QtGui.QWidget() self.subwindow.setObjectName(_fromUtf8("subwindow")) self.label = QtGui.QLabel(self.subwindow) self.label.setGeometry(QtCore.QRect(50, 30, 66, 17)) self.label.setObjectName(_fromUtf8("label")) self.subwindow_2 = QtGui.QWidget() self.subwindow_2.setObjectName(_fromUtf8("subwindow_2")) self.label_2 = QtGui.QLabel(self.subwindow_2) self.label_2.setGeometry(QtCore.QRect(80, 30, 66, 17)) self.label_2.setObjectName(_fromUtf8("label_2")) self.pushButton_next = QtGui.QPushButton(self.centralwidget) self.pushButton_next.setGeometry(QtCore.QRect(20, 460, 98, 27)) self.pushButton_next.setObjectName(_fromUtf8("pushButton_next")) self.pushButton__back = QtGui.QPushButton(self.centralwidget) self.pushButton__back.setGeometry(QtCore.QRect(190, 460, 98, 27)) self.pushButton__back.setObjectName(_fromUtf8("pushButton__back")) self.pushButton__close = QtGui.QPushButton(self.centralwidget) self.pushButton__close.setGeometry(QtCore.QRect(350, 460, 98, 27)) self.pushButton__close.setObjectName(_fromUtf8("pushButton__close")) self.pushButton_cascade = QtGui.QPushButton(self.centralwidget) self.pushButton_cascade.setGeometry(QtCore.QRect(20, 500, 98, 27)) self.pushButton_cascade.setObjectName(_fromUtf8("pushButton_cascade")) self.pushButton_tail = QtGui.QPushButton(self.centralwidget) self.pushButton_tail.setGeometry(QtCore.QRect(140, 500, 98, 27)) self.pushButton_tail.setObjectName(_fromUtf8("pushButton_tail")) self.pushButton_subwindow = QtGui.QPushButton(self.centralwidget) self.pushButton_subwindow.setGeometry(QtCore.QRect(260, 500, 121, 27)) self.pushButton_subwindow.setObjectName(_fromUtf8("pushButton_subwindow")) self.pushButton_tab = QtGui.QPushButton(self.centralwidget) self.pushButton_tab.setGeometry(QtCore.QRect(400, 500, 98, 27)) self.pushButton_tab.setObjectName(_fromUtf8("pushButton_tab")) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 534, 25)) self.menubar.setObjectName(_fromUtf8("menubar")) MainWindow.setMenuBar(self.menubar) self.statusbar = QtGui.QStatusBar(MainWindow) self.statusbar.setObjectName(_fromUtf8("statusbar")) MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8)) self.subwindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Subwindow", None, QtGui.QApplication.UnicodeUTF8)) self.label.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8)) self.subwindow_2.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Subwindow", None, QtGui.QApplication.UnicodeUTF8)) self.label_2.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_next.setText(QtGui.QApplication.translate("MainWindow", "Next", None, 
QtGui.QApplication.UnicodeUTF8)) self.pushButton__back.setText(QtGui.QApplication.translate("MainWindow", "Back", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton__close.setText(QtGui.QApplication.translate("MainWindow", "Close All", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_cascade.setText(QtGui.QApplication.translate("MainWindow", "Cascade", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_tail.setText(QtGui.QApplication.translate("MainWindow", "Tail", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_subwindow.setText(QtGui.QApplication.translate("MainWindow", "View Subwindow", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_tab.setText(QtGui.QApplication.translate("MainWindow", "View Tab", None, QtGui.QApplication.UnicodeUTF8)) #!/usr/bin/env python # -*- coding: utf-8 -*- # # container # # Copyright (c) 2008-2011 University of Dundee. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # # Author: Aleksandra Tarkowska , 2008. # # Version: 1.0 # import omero from omero.rtypes import * from django.conf import settings from django.core.urlresolvers import reverse from django.utils.encoding import smart_str import logging logger = logging.getLogger(__name__) from webclient.controller import BaseController class BaseContainer(BaseController): project = None screen = None dataset = None plate = None acquisition = None well = None image = None tag = None file = None comment = None tags = None index = None containers = None experimenter = None c_size = 0 text_annotations = None txannSize = 0 long_annotations = None file_annotations = None orphaned = False def __init__(self, conn, project=None, dataset=None, image=None, screen=None, plate=None, acquisition=None, well=None, tag=None, tagset=None, file=None, comment=None, annotation=None, index=None, orphaned=None, **kw): BaseController.__init__(self, conn) if project is not None: self.project = self.conn.getObject("Project", project) self.assertNotNone(self.project, project, "Project") self.assertNotNone(self.project._obj, project, "Project") if dataset is not None: self.dataset = self.conn.getObject("Dataset", dataset) self.assertNotNone(self.dataset, dataset, "Dataset") self.assertNotNone(self.dataset._obj, dataset, "Dataset") if screen is not None: self.screen = self.conn.getObject("Screen", screen) self.assertNotNone(self.screen, screen, "Screen") self.assertNotNone(self.screen._obj, screen, "Screen") if plate is not None: self.plate = self.conn.getObject("Plate", plate) self.assertNotNone(self.plate, plate, "Plate") self.assertNotNone(self.plate._obj, plate, "Plate") if acquisition is not None: self.acquisition = self.conn.getObject("PlateAcquisition", acquisition) self.assertNotNone(self.acquisition, acquisition, "Plate Acquisition") self.assertNotNone(self.acquisition._obj, acquisition, "Plate Acquisition") if image is not None: self.image = self.conn.getObject("Image", image) self.assertNotNone(self.image, image, "Image") 
self.assertNotNone(self.image._obj, image, "Image") if well is not None: self.well = self.conn.getObject("Well", well) self.assertNotNone(self.well, well, "Well") self.assertNotNone(self.well._obj, well, "Well") if index is not None: self.well.index = index if tag is not None: self.tag = self.conn.getObject("Annotation", tag) self.assertNotNone(self.tag, tag, "Tag") self.assertNotNone(self.tag._obj, tag, "Tag") if tagset is not None: self.tag = self.conn.getObject("Annotation", tagset) self.assertNotNone(self.tag, tagset, "Tag") self.assertNotNone(self.tag._obj, tagset, "Tag") if comment is not None: self.comment = self.conn.getObject("Annotation", comment) self.assertNotNone(self.comment, comment, "Comment") self.assertNotNone(self.comment._obj, comment, "Comment") if file is not None: self.file = self.conn.getObject("Annotation", file) self.assertNotNone(self.file, file, "File") self.assertNotNone(self.file._obj, file, "File") if annotation is not None: self.annotation = self.conn.getObject("Annotation", annotation) self.assertNotNone(self.annotation, annotation, "Annotation") self.assertNotNone(self.annotation._obj, annotation, "Annotation") if orphaned: self.orphaned = True def assertNotNone(self, obj, obj_id, obj_name): if obj is None: raise AttributeError("We are sorry, but that %s (id:%s) does not exist, or if it does, you have no permission to see it." % (obj_name, obj_id)) def _get_object(self): """ Since the container is often used to wrap a single Project, Dataset etc, several methods need access to the underlying object. E.g. obj_type(), obj_id(), canAnnotate(), canEdit(). This removes many if statements from the metadata_general.html template for places that are displaying data for a single Object. E.g. Edit Name etc. """ if self.project is not None: return self.project if self.dataset is not None: return self.dataset if self.image is not None: return self.image if self.screen is not None: return self.screen if self.acquisition is not None: return self.acquisition if self.plate is not None: return self.plate if self.well is not None: return self.well if self.tag is not None: return self.tag if self.file is not None: return self.file def obj_type(self): if self.project is not None: return "project" if self.dataset is not None: return "dataset" if self.image is not None: return "image" if self.screen is not None: return "screen" if self.acquisition is not None: return "acquisition" if self.plate is not None: return "plate" if self.well is not None: return "well" if self.tag is not None: return "tag" if self.file is not None: return "file" def obj_id(self): obj = self._get_object() return obj is not None and obj.id or None def canAnnotate(self): obj = self._get_object() return obj is not None and obj.canAnnotate() or False def canEdit(self): obj = self._get_object() return obj is not None and obj.canEdit() or None def getPermsCss(self): """ Shortcut to get permissions flags, E.g. 
for css """ return self._get_object().getPermsCss() def getNumberOfFields(self): """ Applies to Plates (all fields) or PlateAcquisitions""" if self.plate is not None: return self.plate.getNumberOfFields() elif self.acquisition: p = self.conn.getObject("Plate", self.acquisition._obj.plate.id.val) return p.getNumberOfFields(self.acquisition.getId()) def getPlateId(self): """ Used by templates that display Plates or PlateAcquisitions """ if self.plate is not None: return self.plate.getId() elif self.acquisition: return self.acquisition._obj.plate.id.val def listFigureScripts(self, objDict=None): """ This configures all the Figure Scripts, setting their enabled status given the currently selected object (self.image etc) or batch objects (uses objDict). """ figureScripts = [] # id is used in url and is mapped to full script path by views.figure_script() splitView = {'id': 'SplitView', 'name':'Split View Figure', 'enabled': False, 'tooltip': "Create a figure of images, splitting their channels into separate views"} # Split View Figure is enabled if we have at least one image with SizeC > 1 if self.image: splitView['enabled'] = (self.image.getSizeC() > 1) elif objDict is not None: if 'image' in objDict: for i in objDict['image']: if i.getSizeC() > 1: splitView['enabled'] = True break thumbnailFig = {'id': 'Thumbnail', 'name': 'Thumbnail Figure', 'enabled': False, 'tooltip': "Export a figure of thumbnails, optionally sorted by tag"} # Thumbnail figure is enabled if we have Datasets or Images selected if self.image or self.dataset: thumbnailFig['enabled'] = True elif objDict is not None: if 'image' in objDict or 'dataset' in objDict: thumbnailFig['enabled'] = True figureScripts.append(splitView) figureScripts.append(thumbnailFig) return figureScripts def openAstexViewerCompatible(self): """ Is the image suitable to be viewed with the Volume viewer 'Open Astex Viewer' applet? Image must be a 'volume' of suitable dimensions and not too big. """ MAX_SIDE = settings.OPEN_ASTEX_MAX_SIDE # default is 400 MIN_SIDE = settings.OPEN_ASTEX_MIN_SIDE # default is 20 MAX_VOXELS = settings.OPEN_ASTEX_MAX_VOXELS # default is 15625000 (250 * 250 * 250) if self.image is None: return False sizeZ = self.image.getSizeZ() if self.image.getSizeC() > 1: return False sizeX = self.image.getSizeX() sizeY = self.image.getSizeY() if sizeZ < MIN_SIDE or sizeX < MIN_SIDE or sizeY < MIN_SIDE: return False if sizeX > MAX_SIDE or sizeY > MAX_SIDE or sizeZ > MAX_SIDE: return False voxelCount = (sizeX * sizeY * sizeZ) if voxelCount > MAX_VOXELS: return False try: # if scipy ndimage is not available for interpolation, can only handle smaller images import scipy.ndimage except ImportError: logger.debug("Failed to import scipy.ndimage - Open Astex Viewer limited to display of smaller images.") MAX_VOXELS = (160 * 160 * 160) if voxelCount > MAX_VOXELS: return False return True def formatMetadataLine(self, l): if len(l) < 1: return None return l.split("=") def originalMetadata(self): # TODO: hardcoded values. 
self.global_metadata = list() self.series_metadata = list() self.companion_files = list() if self.image is not None: om = self.image.loadOriginalMetadata() elif self.well.getWellSample().image is not None: om = self.well.getWellSample().image().loadOriginalMetadata() if om is not None: self.original_metadata = om[0] self.global_metadata = om[1] self.series_metadata = om[2] # Look for companion files on the Image if self.image is not None: comp_obj = self.image p = self.image.getPlate() # in SPW model, companion files can be found on Plate if p is not None: comp_obj = p for ann in comp_obj.listAnnotations(): if hasattr(ann._obj, "file") and ann.ns == omero.constants.namespaces.NSCOMPANIONFILE: if ann.getFileName() != omero.constants.annotation.file.ORIGINALMETADATA: self.companion_files.append(ann) def channelMetadata(self): self.channel_metadata = None try: if self.image is not None: self.channel_metadata = self.image.getChannels() elif self.well is not None: self.channel_metadata = self.well.getWellSample().image().getChannels() except: pass if self.channel_metadata is None: self.channel_metadata = list() def loadTags(self, eid=None): if eid is not None: if eid == -1: # Load data for all users eid = None else: self.experimenter = self.conn.getObject("Experimenter", eid) else: eid = self.conn.getEventContext().userId self.tags = list(self.conn.listTags(eid)) self.t_size = len(self.tags) def loadDataByTag(self): pr_list = list(self.conn.getObjectsByAnnotations('Project',[self.tag.id])) ds_list = list(self.conn.getObjectsByAnnotations('Dataset',[self.tag.id])) im_list = list(self.conn.getObjectsByAnnotations('Image',[self.tag.id])) sc_list = list(self.conn.getObjectsByAnnotations('Screen',[self.tag.id])) pl_list = list(self.conn.getObjectsByAnnotations('Plate',[self.tag.id])) pa_list = list(self.conn.getObjectsByAnnotations('PlateAcquisition',[self.tag.id])) pr_list.sort(key=lambda x: x.getName() and x.getName().lower()) ds_list.sort(key=lambda x: x.getName() and x.getName().lower()) im_list.sort(key=lambda x: x.getName() and x.getName().lower()) sc_list.sort(key=lambda x: x.getName() and x.getName().lower()) pl_list.sort(key=lambda x: x.getName() and x.getName().lower()) pa_list.sort(key=lambda x: x.getName() and x.getName().lower()) self.containers={'projects': pr_list, 'datasets': ds_list, 'images': im_list, 'screens':sc_list, 'plates':pl_list, 'aquisitions': pa_list} self.c_size = len(pr_list)+len(ds_list)+len(im_list)+len(sc_list)+len(pl_list)+len(pa_list) def listImagesInDataset(self, did, eid=None, page=None, load_pixels=False): if eid is not None: if eid == -1: # Load data for all users eid = None else: self.experimenter = self.conn.getObject("Experimenter", eid) im_list = list(self.conn.listImagesInDataset(oid=did, eid=eid, page=page, load_pixels=load_pixels)) im_list.sort(key=lambda x: x.getName().lower()) self.containers = {'images': im_list} self.c_size = self.conn.getCollectionCount("Dataset", "imageLinks", [long(did)])[long(did)] if page is not None: self.paging = self.doPaging(page, len(im_list), self.c_size) def listContainerHierarchy(self, eid=None): if eid is not None: if eid == -1: eid = None else: self.experimenter = self.conn.getObject("Experimenter", eid) else: eid = self.conn.getEventContext().userId pr_list = list(self.conn.listProjects(eid)) ds_list = list(self.conn.listOrphans("Dataset", eid)) sc_list = list(self.conn.listScreens(eid)) pl_list = list(self.conn.listOrphans("Plate", eid)) pr_list.sort(key=lambda x: x.getName() and x.getName().lower()) 
ds_list.sort(key=lambda x: x.getName() and x.getName().lower()) sc_list.sort(key=lambda x: x.getName() and x.getName().lower()) pl_list.sort(key=lambda x: x.getName() and x.getName().lower()) self.orphans = self.conn.countOrphans("Image", eid) self.containers={'projects': pr_list, 'datasets': ds_list, 'screens': sc_list, 'plates': pl_list} self.c_size = len(pr_list)+len(ds_list)+len(sc_list)+len(pl_list) def listOrphanedImages(self, eid=None, page=None): if eid is not None: if eid == -1: eid = None else: self.experimenter = self.conn.getObject("Experimenter", eid) else: eid = self.conn.getEventContext().userId params = omero.sys.ParametersI() if page is not None: params.page((int(page)-1)*settings.PAGE, settings.PAGE) im_list = list(self.conn.listOrphans("Image", eid=eid, params=params)) im_list.sort(key=lambda x: x.getName().lower()) self.containers = {'orphaned': True, 'images': im_list} self.c_size = self.conn.countOrphans("Image", eid=eid) if page is not None: self.paging = self.doPaging(page, len(im_list), self.c_size) # Annotation list def annotationList(self): self.text_annotations = list() self.rating_annotations = list() self.file_annotations = list() self.tag_annotations = list() self.xml_annotations = list() self.boolean_annotations = list() self.double_annotations = list() self.long_annotations = list() self.term_annotations = list() self.time_annotations = list() self.companion_files = list() annTypes = {omero.model.CommentAnnotationI: self.text_annotations, omero.model.LongAnnotationI: self.long_annotations, omero.model.FileAnnotationI: self.file_annotations, omero.model.TagAnnotationI: self.tag_annotations, omero.model.XmlAnnotationI: self.xml_annotations, omero.model.BooleanAnnotationI: self.boolean_annotations, omero.model.DoubleAnnotationI: self.double_annotations, omero.model.TermAnnotationI: self.term_annotations, omero.model.TimestampAnnotationI: self.time_annotations} aList = list() if self.image is not None: aList = list(self.image.listAnnotations()) elif self.dataset is not None: aList = list(self.dataset.listAnnotations()) elif self.project is not None: aList = list(self.project.listAnnotations()) elif self.screen is not None: aList = list(self.screen.listAnnotations()) elif self.plate is not None: aList = list(self.plate.listAnnotations()) elif self.acquisition is not None: aList = list(self.acquisition.listAnnotations()) elif self.well is not None: aList = list(self.well.getWellSample().image().listAnnotations()) for ann in aList: annClass = ann._obj.__class__ if annClass in annTypes: if ann.ns == omero.constants.metadata.NSINSIGHTRATING: self.rating_annotations.append(ann) elif ann.ns == omero.constants.namespaces.NSCOMPANIONFILE: if ann.getFileName() != omero.constants.annotation.file.ORIGINALMETADATA: self.companion_files.append(ann) else: annTypes[annClass].append(ann) self.text_annotations.sort(key=lambda x: x.creationEventDate(), reverse=True) self.file_annotations.sort(key=lambda x: x.creationEventDate()) self.rating_annotations.sort(key=lambda x: x.creationEventDate()) self.tag_annotations.sort(key=lambda x: x.textValue) self.txannSize = len(self.text_annotations) self.fileannSize = len(self.file_annotations) self.tgannSize = len(self.tag_annotations) def canUseOthersAnns(self): """ Test to see whether other user's Tags, Files etc should be provided for annotating. Used to ensure that E.g. 
Group Admins / Owners don't try to link other users' Annotations when in a private group (even though they could retrieve those annotations) """ gid = self.conn.SERVICE_OPTS.getOmeroGroup() if gid is None: return False try: group = self.conn.getObject("ExperimenterGroup", long(gid)) except: return False if group is None: return False perms = str(group.getDetails().getPermissions()) rv = False if perms in ("rwrw--", "rwra--"): return True if perms == "rwr---" and (self.conn.isAdmin() or self.conn.isLeader(group.id)): return True return False def loadBatchAnnotations(self, objDict, ann_ids=None, addedByMe=False): """ Look up the Tags, Files, Comments, Ratings etc that are on one or more of the objects in objDict. """ batchAnns = { omero.model.CommentAnnotationI: 'Comment', omero.model.LongAnnotationI: 'Long', omero.model.FileAnnotationI: 'File', omero.model.TagAnnotationI: 'Tag', omero.model.XmlAnnotationI: 'Xml', omero.model.BooleanAnnotationI: 'Boolean', omero.model.DoubleAnnotationI: 'Double', omero.model.TermAnnotationI: 'Term', omero.model.TimestampAnnotationI: 'TimeStamp' } # returns e.g. {"Tag": {AnnId: {'ann': ObjWrapper, 'links': [AnnotationLinkWrapper, ...], 'unlink': 0} }, etc...} rv = {} # populate empty return map for key, value in batchAnns.items(): rv[value] = {} params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() if addedByMe: params.theFilter.ownerId = omero.rtypes.rlong(self.conn.getUserId()) for objType, objList in objDict.items(): if len(objList) == 0: continue parent_ids = [o.getId() for o in objList] # If we're working with a 'well', we're actually annotating the image for i in range(len(objList)): o = objList[i] if isinstance(o._obj, omero.model.WellI): objType = "Image" parent_ids[i] = o.getWellSample().image().getId() # index has already been set if isinstance(objList[0]._obj, omero.model.PlateAcquisitionI): objType = 'PlateAcquisition' for annLink in self.conn.getAnnotationLinks(objType, parent_ids=parent_ids, ann_ids=ann_ids, params=params): ann = annLink.getAnnotation() if ann.ns == omero.constants.metadata.NSINSIGHTRATING: continue # TODO: Handle ratings if ann.ns == omero.constants.namespaces.NSCOMPANIONFILE: continue annClass = ann._obj.__class__ if annClass in batchAnns: annotationsMap = rv[ batchAnns[annClass] ] # E.g. map for 'Tags' if ann.getId() not in annotationsMap: annotationsMap[ann.getId()] = { 'ann': ann, 'links': [annLink], 'unlink': 0} else: annotationsMap[ann.getId()]['links'].append( annLink ) if annLink.canDelete(): annotationsMap[ann.getId()]['unlink'] += 1 # bit more preparation for display... batchAnns = {} for key, annMap in rv.items(): # E.g.
key = 'Tag', 'Comment', 'File' etc annList = [] for annId, annDict in annMap.items(): # annDict is {'ann': AnnWrapper, 'links': [AnnotationLinkWrapper, ...]} annDict['links'].sort(key=lambda x: x.parent.id.val) # Each ann has links to several objects annDict['can_remove'] = annDict['unlink'] > 0 annList.append(annDict) batchAnns[key] = annList return batchAnns def getTagsByObject(self, parent_type=None, parent_ids=None): eid = (not self.canUseOthersAnns()) and self.conn.getEventContext().userId or None def sort_tags(tag_gen): tag_anns = list(tag_gen) try: tag_anns.sort(key=lambda x: x.getValue().lower()) except: pass return tag_anns if self.image is not None: return sort_tags(self.image.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.dataset is not None: return sort_tags(self.dataset.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.project is not None: return sort_tags(self.project.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.well is not None: return sort_tags(self.well.getWellSample().image().listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.plate is not None: return sort_tags(self.plate.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.screen is not None: return sort_tags(self.screen.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif self.acquisition is not None: return sort_tags(self.acquisition.listOrphanedAnnotations(eid=eid, anntype='Tag')) elif parent_type and parent_ids: parent_type = parent_type.title() if parent_type == "Acquisition": parent_type = "PlateAcquisition" return sort_tags(self.conn.listOrphanedAnnotations(parent_type, parent_ids, eid=eid, anntype='Tag')) else: if eid is not None: params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() params.theFilter.ownerId = omero.rtypes.rlong(eid) return sort_tags(self.conn.getObjects("TagAnnotation", params=params)) return sort_tags(self.conn.getObjects("TagAnnotation")) def getFilesByObject(self, parent_type=None, parent_ids=None): eid = (not self.canUseOthersAnns()) and self.conn.getEventContext().userId or None ns = [omero.constants.namespaces.NSCOMPANIONFILE, omero.constants.namespaces.NSEXPERIMENTERPHOTO] def sort_file_anns(file_ann_gen): file_anns = list(file_ann_gen) try: file_anns.sort(key=lambda x: x.getFile().getName().lower()) except: pass return file_anns if self.image is not None: return sort_file_anns(self.image.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.dataset is not None: return sort_file_anns(self.dataset.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.project is not None: return sort_file_anns(self.project.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.well is not None: return sort_file_anns(self.well.getWellSample().image().listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.plate is not None: return sort_file_anns(self.plate.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.screen is not None: return sort_file_anns(self.screen.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif self.acquisition is not None: return sort_file_anns(self.acquisition.listOrphanedAnnotations(eid=eid, ns=ns, anntype='File')) elif parent_type and parent_ids: parent_type = parent_type.title() if parent_type == "Acquisition": parent_type = "PlateAcquisition" return sort_file_anns(self.conn.listOrphanedAnnotations(parent_type, parent_ids, eid=eid, ns=ns, anntype='File')) else: return sort_file_anns(self.conn.listFileAnnotations(eid=eid))
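# Illustrative sketch (not part of the original controller) of consuming the
# structure returned by loadBatchAnnotations() above; `container` and
# `objDict` are assumed to already exist:
#
#     batch = container.loadBatchAnnotations(objDict)
#     for annDict in batch['Tag']:
#         # each entry carries the annotation wrapper, its links, and a
#         # flag saying whether the current user may unlink it
#         print annDict['ann'].getId(), len(annDict['links']), annDict['can_remove']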
#################################################################### # Creation def createDataset(self, name, description=None, img_ids=None): ds = omero.model.DatasetI() ds.name = rstring(str(name)) if description is not None and description != "" : ds.description = rstring(str(description)) if self.project is not None: l_ds = omero.model.ProjectDatasetLinkI() l_ds.setParent(self.project._obj) l_ds.setChild(ds) ds.addProjectDatasetLink(l_ds) dsid = self.conn.saveAndReturnId(ds) if img_ids is not None: iids = [int(i) for i in img_ids.split(",")] links = [] for iid in iids: link = omero.model.DatasetImageLinkI() link.setParent(omero.model.DatasetI(dsid, False)) link.setChild(omero.model.ImageI(iid, False)) links.append(link) self.conn.saveArray(links) return dsid def createProject(self, name, description=None): pr = omero.model.ProjectI() pr.name = rstring(str(name)) if description is not None and description != "" : pr.description = rstring(str(description)) return self.conn.saveAndReturnId(pr) def createScreen(self, name, description=None): sc = omero.model.ScreenI() sc.name = rstring(str(name)) if description is not None and description != "" : sc.description = rstring(str(description)) return self.conn.saveAndReturnId(sc) def checkMimetype(self, file_type): if file_type is None or len(file_type) == 0: file_type = "application/octet-stream" return file_type def createCommentAnnotations(self, content, oids, well_index=0): ann = omero.model.CommentAnnotationI() ann.textValue = rstring(str(content)) ann = self.conn.saveAndReturnObject(ann) new_links = list() for k in oids.keys(): if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample(well_index).image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(ann._obj) new_links.append(l_ann) if len(new_links) > 0 : self.conn.saveArray(new_links) return self.conn.getObject("CommentAnnotation", ann.getId()) def createTagAnnotations(self, tag, desc, oids, well_index=0): """ Creates a new tag (with description) OR uses existing tag with the specified name if found. Links the tag to the specified objects. @param tag: Tag text/name @param desc: Tag description @param oids: Dict of Objects and IDs. E.g. {"Image": [1,2,3], "Dataset": [6]} """ ann = None try: ann = self.conn.findTag(tag, desc) except: pass if ann is None: ann = omero.model.TagAnnotationI() ann.textValue = rstring(str(tag)) ann.setDescription(rstring(str(desc))) ann = self.conn.saveAndReturnObject(ann) new_links = list() parent_objs = [] for k in oids: if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample(well_index).image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob parent_objs.append(obj) l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(ann._obj) new_links.append(l_ann) if len(new_links) > 0 : # If we retrieved an existing Tag above, link may already exist...
try: self.conn.saveArray(new_links) except omero.ValidationException, x: for l in new_links: try: self.conn.saveObject(l) except: pass return ann.getId() def createFileAnnotations(self, newFile, oids, well_index=0): format = self.checkMimetype(newFile.content_type) oFile = omero.model.OriginalFileI() oFile.setName(rstring(smart_str(newFile.name))) oFile.setPath(rstring(smart_str(newFile.name))) oFile.hasher = omero.model.ChecksumAlgorithmI() oFile.hasher.value = omero.rtypes.rstring("SHA1-160") oFile.setMimetype(rstring(str(format))) ofid = self.conn.saveAndReturnId(oFile) of = self.conn.saveAndReturnFile(newFile, ofid) fa = omero.model.FileAnnotationI() fa.setFile(of) fa = self.conn.saveAndReturnObject(fa) new_links = list() otype = None # needed if we only have a single Object for k in oids: if len(oids[k]) > 0: for ob in oids[k]: if isinstance(ob._obj, omero.model.WellI): t = 'Image' obj = ob.getWellSample(well_index).image() elif isinstance(ob._obj, omero.model.PlateAcquisitionI): t = 'PlateAcquisition' obj = ob else: t = k.lower().title() obj = ob otype = t l_ann = getattr(omero.model, t+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(fa._obj) new_links.append(l_ann) if len(new_links) > 0 : new_links = self.conn.getUpdateService().saveAndReturnArray(new_links, self.conn.SERVICE_OPTS) return fa.getId() def createAnnotationsLinks(self, atype, tids, oids, well_index=0): """ Links existing annotations to 1 or more objects @param atype: Annotation type E.g. "tag", "file" @param tids: Annotation IDs @param oids: Dict of Objects and IDs. E.g. {"Image": [1,2,3], "Dataset": [6]} """ atype = str(atype).lower() if atype not in ("tag", "comment", "file"): raise AttributeError("Object type must be: tag, comment, file.") new_links = list() annotations = list(self.conn.getObjects("Annotation", tids)) parent_objs = [] for k in oids: if len(oids[k]) > 0: if k.lower() == 'acquisition': parent_type = 'PlateAcquisition' else: parent_type = k.lower().title() parent_ids = [o.id for o in oids[k]] # check for existing links belonging to the current user params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() params.theFilter.ownerId = rlong(self.conn.getUserId()) links = self.conn.getAnnotationLinks(parent_type, parent_ids=parent_ids, ann_ids=tids, params=params) pcLinks = [(l.parent.id.val, l.child.id.val) for l in links] # Create link between each object and annotation for ob in self.conn.getObjects(parent_type, parent_ids): parent_objs.append(ob) for a in annotations: if (ob.id, a.id) in pcLinks: continue # link already exists if isinstance(ob._obj, omero.model.WellI): parent_type = 'Image' obj = ob.getWellSample(well_index).image() else: obj = ob l_ann = getattr(omero.model, parent_type+"AnnotationLinkI")() l_ann.setParent(obj._obj) l_ann.setChild(a._obj) new_links.append(l_ann) failed = 0 saved_links = [] try: # will fail if any of the links already exist saved_links = self.conn.getUpdateService().saveAndReturnArray(new_links, self.conn.SERVICE_OPTS) except omero.ValidationException, x: for l in new_links: try: saved_links.append(self.conn.getUpdateService().saveAndReturnObject(l, self.conn.SERVICE_OPTS)) except: failed+=1 return tids ################################################################ # Update def updateDescription(self, o_type, description=None): obj = getattr(self, o_type)._obj if description is not None and description != "" : obj.description = rstring(str(description)) else: obj.description = None self.conn.saveObject(obj) def updateName(self,
o_type, name): obj = getattr(self, o_type)._obj if o_type not in ('tag', 'tagset'): obj.name = rstring(str(name)) else: obj.textValue = rstring(str(name)) self.conn.saveObject(obj) def updateImage(self, name, description=None): img = self.image._obj img.name = rstring(str(name)) if description is not None and description != "" : img.description = rstring(str(description)) else: img.description = None self.conn.saveObject(img) def updateDataset(self, name, description=None): container = self.dataset._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updatePlate(self, name, description=None): container = self.plate._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updateProject(self, name, description=None): container = self.project._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def updateScreen(self, name, description=None): container = self.screen._obj container.name = rstring(str(name)) if description is not None and description != "" : container.description = rstring(str(description)) else: container.description = None self.conn.saveObject(container) def move(self, parent, destination): if self.project is not None: return 'Cannot move project.' elif self.dataset is not None: if destination[0] == 'dataset': return 'Cannot move dataset to dataset' elif destination[0] == 'project': up_pdl = None pdls = self.dataset.getParentLinks() already_there = None for pdl in pdls: if pdl.parent.id.val == long(destination[1]): already_there = True if pdl.parent.id.val == long(parent[1]): up_pdl = pdl if already_there: if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_pdl._obj) else: new_pr = self.conn.getObject("Project", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl._obj) else: up_pdl = omero.model.ProjectDatasetLinkI() up_pdl.setChild(self.dataset._obj) up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl) elif destination[0] == 'experimenter': up_pdl = None for p in self.dataset.getParentLinks(): if p.parent.id.val == long(parent[1]): up_pdl = p self.conn.deleteObjectDirect(up_pdl._obj) elif destination[0] == 'orphaned': return 'Cannot move dataset to orphaned images.' else: return 'Destination not supported.' 
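# Added commentary: the remaining branches of move() repeat one
# link-rewiring pattern -- find the link to the old parent, then either
# delete it (duplicate link, or a move to 'experimenter'/'orphaned') or
# re-point it at the new parent. A minimal sketch, assuming hypothetical
# `conn`, `dataset`, `old_project_id` and `new_project` objects:
#
#     for link in dataset.getParentLinks():          # ProjectDatasetLink wrappers
#         if link.parent.id.val == old_project_id:
#             link.setParent(new_project._obj)       # re-point instead of delete+create
#             conn.saveObject(link._obj)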
elif self.image is not None: if destination[0] == 'dataset': up_dsl = None dsls = self.image.getParentLinks() # gets every link for the child already_there = None # check links for dsl in dsls: # if it is already linked to the destination if dsl.parent.id.val == long(destination[1]): already_there = True # gets old parent to update or delete if dsl.parent.id.val == long(parent[1]): up_dsl = dsl if already_there: # delete the link to avoid a duplicate if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_dsl._obj) else: # update the link to the new destination new_ds = self.conn.getObject("Dataset", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl._obj) else: up_dsl = omero.model.DatasetImageLinkI() up_dsl.setChild(self.image._obj) up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl) elif destination[0] == 'project': return 'Cannot move image to project.' elif destination[0] == 'experimenter' or destination[0] == 'orphaned': if parent[0] != destination[0]: up_dsl = None dsls = list(self.image.getParentLinks()) # gets every link for the child if len(dsls) == 1: # gets old parent to delete if dsls[0].parent.id.val == long(parent[1]): up_dsl = dsls[0] self.conn.deleteObjectDirect(up_dsl._obj) else: return 'This image is linked in multiple places. Please unlink the image first.' else: return 'Destination not supported.' elif self.screen is not None: return 'Cannot move screen.' elif self.plate is not None: if destination[0] == 'plate': return 'Cannot move plate to plate' elif destination[0] == 'screen': up_spl = None spls = self.plate.getParentLinks() already_there = None for spl in spls: if spl.parent.id.val == long(destination[1]): already_there = True if spl.parent.id.val == long(parent[1]): up_spl = spl if already_there: if long(parent[1]) != long(destination[1]): self.conn.deleteObjectDirect(up_spl._obj) else: new_sc = self.conn.getObject("Screen", destination[1]) if parent[0] not in ('experimenter', 'orphaned'): up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl._obj) else: up_spl = omero.model.ScreenPlateLinkI() up_spl.setChild(self.plate._obj) up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl) elif destination[0] == 'experimenter' or destination[0] == 'orphaned': if parent[0] != destination[0]: up_spl = None spls = list(self.plate.getParentLinks()) # gets every link for the child for spl in spls: if spl.parent.id.val == long(parent[1]): self.conn.deleteObjectDirect(spl._obj) break else: return 'Destination not supported.' else: return 'No data was chosen.' return def remove(self, parents, index): """ Removes the current object (file, tag, comment, dataset, plate, image) from its parents by manually deleting the link. For Comments, we check whether the comment becomes an orphan and delete it if so @param parents: List of parent IDs, E.g.
['image-123'] """ for p in parents: parent = p.split('-') dtype = str(parent[0]) parentId = long(parent[1]) if dtype == "acquisition": dtype = "PlateAcquisition" if dtype == "well": dtype = "Image" w = self.conn.getObject("Well", parentId) parentId = w.getWellSample(index=index).image().getId() if self.tag: for al in self.tag.getParentLinks(dtype, [parentId]): if al is not None and al.canDelete(): self.conn.deleteObjectDirect(al._obj) elif self.file: for al in self.file.getParentLinks(dtype, [parentId]): if al is not None and al.canDelete(): self.conn.deleteObjectDirect(al._obj) elif self.comment: # remove the comment from the specified parent for al in self.comment.getParentLinks(dtype, [parentId]): if al is not None and al.canDelete(): self.conn.deleteObjectDirect(al._obj) # if the comment is now an orphan, delete it directly orphan = True for parentType in ["Project", "Dataset", "Image", "Screen", "Plate", "PlateAcquisition", "Well"]: annLinks = list(self.conn.getAnnotationLinks(parentType, ann_ids=[self.comment.id])) if len(annLinks) > 0: orphan = False break if orphan: self.conn.deleteObjectDirect(self.comment._obj) elif self.dataset is not None: if dtype == 'project': for pdl in self.dataset.getParentLinks([parentId]): if pdl is not None: self.conn.deleteObjectDirect(pdl._obj) elif self.plate is not None: if dtype == 'screen': for spl in self.plate.getParentLinks([parentId]): if spl is not None: self.conn.deleteObjectDirect(spl._obj) elif self.image is not None: if dtype == 'dataset': for dil in self.image.getParentLinks([parentId]): if dil is not None: self.conn.deleteObjectDirect(dil._obj) else: raise AttributeError("Attribute not specified. Cannot be removed.") def removemany(self, images): if self.dataset is not None: for dil in self.dataset.getParentLinks('image', images): if dil is not None: self.conn.deleteObjectDirect(dil._obj) else: raise AttributeError("Attribute not specified. Cannot be removed.") ########################################################## # Copy def paste(self, destination): if self.project is not None: return 'Cannot paste project.' elif self.dataset is not None: if destination[0] == 'dataset': return 'Cannot paste dataset to dataset' elif destination[0] == 'project': pdls = self.dataset.getParentLinks() already_there = None for pdl in pdls: if pdl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Dataset is already there.' else: new_pr = self.conn.getObject("Project", destination[1]) up_pdl = omero.model.ProjectDatasetLinkI() up_pdl.setChild(self.dataset._obj) up_pdl.setParent(new_pr._obj) self.conn.saveObject(up_pdl) else: return 'Destination not supported.' elif self.image is not None: if destination[0] == 'dataset': dsls = self.image.getParentLinks() # gets every link for the child already_there = None # check links for dsl in dsls: # if it is already linked to the destination if dsl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Image is already there.' else: # create a link to the new destination new_ds = self.conn.getObject("Dataset", destination[1]) up_dsl = omero.model.DatasetImageLinkI() up_dsl.setChild(self.image._obj) up_dsl.setParent(new_ds._obj) self.conn.saveObject(up_dsl) elif destination[0] == 'project': return 'Cannot paste image to project.' else: return 'Destination not supported.' elif self.screen is not None: return 'Cannot paste screen.'
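# Added commentary: unlike move(), paste() never touches the link to the
# old parent -- it only checks whether a link to the destination already
# exists and, if not, saves a brand-new one. Pasting an image into a
# second dataset therefore leaves it linked to both datasets.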
elif self.plate is not None: if destination[0] == 'plate': return 'Cannot paste plate to plate' elif destination[0] == 'screen': spls = self.plate.getParentLinks() already_there = None for spl in spls: if spl.parent.id.val == long(destination[1]): already_there = True if already_there: return 'Plate is already there.' else: new_sc = self.conn.getObject("Screen", destination[1]) up_spl = omero.model.ScreenPlateLinkI() up_spl.setChild(self.plate._obj) up_spl.setParent(new_sc._obj) self.conn.saveObject(up_spl) else: return 'Destination not supported.' else: return 'No data was chosen.' def copyImageToDataset(self, source, destination=None): if destination is None: dsls = self.conn.getDatasetImageLinks(source[1]) # gets every link for the child for dsl in dsls: self.conn.deleteObjectDirect(dsl._obj) else: im = self.conn.getObject("Image", source[1]) ds = self.conn.getObject("Dataset", destination[1]) new_dsl = omero.model.DatasetImageLinkI() new_dsl.setChild(im._obj) new_dsl.setParent(ds._obj) self.conn.saveObject(new_dsl) def copyImagesToDataset(self, images, dataset): if dataset is not None and dataset[0] != "dataset": ims = self.conn.getObjects("Image", images) ds = self.conn.getObject("Dataset", dataset[1]) link_array = list() for im in ims: new_dsl = omero.model.DatasetImageLinkI() new_dsl.setChild(im._obj) new_dsl.setParent(ds._obj) link_array.append(new_dsl) self.conn.saveArray(link_array) else: raise AttributeError("Destination not supported") def copyDatasetToProject(self, source, destination=None): if destination is not None and destination[0] != "project": ds = self.conn.getObject("Dataset", source[1]) pr = self.conn.getObject("Project", destination[1]) new_pdl = omero.model.ProjectDatasetLinkI() new_pdl.setChild(ds._obj) new_pdl.setParent(pr._obj) self.conn.saveObject(new_pdl) else: raise AttributeError("Destination not supported") def copyDatasetsToProject(self, datasets, project): if project is not None and project[0] != "project": dss = self.conn.getObjects("Dataset", datasets) pr = self.conn.getObject("Project", project[1]) link_array = list() for ds in dss: new_pdl = omero.model.ProjectDatasetLinkI() new_pdl.setChild(ds._obj) new_pdl.setParent(pr._obj) link_array.append(new_pdl) self.conn.saveArray(link_array) else: raise AttributeError("Destination not supported") def copyPlateToScreen(self, source, destination=None): if destination is not None and destination[0] != "screen": pl = self.conn.getObject("Plate", source[1]) sc = self.conn.getObject("Screen", destination[1]) new_spl = omero.model.ScreenPlateLinkI() new_spl.setChild(pl._obj) new_spl.setParent(sc._obj) self.conn.saveObject(new_spl) else: raise AttributeError("Destination not supported") def copyPlatesToScreen(self, plates, screen): if screen is not None and screen[0] != "screen": pls = self.conn.getObjects("Plate", plates) sc = self.conn.getObject("Screen", screen[1]) link_array = list() for pl in pls: new_spl = omero.model.ScreenPlateLinkI() new_spl.setChild(pl._obj) new_spl.setParent(sc._obj) link_array.append(new_spl) self.conn.saveArray(link_array) else: raise AttributeError("Destination not supported") ########################################################## # Delete def deleteItem(self, child=False, anns=False): handle = None if self.image: handle = self.conn.deleteObjects("Image", [self.image.id], deleteAnns=anns) elif self.dataset: handle = self.conn.deleteObjects("Dataset", [self.dataset.id], deleteChildren=child, deleteAnns=anns) elif self.project: handle = self.conn.deleteObjects("Project", [self.project.id],
deleteChildren=child, deleteAnns=anns) elif self.screen: handle = self.conn.deleteObjects("Screen", [self.screen.id], deleteChildren=child, deleteAnns=anns) elif self.plate: handle = self.conn.deleteObjects("Plate", [self.plate.id], deleteAnns=anns) elif self.comment: handle = self.conn.deleteObjects("Annotation", [self.comment.id], deleteAnns=anns) elif self.tag: handle = self.conn.deleteObjects("Annotation", [self.tag.id], deleteAnns=anns) elif self.file: handle = self.conn.deleteObjects("Annotation", [self.file.id], deleteAnns=anns) return handle def deleteObjects(self, otype, ids, child=False, anns=False): return self.conn.deleteObjects(otype, ids, deleteChildren=child, deleteAnns=anns) #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: import shade from shade import meta HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- module: os_server_volume short_description: Attach/Detach Volumes from OpenStack VMs extends_documentation_fragment: openstack version_added: "2.0" author: "Monty Taylor (@emonty)" description: - Attach or Detach volumes from OpenStack VMs options: state: description: - Should the resource be present or absent. choices: [present, absent] default: present required: false server: description: - Name or ID of server you want to attach a volume to required: true volume: description: - Name or ID of volume you want to attach to a server required: true device: description: - Device you want to attach. Defaults to auto finding a device name.
required: false default: None requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Attaches a volume to a compute host - name: attach a volume hosts: localhost tasks: - name: attach volume to host os_server_volume: state: present cloud: mordred server: Mysql-server volume: mysql-data device: /dev/vdb ''' def _system_state_change(state, device): """Check if system state would change.""" if state == 'present': if device: return False return True if state == 'absent': if device: return True return False return False def main(): argument_spec = openstack_full_argument_spec( server=dict(required=True), volume=dict(required=True), device=dict(default=None), # None == auto choose device name state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') state = module.params['state'] wait = module.params['wait'] timeout = module.params['timeout'] try: cloud = shade.openstack_cloud(**module.params) server = cloud.get_server(module.params['server']) volume = cloud.get_volume(module.params['volume']) dev = cloud.get_volume_attach_device(volume, server.id) if module.check_mode: module.exit_json(changed=_system_state_change(state, dev)) if state == 'present': if dev: # Volume is already attached to this server module.exit_json(changed=False) cloud.attach_volume(server, volume, module.params['device'], wait=wait, timeout=timeout) server = cloud.get_server(module.params['server']) # refresh volume = cloud.get_volume(module.params['volume']) # refresh hostvars = meta.get_hostvars_from_server(cloud, server) module.exit_json( changed=True, id=volume['id'], attachments=volume['attachments'], openstack=hostvars ) elif state == 'absent': if not dev: # Volume is not attached to this server module.exit_json(changed=False) cloud.detach_volume(server, volume, wait=wait, timeout=timeout) module.exit_json( changed=True, result='Detached volume from server' ) except (shade.OpenStackCloudException, shade.OpenStackCloudTimeout) as e: module.fail_json(msg=e.message) # this is magic, see lib/ansible/module_utils/common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main() #!/usr/bin/env python # # Copyright (C) 2011 Patrick Gansterer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Checks WebKit style for ChangeLog files.""" import re from common import TabChecker from webkitpy.common.net.bugzilla import parse_bug_id_from_changelog class ChangeLogChecker(object): """Processes text lines for checking style.""" def __init__(self, file_path, handle_style_error, should_line_be_checked): self.file_path = file_path self.handle_style_error = handle_style_error self.should_line_be_checked = should_line_be_checked self._tab_checker = TabChecker(file_path, handle_style_error) def check_entry(self, first_line_checked, entry_lines): if not entry_lines: return for line in entry_lines: if parse_bug_id_from_changelog(line): break if re.search("Unreviewed", line, re.IGNORECASE): break if re.search("build", line, re.IGNORECASE) and re.search("fix", line, re.IGNORECASE): break else: self.handle_style_error(first_line_checked, "changelog/bugnumber", 5, "ChangeLog entry has no bug number") def check(self, lines): self._tab_checker.check(lines) first_line_checked = 0 entry_lines = [] for line_index, line in enumerate(lines): if not self.should_line_be_checked(line_index + 1): # If we transitioned from finding changed lines to # unchanged lines, then we are done. if first_line_checked: break continue if not first_line_checked: first_line_checked = line_index + 1 entry_lines.append(line) self.check_entry(first_line_checked, entry_lines) #! /usr/bin/env python """Conversions to/from quoted-printable transport encoding as per RFC 1521.""" # (Dec 1991 version). __all__ = ["encode", "decode", "encodestring", "decodestring"] ESCAPE = '=' MAXLINESIZE = 76 HEX = '0123456789ABCDEF' EMPTYSTRING = '' try: from binascii import a2b_qp, b2a_qp except ImportError: a2b_qp = None b2a_qp = None def needsquoting(c, quotetabs, header): """Decide whether a particular character needs to be quoted. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. """ if c in ' \t': return quotetabs # if header, we have to escape _ because _ is used to escape space if c == '_': return header return c == ESCAPE or not (' ' <= c <= '~') def quote(c): """Quote a single character.""" i = ord(c) return ESCAPE + HEX[i//16] + HEX[i%16] def encode(input, output, quotetabs, header = 0): """Read 'input', apply quoted-printable encoding, and write to 'output'. 'input' and 'output' are files with readline() and write() methods. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. The 'header' flag indicates whether we are encoding spaces as _ as per RFC 1522. """ if b2a_qp is not None: data = input.read() odata = b2a_qp(data, quotetabs = quotetabs, header = header) output.write(odata) return def write(s, output=output, lineEnd='\n'): # RFC 1521 requires that the line ending in a space or tab must have # that trailing character encoded. 
if s and s[-1:] in ' \t': output.write(s[:-1] + quote(s[-1]) + lineEnd) elif s == '.': output.write(quote(s) + lineEnd) else: output.write(s + lineEnd) prevline = None while 1: line = input.readline() if not line: break outline = [] # Strip off any readline induced trailing newline stripped = '' if line[-1:] == '\n': line = line[:-1] stripped = '\n' # Calculate the un-length-limited encoded line for c in line: if needsquoting(c, quotetabs, header): c = quote(c) if header and c == ' ': outline.append('_') else: outline.append(c) # First, write out the previous line if prevline is not None: write(prevline) # Now see if we need any soft line breaks because of RFC-imposed # length limitations. Then do the thisline->prevline dance. thisline = EMPTYSTRING.join(outline) while len(thisline) > MAXLINESIZE: # Don't forget to include the soft line break `=' sign in the # length calculation! write(thisline[:MAXLINESIZE-1], lineEnd='=\n') thisline = thisline[MAXLINESIZE-1:] # Write out the current line prevline = thisline # Write out the last line, without a trailing newline if prevline is not None: write(prevline, lineEnd=stripped) def encodestring(s, quotetabs = 0, header = 0): if b2a_qp is not None: return b2a_qp(s, quotetabs = quotetabs, header = header) from cStringIO import StringIO infp = StringIO(s) outfp = StringIO() encode(infp, outfp, quotetabs, header) return outfp.getvalue() def decode(input, output, header = 0): """Read 'input', apply quoted-printable decoding, and write to 'output'. 'input' and 'output' are files with readline() and write() methods. If 'header' is true, decode underscore as space (per RFC 1522).""" if a2b_qp is not None: data = input.read() odata = a2b_qp(data, header = header) output.write(odata) return new = '' while 1: line = input.readline() if not line: break i, n = 0, len(line) if n > 0 and line[n-1] == '\n': partial = 0; n = n-1 # Strip trailing whitespace while n > 0 and line[n-1] in " \t\r": n = n-1 else: partial = 1 while i < n: c = line[i] if c == '_' and header: new = new + ' '; i = i+1 elif c != ESCAPE: new = new + c; i = i+1 elif i+1 == n and not partial: partial = 1; break elif i+1 < n and line[i+1] == ESCAPE: new = new + ESCAPE; i = i+2 elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]): new = new + chr(unhex(line[i+1:i+3])); i = i+3 else: # Bad escape sequence -- leave it in new = new + c; i = i+1 if not partial: output.write(new + '\n') new = '' if new: output.write(new) def decodestring(s, header = 0): if a2b_qp is not None: return a2b_qp(s, header = header) from cStringIO import StringIO infp = StringIO(s) outfp = StringIO() decode(infp, outfp, header = header) return outfp.getvalue() # Other helper functions def ishex(c): """Return true if the character 'c' is a hexadecimal digit.""" return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F' def unhex(s): """Get the integer value of a hexadecimal number.""" bits = 0 for c in s: if '0' <= c <= '9': i = ord('0') elif 'a' <= c <= 'f': i = ord('a')-10 elif 'A' <= c <= 'F': i = ord('A')-10 else: break bits = bits*16 + (ord(c) - i) return bits def main(): import sys import getopt try: opts, args = getopt.getopt(sys.argv[1:], 'td') except getopt.error, msg: sys.stdout = sys.stderr print msg print "usage: quopri [-t | -d] [file] ..." 
print "-t: quote tabs" print "-d: decode; default encode" sys.exit(2) deco = 0 tabs = 0 for o, a in opts: if o == '-t': tabs = 1 if o == '-d': deco = 1 if tabs and deco: sys.stdout = sys.stderr print "-t and -d are mutually exclusive" sys.exit(2) if not args: args = ['-'] sts = 0 for file in args: if file == '-': fp = sys.stdin else: try: fp = open(file) except IOError, msg: sys.stderr.write("%s: can't open (%s)\n" % (file, msg)) sts = 1 continue if deco: decode(fp, sys.stdout) else: encode(fp, sys.stdout, tabs) if fp is not sys.stdin: fp.close() if sts: sys.exit(sts) if __name__ == '__main__': main() """ sentry.models.release ~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function from django.db import models from django.utils import timezone from hashlib import md5 from jsonfield import JSONField from sentry.db.models import ( BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr ) from sentry.utils.cache import cache class Release(Model): """ A release is generally created when a new version is pushed into a production state. """ __core__ = False project = FlexibleForeignKey('sentry.Project') version = models.CharField(max_length=64) # ref might be the branch name being released ref = models.CharField(max_length=64, null=True, blank=True) url = models.URLField(null=True, blank=True) date_added = models.DateTimeField(default=timezone.now) date_started = models.DateTimeField(null=True, blank=True) date_released = models.DateTimeField(null=True, blank=True) # arbitrary data recorded with the release data = JSONField(default={}) new_groups = BoundedPositiveIntegerField(default=0) class Meta: app_label = 'sentry' db_table = 'sentry_release' unique_together = (('project', 'version'),) __repr__ = sane_repr('project_id', 'version') @classmethod def get_cache_key(cls, project_id, version): return 'release:2:%s:%s' % (project_id, md5(version).hexdigest()) @classmethod def get(cls, project, version): cache_key = cls.get_cache_key(project.id, version) release = cache.get(cache_key) if release is None: try: release = cls.objects.get( project=project, version=version, ) except cls.DoesNotExist: release = -1 cache.set(cache_key, release, 300) if release == -1: return return release @classmethod def get_or_create(cls, project, version, date_added): cache_key = cls.get_cache_key(project.id, version) release = cache.get(cache_key) if release in (None, -1): # TODO(dcramer): if the cache result is -1 we could attempt a # default create here instead of default get release = cls.objects.get_or_create( project=project, version=version, defaults={ 'date_added': date_added, }, )[0] cache.set(cache_key, release, 3600) return release #!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. description: - Creates and removes firewall rules. version_added: '2.0' author: "René Moser (@resmo)" options: ip_address: description: - Public IP address the ingress rule is assigned to. - Required if C(type=ingress). required: false default: null network: description: - Network the egress rule is related to. - Required if C(type=egress). required: false default: null state: description: - State of the firewall rule. required: false default: 'present' choices: [ 'present', 'absent' ] type: description: - Type of the firewall rule. required: false default: 'ingress' choices: [ 'ingress', 'egress' ] protocol: description: - Protocol of the firewall rule. - C(all) is only available if C(type=egress) required: false default: 'tcp' choices: [ 'tcp', 'udp', 'icmp', 'all' ] cidr: description: - CIDR (full notation) to be used for firewall rule. required: false default: '0.0.0.0/0' start_port: description: - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null aliases: [ 'port' ] end_port: description: - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equals C(start_port). required: false default: null icmp_type: description: - Type of the icmp message being sent. Considered if C(protocol=icmp). required: false default: null icmp_code: description: - Error code for this icmp message. Considered if C(protocol=icmp). required: false default: null domain: description: - Domain the firewall rule is related to. required: false default: null account: description: - Account the firewall rule is related to. required: false default: null project: description: - Name of the project the firewall rule is related to. required: false default: null zone: description: - Name of the zone in which the virtual machine is. - If not set, default zone is used. required: false default: null poll_async: description: - Poll async jobs until job has finished. required: false default: true extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 - local_action: module: cs_firewall ip_address: 4.3.2.1 port: 80 cidr: 1.2.3.4/32 # Allow inbound tcp/udp port 53 to 4.3.2.1 - local_action: module: cs_firewall ip_address: 4.3.2.1 port: 53 protocol: '{{ item }}' with_items: - tcp - udp # Ensure firewall rule is removed - local_action: module: cs_firewall ip_address: 4.3.2.1 start_port: 8000 end_port: 8888 cidr: 17.0.0.0/8 state: absent # Allow all outbound traffic - local_action: module: cs_firewall network: my_network type: egress protocol: all # Allow only HTTP outbound traffic for an IP - local_action: module: cs_firewall network: my_network type: egress port: 80 cidr: 10.101.1.20 ''' RETURN = ''' --- id: description: UUID of the rule. returned: success type: string sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 ip_address: description: IP address of the rule if C(type=ingress) returned: success type: string sample: 10.100.212.10 type: description: Type of the rule. returned: success type: string sample: ingress cidr: description: CIDR of the rule. returned: success type: string sample: 0.0.0.0/0 protocol: description: Protocol of the rule. returned: success type: string sample: tcp start_port: description: Start port of the rule.
returned: success type: int sample: 80 end_port: description: End port of the rule. returned: success type: int sample: 80 icmp_code: description: ICMP code of the rule. returned: success type: int sample: 1 icmp_type: description: ICMP type of the rule. returned: success type: int sample: 1 network: description: Name of the network if C(type=egress) returned: success type: string sample: my_network ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, CloudStackException, cs_argument_spec, cs_required_together ) class AnsibleCloudStackFirewall(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackFirewall, self).__init__(module) self.returns = { 'cidrlist': 'cidr', 'startport': 'start_port', 'endport': 'end_port', 'protocol': 'protocol', 'ipaddress': 'ip_address', 'icmpcode': 'icmp_code', 'icmptype': 'icmp_type', } self.firewall_rule = None self.network = None def get_firewall_rule(self): if not self.firewall_rule: cidr = self.module.params.get('cidr') protocol = self.module.params.get('protocol') start_port = self.module.params.get('start_port') end_port = self.get_or_fallback('end_port', 'start_port') icmp_code = self.module.params.get('icmp_code') icmp_type = self.module.params.get('icmp_type') fw_type = self.module.params.get('type') if protocol in ['tcp', 'udp'] and not (start_port and end_port): self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol) if protocol == 'icmp' and not icmp_type: self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type") if protocol == 'all' and fw_type != 'egress': self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'") args = { 'account': self.get_account('name'), 'domainid': self.get_domain('id'), 'projectid': self.get_project('id') } if fw_type == 'egress': args['networkid'] = self.get_network(key='id') if not args['networkid']: self.module.fail_json(msg="missing required argument for type egress: network") firewall_rules = self.cs.listEgressFirewallRules(**args) else: args['ipaddressid'] = self.get_ip_address('id') if not args['ipaddressid']: self.module.fail_json(msg="missing required argument for type ingress: ip_address") firewall_rules = self.cs.listFirewallRules(**args) if firewall_rules and 'firewallrule' in firewall_rules: for rule in firewall_rules['firewallrule']: type_match = self._type_cidr_match(rule, cidr) protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) or self._icmp_match(rule, protocol, icmp_code, icmp_type) or self._egress_all_match(rule, protocol, fw_type) ) if type_match and protocol_match: self.firewall_rule = rule break return self.firewall_rule def _tcp_udp_match(self, rule, protocol, start_port, end_port): return ( protocol in ['tcp', 'udp'] and protocol == rule['protocol'] and start_port == int(rule['startport']) and end_port == int(rule['endport']) ) def _egress_all_match(self, rule, protocol, fw_type): return ( protocol in ['all'] and protocol == rule['protocol'] and fw_type == 'egress' )
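# Illustrative sketch (added commentary, not part of the original
# module): get_firewall_rule() compares each rule dict returned by the
# CloudStack API against the module parameters via the helpers above,
# e.g. for a hypothetical rule:
#
#     rule = {'protocol': 'tcp', 'startport': '80', 'endport': '80',
#             'cidrlist': '0.0.0.0/0'}
#     self._tcp_udp_match(rule, 'tcp', 80, 80)        # -> True
#     self._egress_all_match(rule, 'tcp', 'egress')   # -> False (protocol != 'all')
#
# The int() casts in _tcp_udp_match tolerate ports coming back from the
# API as strings, as in the sketch above.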