diff --git "a/codeparrot-valid_1042.txt" "b/codeparrot-valid_1042.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1042.txt" @@ -0,0 +1,10000 @@ +# +# Texts in x-mac-hebrew are almost impossible to find on the Internet. From +# what little evidence I could find, it seems that its general directionality +# is Logical. +# +# To sum up all of the above, the Hebrew probing mechanism knows about two +# charsets: +# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are +# backwards while line order is natural. For charset recognition purposes +# the line order is unimportant (In fact, for this implementation, even +# word order is unimportant). +# Logical Hebrew - "windows-1255" - normal, naturally ordered text. +# +# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be +# specifically identified. +# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew +# that contain special punctuation marks or diacritics is displayed with +# some unconverted characters showing as question marks. This problem might +# be corrected using another model prober for x-mac-hebrew. Due to the fact +# that x-mac-hebrew texts are so rare, writing another model prober isn't +# worth the effort and performance hit. +# +#### The Prober #### +# +# The prober is divided between two SBCharSetProbers and a HebrewProber, +# all of which are managed, created, fed data, inquired and deleted by the +# SBCSGroupProber. The two SBCharSetProbers identify that the text is in +# fact some kind of Hebrew, Logical or Visual. The final decision about which +# one is it is made by the HebrewProber by combining final-letter scores +# with the scores of the two SBCharSetProbers to produce a final answer. +# +# The SBCSGroupProber is responsible for stripping the original text of HTML +# tags, English characters, numbers, low-ASCII punctuation characters, spaces +# and new lines. It reduces any sequence of such characters to a single space. +# The buffer fed to each prober in the SBCS group prober is pure text in +# high-ASCII. +# The two SBCharSetProbers (model probers) share the same language model: +# Win1255Model. +# The first SBCharSetProber uses the model normally as any other +# SBCharSetProber does, to recognize windows-1255, upon which this model was +# built. The second SBCharSetProber is told to make the pair-of-letter +# lookup in the language model backwards. This in practice exactly simulates +# a visual Hebrew model using the windows-1255 logical Hebrew model. +# +# The HebrewProber is not using any language model. All it does is look for +# final-letter evidence suggesting the text is either logical Hebrew or visual +# Hebrew. Disjointed from the model probers, the results of the HebrewProber +# alone are meaningless. HebrewProber always returns 0.00 as confidence +# since it never identifies a charset by itself. Instead, the pointer to the +# HebrewProber is passed to the model probers as a helper "Name Prober". +# When the Group prober receives a positive identification from any prober, +# it asks for the name of the charset identified. If the prober queried is a +# Hebrew model prober, the model prober forwards the call to the +# HebrewProber to make the final decision. In the HebrewProber, the +# decision is made according to the final-letters scores maintained and Both +# model probers scores. The answer is returned in the form of the name of the +# charset identified, either "windows-1255" or "ISO-8859-8". 
+ +class HebrewProber(CharSetProber): + # windows-1255 / ISO-8859-8 code points of interest + FINAL_KAF = 0xea + NORMAL_KAF = 0xeb + FINAL_MEM = 0xed + NORMAL_MEM = 0xee + FINAL_NUN = 0xef + NORMAL_NUN = 0xf0 + FINAL_PE = 0xf3 + NORMAL_PE = 0xf4 + FINAL_TSADI = 0xf5 + NORMAL_TSADI = 0xf6 + + # Minimum Visual vs Logical final letter score difference. + # If the difference is below this, don't rely solely on the final letter score + # distance. + MIN_FINAL_CHAR_DISTANCE = 5 + + # Minimum Visual vs Logical model score difference. + # If the difference is below this, don't rely at all on the model score + # distance. + MIN_MODEL_DISTANCE = 0.01 + + VISUAL_HEBREW_NAME = "ISO-8859-8" + LOGICAL_HEBREW_NAME = "windows-1255" + + def __init__(self): + super(HebrewProber, self).__init__() + self._final_char_logical_score = None + self._final_char_visual_score = None + self._prev = None + self._before_prev = None + self._logical_prober = None + self._visual_prober = None + self.reset() + + def reset(self): + self._final_char_logical_score = 0 + self._final_char_visual_score = 0 + # The two last characters seen in the previous buffer, + # mPrev and mBeforePrev are initialized to space in order to simulate + # a word delimiter at the beginning of the data + self._prev = ' ' + self._before_prev = ' ' + # These probers are owned by the group prober. + + def set_model_probers(self, logicalProber, visualProber): + self._logical_prober = logicalProber + self._visual_prober = visualProber + + def is_final(self, c): + return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, + self.FINAL_PE, self.FINAL_TSADI] + + def is_non_final(self, c): + # The normal Tsadi is not a good Non-Final letter due to words like + # 'lechotet' (to chat) containing an apostrophe after the tsadi. This + # apostrophe is converted to a space in FilterWithoutEnglishLetters + # causing the Non-Final tsadi to appear at an end of a word even + # though this is not the case in the original text. + # The letters Pe and Kaf rarely display a related behavior of not being + # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' + # for example legally end with a Non-Final Pe or Kaf. However, the + # benefit of these letters as Non-Final letters outweighs the damage + # since these words are quite rare. + return c in [self.NORMAL_KAF, self.NORMAL_MEM, + self.NORMAL_NUN, self.NORMAL_PE] + + def feed(self, byte_str): + # Final letter analysis for logical-visual decision. + # Look for evidence that the received buffer is either logical Hebrew + # or visual Hebrew. + # The following cases are checked: + # 1) A word longer than 1 letter, ending with a final letter. This is + # an indication that the text is laid out "naturally" since the + # final letter really appears at the end. +1 for logical score. + # 2) A word longer than 1 letter, ending with a Non-Final letter. In + # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, + # should not end with the Non-Final form of that letter. Exceptions + # to this rule are mentioned above in isNonFinal(). This is an + # indication that the text is laid out backwards. +1 for visual + # score + # 3) A word longer than 1 letter, starting with a final letter. Final + # letters should not appear at the beginning of a word. This is an + # indication that the text is laid out backwards. +1 for visual + # score. + # + # The visual score and logical score are accumulated throughout the + # text and are finally checked against each other in GetCharSetName(). 
+ # No checking for final letters in the middle of words is done since + # that case is not an indication for either Logical or Visual text. + # + # We automatically filter out all 7-bit characters (replace them with + # spaces) so the word boundary detection works properly. [MAP] + + if self.state == ProbingState.NOT_ME: + # Both model probers say it's not them. No reason to continue. + return ProbingState.NOT_ME + + byte_str = self.filter_high_byte_only(byte_str) + + for cur in byte_str: + if cur == ' ': + # We stand on a space - a word just ended + if self._before_prev != ' ': + # next-to-last char was not a space so self._prev is not a + # 1 letter word + if self.is_final(self._prev): + # case (1) [-2:not space][-1:final letter][cur:space] + self._final_char_logical_score += 1 + elif self.is_non_final(self._prev): + # case (2) [-2:not space][-1:Non-Final letter][ + # cur:space] + self._final_char_visual_score += 1 + else: + # Not standing on a space + if ((self._before_prev == ' ') and + (self.is_final(self._prev)) and (cur != ' ')): + # case (3) [-2:space][-1:final letter][cur:not space] + self._final_char_visual_score += 1 + self._before_prev = self._prev + self._prev = cur + + # Forever detecting, till the end or until both model probers return + # ProbingState.NOT_ME (handled above) + return ProbingState.DETECTING + + @property + def charset_name(self): + # Make the decision: is it Logical or Visual? + # If the final letter score distance is dominant enough, rely on it. + finalsub = self._final_char_logical_score - self._final_char_visual_score + if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: + return self.VISUAL_HEBREW_NAME + + # It's not dominant enough, try to rely on the model scores instead. + modelsub = (self._logical_prober.get_confidence() + - self._visual_prober.get_confidence()) + if modelsub > self.MIN_MODEL_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if modelsub < -self.MIN_MODEL_DISTANCE: + return self.VISUAL_HEBREW_NAME + + # Still no good, back to final letter distance, maybe it'll save the + # day. + if finalsub < 0.0: + return self.VISUAL_HEBREW_NAME + + # (finalsub > 0 - Logical) or (don't know what to do) default to + # Logical. + return self.LOGICAL_HEBREW_NAME + + @property + def language(self): + return 'Hebrew' + + @property + def state(self): + # Remain active as long as any of the model probers are active. 
+ if (self._logical_prober.state == ProbingState.NOT_ME) and \ + (self._visual_prober.state == ProbingState.NOT_ME): + return ProbingState.NOT_ME + return ProbingState.DETECTING + +""" +Unittests for creating a course in an chosen modulestore +""" +import unittest +import ddt +from django.core.management import CommandError, call_command + +from contentstore.management.commands.create_course import Command +from xmodule.modulestore import ModuleStoreEnum +from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase +from xmodule.modulestore.django import modulestore + + +class TestArgParsing(unittest.TestCase): + """ + Tests for parsing arguments for the `create_course` management command + """ + def setUp(self): + self.command = Command() + + def test_no_args(self): + errstring = "create_course requires 5 arguments" + with self.assertRaisesRegexp(CommandError, errstring): + self.command.handle('create_course') + + def test_invalid_store(self): + with self.assertRaises(CommandError): + self.command.handle("foo", "user@foo.org", "org", "course", "run") + + def test_xml_store(self): + with self.assertRaises(CommandError): + self.command.handle(ModuleStoreEnum.Type.xml, "user@foo.org", "org", "course", "run") + + def test_nonexistent_user_id(self): + errstring = "No user 99 found" + with self.assertRaisesRegexp(CommandError, errstring): + self.command.handle("split", "99", "org", "course", "run") + + def test_nonexistent_user_email(self): + errstring = "No user fake@example.com found" + with self.assertRaisesRegexp(CommandError, errstring): + self.command.handle("mongo", "fake@example.com", "org", "course", "run") + + +@ddt.ddt +class TestCreateCourse(ModuleStoreTestCase): + """ + Unit tests for creating a course in either old mongo or split mongo via command line + """ + + def setUp(self): + super(TestCreateCourse, self).setUp(create_user=True) + + @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split) + def test_all_stores_user_email(self, store): + call_command( + "create_course", + store, + str(self.user.email), + "org", "course", "run" + ) + new_key = modulestore().make_course_key("org", "course", "run") + self.assertTrue( + modulestore().has_course(new_key), + "Could not find course in {}".format(store) + ) + # pylint: disable=protected-access + self.assertEqual(store, modulestore()._get_modulestore_for_courseid(new_key).get_modulestore_type()) + +from sympy import S, symbols, I, atan, log, Poly, sqrt, simplify, integrate + +from sympy.integrals.rationaltools import ratint, ratint_logpart, log_to_atan + +from sympy.abc import a, b, x, t + +half = S(1)/2 + + +def test_ratint(): + assert ratint(S(0), x) == 0 + assert ratint(S(7), x) == 7*x + + assert ratint(x, x) == x**2/2 + assert ratint(2*x, x) == x**2 + assert ratint(-2*x, x) == -x**2 + + assert ratint(8*x**7 + 2*x + 1, x) == x**8 + x**2 + x + + f = S(1) + g = x + 1 + + assert ratint(f / g, x) == log(x + 1) + assert ratint((f, g), x) == log(x + 1) + + f = x**3 - x + g = x - 1 + + assert ratint(f/g, x) == x**3/3 + x**2/2 + + f = x + g = (x - a)*(x + a) + + assert ratint(f/g, x) == log(x**2 - a**2)/2 + + f = S(1) + g = x**2 + 1 + + assert ratint(f/g, x, real=None) == atan(x) + assert ratint(f/g, x, real=True) == atan(x) + + assert ratint(f/g, x, real=False) == I*log(x + I)/2 - I*log(x - I)/2 + + f = S(36) + g = x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2 + + assert ratint(f/g, x) == \ + -4*log(x + 1) + 4*log(x - 2) + (12*x + 6)/(x**2 - 1) + + f = x**4 - 3*x**2 + 6 + g = x**6 - 5*x**4 + 5*x**2 + 4 + + assert ratint(f/g, 
x) == \ + atan(x) + atan(x**3) + atan(x/2 - 3*x**S(3)/2 + S(1)/2*x**5) + + f = x**7 - 24*x**4 - 4*x**2 + 8*x - 8 + g = x**8 + 6*x**6 + 12*x**4 + 8*x**2 + + assert ratint(f/g, x) == \ + (4 + 6*x + 8*x**2 + 3*x**3)/(4*x + 4*x**3 + x**5) + log(x) + + assert ratint((x**3*f)/(x*g), x) == \ + -(12 - 16*x + 6*x**2 - 14*x**3)/(4 + 4*x**2 + x**4) - \ + 5*sqrt(2)*atan(x*sqrt(2)/2) + S(1)/2*x**2 - 3*log(2 + x**2) + + f = x**5 - x**4 + 4*x**3 + x**2 - x + 5 + g = x**4 - 2*x**3 + 5*x**2 - 4*x + 4 + + assert ratint(f/g, x) == \ + x + S(1)/2*x**2 + S(1)/2*log(2 - x + x**2) - (4*x - 9)/(14 - 7*x + 7*x**2) + \ + 13*sqrt(7)*atan(-S(1)/7*sqrt(7) + 2*x*sqrt(7)/7)/49 + + assert ratint(1/(x**2 + x + 1), x) == \ + 2*sqrt(3)*atan(sqrt(3)/3 + 2*x*sqrt(3)/3)/3 + + assert ratint(1/(x**3 + 1), x) == \ + -log(1 - x + x**2)/6 + log(1 + x)/3 + sqrt(3)*atan(-sqrt(3) + /3 + 2*x*sqrt(3)/3)/3 + + assert ratint(1/(x**2 + x + 1), x, real=False) == \ + -I*3**half*log(half + x - half*I*3**half)/3 + \ + I*3**half*log(half + x + half*I*3**half)/3 + + assert ratint(1/(x**3 + 1), x, real=False) == log(1 + x)/3 + \ + (-S(1)/6 + I*3**half/6)*log(-half + x + I*3**half/2) + \ + (-S(1)/6 - I*3**half/6)*log(-half + x - I*3**half/2) + + # issue 4991 + assert ratint(1/(x*(a + b*x)**3), x) == \ + (3*a + 2*b*x)/(2*a**4 + 4*a**3*b*x + 2*a**2*b**2*x**2) + ( + log(x) - log(a/b + x))/a**3 + + assert ratint(x/(1 - x**2), x) == -log(x**2 - 1)/2 + assert ratint(-x/(1 - x**2), x) == log(x**2 - 1)/2 + + assert ratint((x/4 - 4/(1 - x)).diff(x), x) == x/4 + 4/(x - 1) + + ans = atan(x) + assert ratint(1/(x**2 + 1), x, symbol=x) == ans + assert ratint(1/(x**2 + 1), x, symbol='x') == ans + assert ratint(1/(x**2 + 1), x, symbol=a) == ans + + +def test_ratint_logpart(): + assert ratint_logpart(x, x**2 - 9, x, t) == \ + [(Poly(x**2 - 9, x), Poly(-2*t + 1, t))] + assert ratint_logpart(x**2, x**3 - 5, x, t) == \ + [(Poly(x**3 - 5, x), Poly(-3*t + 1, t))] + + +def test_issue_5414(): + assert ratint(1/(x**2 + 16), x) == atan(x/4)/4 + + +def test_issue_5249(): + assert ratint( + 1/(x**2 + a**2), x) == (-I*log(-I*a + x)/2 + I*log(I*a + x)/2)/a + + +def test_issue_5817(): + a, b, c = symbols('a,b,c', positive=True) + + assert simplify(ratint(a/(b*c*x**2 + a**2 + b*a), x)) == \ + sqrt(a)*atan(sqrt( + b)*sqrt(c)*x/(sqrt(a)*sqrt(a + b)))/(sqrt(b)*sqrt(c)*sqrt(a + b)) + + +def test_issue_5981(): + u = symbols('u') + assert integrate(1/(u**2 + 1)) == atan(u) + + +def test_log_to_atan(): + f, g = (Poly(x + S(1)/2, x, domain='QQ'), Poly(sqrt(3)/2, x, domain='EX')) + fg_ans = 2*atan(2*sqrt(3)*x/3 + sqrt(3)/3) + assert log_to_atan(f, g) == fg_ans + assert log_to_atan(g, f) == -fg_ans + +import tensorflow as tf +import numpy as np +from ionotomo.settings import TFSettings +from ionotomo.tomography.interpolation import RegularGridInterpolator +from ionotomo.tomography.integrate import simps + +class RayOp(object): + r"""Linear operator that performs for any v(x) + + h[i1,...,ir] = \int_R[i1,...,ir] ds M(x) v(x) + + grid : tuple of ndim Tensors specifying grid coordinates used for interpolation + M : the function over V to integrate, defined on the *grid* + rays : Tensor with *r* ray index dimensions and last dim is size ndim + Defines the ray trajectories over which to integrate. 
+ Shape (i1,...,ir, ndim, N) + transpose : bool + If True then Av represents \sum_R \Delta_R(x) v_R M(x) + """ + def __init__(self,grid,M,rays,dx = None, + weight = None, transpose = False, + dtype=TFSettings.tf_float): + self.dtype = dtype + self.grid = grid + self.rays = tf.cast(rays,TFSettings.tf_float) + if dx is None: + self.dx = tf.sqrt(tf.reduce_sum(tf.square(self.rays[...,1:] - self.rays[...,:-1]),axis=-2)) + self.dx = tf.cumsum(tf.concat([tf.zeros_like(self.dx[...,0:1]),self.dx],axis=-1),axis=-1) + else: + nd = tf.size(tf.shape(rays)) + dxshape = tf.concat([tf.ones_like(tf.shape(rays)[0:-2]), + tf.shape(rays)[nd-1:nd]],axis=0) + self.dx = tf.reshape(dx,dxshape) + if weight is not None: + self.weight = tf.reshape(tf.cast(weight,self.dtype),self.range_shape()) + else: + self.weight = None + self.M = tf.cast(M,self.dtype) + self.transpose = transpose + + def domain_shape(self): + return tf.shape(self.M) + + def range_shape(self): + return tf.shape(self.rays)[:-2] + + def shape(self): + return tf.concat([self.range_shape(),self.domain_shape()],axis=0) + + def matmul(self,x,adjoint=False,adjoint_arg=False): + '''Transform [batch] matrix x with left multiplication: x --> Ax. + + x: Tensor with compatible shape and same dtype as self. + See class docstring for definition of compatibility. + adjoint: Python bool. If True, left multiply by the adjoint: A^H x. + adjoint_arg: Python bool. + If True, compute A x^H where x^H is the hermitian transpose + (transposition and complex conjugation). + name: A name for this `Op. + + Returns: + A Tensor with shape [..., M, R] and same dtype as self. + ''' + + x = tf.cast(x,self.dtype) + Ax = self.M * x + Ax = RegularGridInterpolator(self.grid,Ax,method='linear') + if self.weight is None: + Ax = Ax(tf.unstack(self.rays,axis=-2)) + else: + Ax = self.weight*Ax(tf.unstack(self.rays,axis=-2)) + Ax = simps(Ax, self.dx,axis = -1) + return Ax + +class TECForwardEquation(RayOp): + def __init__(self,i0, grid,M,rays,dx = None, + weight = None, transpose = False, + dtype=TFSettings.tf_float): + super(TECForwardEquation,self).__init__(grid,M,rays,dx, + weight, transpose, dtype) + self.i0 = tf.cast(i0,TFSettings.tf_int) + def matmul(self,x,adjoint=False,adjoint_arg=False): + '''Transform [batch] matrix x with left multiplication: x --> Ax. + + x: Tensor with compatible shape and same dtype as self. + See class docstring for definition of compatibility. + adjoint: Python bool. If True, left multiply by the adjoint: A^H x. + adjoint_arg: Python bool. + If True, compute A x^H where x^H is the hermitian transpose + (transposition and complex conjugation). + name: A name for this `Op. + + Returns: + A Tensor with shape [..., M, R] and same dtype as self. + ''' + Ax = super(TECForwardEquation,self).matmul(x) + Ax = Ax - Ax[self.i0:self.i0+1, ...] + return Ax + +if __name__ == '__main__': + rays = np.sort(np.random.uniform(size=[2,2,3,6]),axis=-1) + M = np.random.normal(size=(100,100,100)) + grid = (np.linspace(0,1,100),)*3 + op = TECForwardEquation(0,grid, M, rays) + x = np.random.normal(size=(100,100,100)) + sess = tf.Session() + print(sess.run(op.matmul(x))) + sess.close() + +# system call counts +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide system call totals, broken down by syscall. +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. 
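+#
+# Example invocation (a sketch; the record step and event name are assumptions,
+# only "perf script -s syscall-counts.py [comm]" is documented by this script):
+#
+#   perf record -e raw_syscalls:sys_enter -a -- sleep 10
+#   perf script -s syscall-counts.py           # totals for all processes
+#   perf script -s syscall-counts.py firefox   # only syscalls made by "firefox"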
+ +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * +from Util import syscall_name + +usage = "perf script -s syscall-counts.py [comm]\n"; + +for_comm = None + +if len(sys.argv) > 2: + sys.exit(usage) + +if len(sys.argv) > 1: + for_comm = sys.argv[1] + +syscalls = autodict() + +def trace_begin(): + print "Press control+C to stop and show the summary" + +def trace_end(): + print_syscall_totals() + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, id, args): + if for_comm is not None: + if common_comm != for_comm: + return + try: + syscalls[id] += 1 + except TypeError: + syscalls[id] = 1 + +def syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + raw_syscalls__sys_enter(**locals()) + +def print_syscall_totals(): + if for_comm is not None: + print "\nsyscall events for %s:\n\n" % (for_comm), + else: + print "\nsyscall events:\n\n", + + print "%-40s %10s\n" % ("event", "count"), + print "%-40s %10s\n" % ("----------------------------------------", \ + "-----------"), + + for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ + reverse = True): + print "%-40s %10d\n" % (syscall_name(id), val), + +# Copyright 2014 Mirantis Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import oslo.messaging + +from ceilometer import plugin +from ceilometer import sample + +OPTS = [ + cfg.StrOpt('keystone_control_exchange', + default='keystone', + help="Exchange name for Keystone notifications."), +] + + +cfg.CONF.register_opts(OPTS) + +SERVICE = 'identity' + + +class _Base(plugin.NotificationBase): + """Convert identity notification into Samples.""" + + resource_type = None + resource_name = None + + @staticmethod + def get_targets(conf): + """Return a sequence of oslo.messaging.Target + + Sequence defining the exchange and topics to be connected for this + plugin. 
+ """ + return [oslo.messaging.Target(topic=topic, + exchange=conf.keystone_control_exchange) + for topic in conf.notification_topics] + + +class IdentityCRUD(_Base): + def process_notification(self, message): + yield sample.Sample.from_notification( + name=message['event_type'], + type=sample.TYPE_DELTA, + unit=self.resource_type, + volume=1, + resource_id=message['payload']['resource_info'], + user_id=None, + project_id=None, + message=message) + + +class User(IdentityCRUD): + + resource_type = 'user' + resource_name = '%s.%s' % (SERVICE, resource_type) + + @property + def event_types(self): + return ['%s.*' % self.resource_name] + + +class Group(IdentityCRUD): + + resource_type = 'group' + resource_name = '%s.%s' % (SERVICE, resource_type) + + @property + def event_types(self): + return ['%s.*' % self.resource_name] + + +class Project(IdentityCRUD): + + resource_type = 'project' + resource_name = '%s.%s' % (SERVICE, resource_type) + + @property + def event_types(self): + return ['%s.*' % self.resource_name] + + +class Role(IdentityCRUD): + + resource_type = 'role' + resource_name = '%s.%s' % (SERVICE, resource_type) + + @property + def event_types(self): + return ['%s.*' % self.resource_name] + + +class Trust(IdentityCRUD): + + resource_type = 'trust' + resource_name = '%s.%s' % (SERVICE, resource_type) + + @property + def event_types(self): + return [ + '%s.created' % self.resource_name, + '%s.deleted' % self.resource_name, + ] + + +class Authenticate(_Base): + """Convert identity authentication notifications into Samples.""" + + resource_type = 'authenticate' + event_name = '%s.%s' % (SERVICE, resource_type) + + def process_notification(self, message): + outcome = message['payload']['outcome'] + meter_name = '%s.%s.%s' % (SERVICE, self.resource_type, outcome) + + yield sample.Sample.from_notification( + name=meter_name, + type=sample.TYPE_DELTA, + unit='user', + volume=1, + resource_id=message['payload']['initiator']['id'], + user_id=message['payload']['initiator']['id'], + project_id=None, + message=message) + + @property + def event_types(self): + return [self.event_name] + + +class RoleAssignment(_Base): + """Convert role assignment notifications into Samples.""" + + resource_type = 'role_assignment' + resource_name = '%s.%s' % (SERVICE, resource_type) + + def process_notification(self, message): + # NOTE(stevemar): action is created.role_assignment + action = message['payload']['action'] + event, resource_type = action.split(".") + + # NOTE(stevemar): meter_name is identity.role_assignment.created + meter_name = '%s.%s.%s' % (SERVICE, resource_type, event) + + yield sample.Sample.from_notification( + name=meter_name, + type=sample.TYPE_DELTA, + unit=self.resource_type, + volume=1, + resource_id=message['payload']['role'], + user_id=message['payload']['initiator']['id'], + project_id=None, + message=message) + + @property + def event_types(self): + return [ + '%s.created' % self.resource_name, + '%s.deleted' % self.resource_name, + ] + +""" +Example demonstrating how to fit a complex H-alpha profile after subtracting off a satellite line +(in this case, He I 6678.151704) +""" +import pyspeckit + +sp = pyspeckit.OpticalSpectrum('sn2009ip_halpha.fits') + +# start by plotting a small region around the H-alpha line +sp.plotter(xmin=6100,xmax=7000,ymax=2.23,ymin=0) + +# the baseline (continuum) fit will be 2nd order, and excludes "bad" +# parts of the spectrum +# The exclusion zone was selected interatively (i.e., cursor hovering over the spectrum) +sp.baseline(xmin=6100, xmax=7000, + 
exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231], + subtract=False, reset_selection=True, highlight_fitregion=True, + order=2) + +# Fit a 4-parameter voigt (figured out through a series if guess and check fits) +sp.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1, + 0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1, + 0.11957267912208754, 6678.3853431367716, 4.1892742162283181, 1, + 0.10506431180136294, 6589.9310414408683, 72.378997529374672, 1,], + fittype='voigt') + +# Now overplot the fitted components with an offset so we can see them +# the add_baseline=True bit means that each component will be displayed with the "Continuum" added +# If this was off, the components would be displayed at y=0 +# the component_yoffset is the offset to add to the continuum for plotting only (a constant) +sp.specfit.plot_components(add_baseline=True,component_yoffset=-0.2) + +# Now overplot the residuals on the same graph by specifying which axis to overplot it on +# clear=False is needed to keep the original fitted plot drawn +# yoffset is the offset from y=zero +sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=0.20,label=False) + +# save the figure +sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoom.png") + +# print the fit results in table form +# This includes getting the equivalent width for each component using sp.specfit.EQW +print " ".join(["%15s %15s" % (s,s+"err") for s in sp.specfit.parinfo.parnames])," ".join(["%15s" % ("EQW"+str(i)) for i,w in enumerate(sp.specfit.EQW(components=True))]) +print " ".join(["%15g %15g" % (par.value,par.error) for par in sp.specfit.parinfo])," ".join(["%15g" % w for w in sp.specfit.EQW(components=True)]) + +# here are some other fitted parameters that can be printed: +print "Fitted EQW:", sp.specfit.EQW() +print "Direct EQW:", sp.specfit.EQW(fitted=False) +print "Approximate FWHM:", sp.specfit.measure_approximate_fwhm() +print "Approximate FWHM (with interpolation):", sp.specfit.measure_approximate_fwhm(interpolate_factor=10) + +# zoom in further for a detailed view of the profile fit +sp.plotter.axis.set_xlim(6562-150,6562+150) +sp.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_zoomzoom.png") + +# now we'll re-do the fit with the He I line subtracted off +# first, create a copy of the spectrum +just_halpha = sp.copy() + +# Second, subtract off the model fit for the He I component +# (identify it by looking at the fitted central wavelengths) +just_halpha.data -= sp.specfit.modelcomponents[2,:] + +# re-plot +just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3) + +# this time, subtract off the baseline - we're now confident that the continuum +# fit is good enough +just_halpha.baseline(xmin=6100, xmax=7000, + exclude=[6450,6746,6815,6884,7003,7126,7506,7674,8142,8231], + subtract=True, reset_selection=True, highlight_fitregion=True, order=2) + +# Do a 3-component fit now that the Helium line is gone +# I've added some limits here because I know what parameters I expect of my fitted line +just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1, + 0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1, + 0.10506431180136294, 6589.9310414408683, 50.378997529374672, 1,], + fittype='voigt', + xmin=6100,xmax=7000, + limitedmax=[False,False,True,True]*3, + limitedmin=[True,False,True,True]*3, + limits=[(0,0),(0,0),(0,100),(0,100)]*3) + +# overplot the components and residuals again 
+just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1) +just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False) + +# The "optimal chi^2" isn't a real statistical concept, it's something I made up +# However, I think it makes sense (but post an issue if you disagree!): +# It uses the fitted model to find all pixels that are above the noise in the spectrum +# then computes chi^2/n using only those pixels +just_halpha.specfit.annotate(chi2='optimal') + +# save the figure +just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp.png") + +# A new zoom-in figure +import pylab + +# now hide the legend +just_halpha.specfit.fitleg.set_visible(False) +# overplot a y=0 line through the residuals (for reference) +pylab.plot([6100,7000],[-0.2,-0.2],'y--') +# zoom vertically +pylab.gca().set_ylim(-0.3,0.3) +# redraw & save +pylab.draw() +just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_threecomp_zoom.png") + + +# Part of the reason for doing the above work is to demonstrate that a +# 3-component fit is better than a 2-component fit +# +# So, now we do the same as above with a 2-component fit + +just_halpha.plotter(xmin=6100,xmax=7000,ymax=2.00,ymin=-0.3) +just_halpha.specfit(guesses=[2.4007096541802202, 6563.2307968382256, 3.5653446153950314, 1, + 0.53985149324131965, 6564.3460908526877, 19.443226155616617, 1], + fittype='voigt') +just_halpha.specfit.plot_components(add_baseline=False,component_yoffset=-0.1) +just_halpha.specfit.plotresiduals(axis=just_halpha.plotter.axis,clear=False,yoffset=-0.20,label=False) +just_halpha.specfit.annotate(chi2='optimal') +just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp.png") +just_halpha.specfit.fitleg.set_visible(False) +pylab.plot([6100,7000],[-0.2,-0.2],'y--') +pylab.gca().set_ylim(-0.3,0.3) +pylab.draw() +just_halpha.plotter.savefig("SN2009ip_UT121002_Halpha_voigt_twocomp_zoom.png") + + +# urllib3/filepost.py +# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) +# +# This module is part of urllib3 and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import codecs +import mimetypes + +from uuid import uuid4 +from io import BytesIO + +from .packages import six +from .packages.six import b +from .fields import RequestField + +writer = codecs.lookup('utf-8')[3] + + +def choose_boundary(): + """ + Our embarassingly-simple replacement for mimetools.choose_boundary. + """ + return uuid4().hex + + +def iter_field_objects(fields): + """ + Iterate over fields. + + Supports list of (k, v) tuples and dicts, and lists of + :class:`~urllib3.fields.RequestField`. + + """ + if isinstance(fields, dict): + i = six.iteritems(fields) + else: + i = iter(fields) + + for field in i: + if isinstance(field, RequestField): + yield field + else: + yield RequestField.from_tuples(*field) + + +def iter_fields(fields): + """ + .. deprecated:: 1.6 + + Iterate over fields. + + The addition of :class:`~urllib3.fields.RequestField` makes this function + obsolete. Instead, use :func:`iter_field_objects`, which returns + :class:`~urllib3.fields.RequestField` objects. + + Supports list of (k, v) tuples and dicts. + """ + if isinstance(fields, dict): + return ((k, v) for k, v in six.iteritems(fields)) + + return ((k, v) for k, v in fields) + + +def encode_multipart_formdata(fields, boundary=None): + """ + Encode a dictionary of ``fields`` using the multipart/form-data MIME format. 
+ + :param fields: + Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). + + :param boundary: + If not specified, then a random boundary will be generated using + :func:`mimetools.choose_boundary`. + """ + body = BytesIO() + if boundary is None: + boundary = choose_boundary() + + for field in iter_field_objects(fields): + body.write(b('--%s\r\n' % (boundary))) + + writer(body).write(field.render_headers()) + data = field.data + + if isinstance(data, int): + data = str(data) # Backwards compatibility + + if isinstance(data, six.text_type): + writer(body).write(data) + else: + body.write(data) + + body.write(b'\r\n') + + body.write(b('--%s--\r\n' % (boundary))) + + content_type = str('multipart/form-data; boundary=%s' % boundary) + + return body.getvalue(), content_type + +#!/usr/bin/env python +"""A class for representing built-in EE API Function. + +Earth Engine can dynamically produce a JSON array listing the +algorithms available to the user. Each item in the dictionary identifies +the name and return type of the algorithm, the name and type of its +arguments, whether they're required or optional, default values and docs +for each argument and the algorithms as a whole. + +This class manages the algorithm dictionary and creates JavaScript functions +to apply each EE algorithm. +""" + + + +# Using lowercase function naming to match the JavaScript names. +# pylint: disable=g-bad-name + +import copy +import keyword +import re + +import data +import deprecation +import ee_exception +import ee_types +import function + + +class ApiFunction(function.Function): + """An object representing an EE API Function.""" + + # A dictionary of functions defined by the API server. + _api = None + + # A set of algorithm names containing all algorithms that have been bound to + # a function so far using importApi(). + _bound_signatures = set() + + def __init__(self, name, opt_signature=None): + """Creates a function defined by the EE API. + + Args: + name: The name of the function. + opt_signature: The signature of the function. If unspecified, + looked up dynamically. + """ + if opt_signature is None: + opt_signature = ApiFunction.lookup(name).getSignature() + + # The signature of this API function. + self._signature = copy.deepcopy(opt_signature) + self._signature['name'] = name + + def __eq__(self, other): + return (isinstance(other, ApiFunction) and + self.getSignature() == other.getSignature()) + + def __ne__(self, other): + return not self.__eq__(other) + + @classmethod + def call_(cls, name, *args, **kwargs): + """Call a named API function with positional and keyword arguments. + + Args: + name: The name of the API function to call. + *args: Positional arguments to pass to the function. + **kwargs: Keyword arguments to pass to the function. + + Returns: + An object representing the called function. If the signature specifies + a recognized return type, the returned value will be cast to that type. + """ + return cls.lookup(name).call(*args, **kwargs) + + @classmethod + def apply_(cls, name, named_args): + """Call a named API function with a dictionary of named arguments. + + Args: + name: The name of the API function to call. + named_args: A dictionary of arguments to the function. + + Returns: + An object representing the called function. If the signature specifies + a recognized return type, the returned value will be cast to that type. 
+ """ + return cls.lookup(name).apply(named_args) + + def encode(self, unused_encoder): + return self._signature['name'] + + def getSignature(self): + """Returns a description of the interface provided by this function.""" + return self._signature + + @classmethod + def allSignatures(cls): + """Returns a map from the name to signature for all API functions.""" + cls.initialize() + return dict([(name, func.getSignature()) + for name, func in cls._api.iteritems()]) + + @classmethod + def unboundFunctions(cls): + """Returns the functions that have not been bound using importApi() yet.""" + cls.initialize() + return dict([(name, func) for name, func in cls._api.iteritems() + if name not in cls._bound_signatures]) + + @classmethod + def lookup(cls, name): + """Looks up an API function by name. + + Args: + name: The name of the function to get. + + Returns: + The requested ApiFunction. + """ + result = cls.lookupInternal(name) + if not name: + raise ee_exception.EEException( + 'Unknown built-in function name: %s' % name) + return result + + @classmethod + def lookupInternal(cls, name): + """Looks up an API function by name. + + Args: + name: The name of the function to get. + + Returns: + The requested ApiFunction or None if not found. + """ + cls.initialize() + return cls._api.get(name, None) + + @classmethod + def initialize(cls): + """Initializes the list of signatures from the Earth Engine front-end.""" + if not cls._api: + signatures = data.getAlgorithms() + api = {} + for name, sig in signatures.iteritems(): + # Strip type parameters. + sig['returns'] = re.sub('<.*>', '', sig['returns']) + for arg in sig['args']: + arg['type'] = re.sub('<.*>', '', arg['type']) + api[name] = cls(name, sig) + cls._api = api + + @classmethod + def reset(cls): + """Clears the API functions list so it will be reloaded from the server.""" + cls._api = None + cls._bound_signatures = set() + + @classmethod + def importApi(cls, target, prefix, type_name, opt_prepend=None): + """Adds all API functions that begin with a given prefix to a target class. + + Args: + target: The class to add to. + prefix: The prefix to search for in the signatures. + type_name: The name of the object's type. Functions whose + first argument matches this type are bound as instance methods, and + those whose first argument doesn't match are bound as static methods. + opt_prepend: An optional string to prepend to the names of the + added functions. + """ + cls.initialize() + prepend = opt_prepend or '' + for name, api_func in cls._api.iteritems(): + parts = name.split('.') + if len(parts) == 2 and parts[0] == prefix: + fname = prepend + parts[1] + signature = api_func.getSignature() + + cls._bound_signatures.add(name) + + # Specifically handle the function names that are illegal in python. + if keyword.iskeyword(fname): + fname = fname.title() + + # Don't overwrite existing versions of this function. + if (hasattr(target, fname) and + not hasattr(getattr(target, fname), 'signature')): + continue + + # Create a new function so we can attach properties to it. + def MakeBoundFunction(func): + # We need the lambda to capture "func" from the enclosing scope. + return lambda *args, **kwargs: func.call(*args, **kwargs) # pylint: disable=unnecessary-lambda + bound_function = MakeBoundFunction(api_func) + + # Add docs. + setattr(bound_function, '__name__', name.encode('utf8')) + bound_function.__doc__ = str(api_func) + # Attach the signature object for documentation generators. 
+ bound_function.signature = signature + + # Mark as deprecated if needed. + if signature.get('deprecated'): + deprecated_decorator = deprecation.Deprecated(signature['deprecated']) + bound_function = deprecated_decorator(bound_function) + + # Decide whether this is a static or an instance function. + is_instance = (signature['args'] and + ee_types.isSubtype(signature['args'][0]['type'], + type_name)) + if not is_instance: + bound_function = staticmethod(bound_function) + + # Attach the function as a method. + setattr(target, fname, bound_function) + + @staticmethod + def clearApi(target): + """Removes all methods added by importApi() from a target class. + + Args: + target: The class to remove from. + """ + for attr_name in dir(target): + attr_value = getattr(target, attr_name) + if callable(attr_value) and hasattr(attr_value, 'signature'): + delattr(target, attr_name) + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for TPUClusterResolver.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.contrib.cluster_resolver.python.training.tpu_cluster_resolver import TPUClusterResolver +from tensorflow.python.platform import test +from tensorflow.python.training import server_lib + + +mock = test.mock + + +class MockRequestClass(object): + + def __init__(self, name, tpu_map): + self._name = name + self._tpu_map = tpu_map + + def execute(self): + if self._name in self._tpu_map: + return self._tpu_map[self._name] + else: + raise KeyError('Resource %s was not found' % self._name) + + +class MockNodeClass(object): + + def __init__(self, tpu_map): + self._tpu_map = tpu_map + + def get(self, name): + return MockRequestClass(name, self._tpu_map) + + +class TPUClusterResolverTest(test.TestCase): + + def _verifyClusterSpecEquality(self, cluster_spec, expected_proto): + """Verifies that the ClusterSpec generates the correct proto. + + We are testing this four different ways to ensure that the ClusterSpec + returned by the TPUClusterResolver behaves identically to a normal + ClusterSpec when passed into the generic ClusterSpec libraries. 
+ + Args: + cluster_spec: ClusterSpec returned by the TPUClusterResolver + expected_proto: Expected protobuf + """ + self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def()) + self.assertProtoEquals( + expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def()) + self.assertProtoEquals( + expected_proto, + server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def()) + self.assertProtoEquals( + expected_proto, + server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def()) + + def mock_service_client( + self, + tpu_map=None): + + if tpu_map is None: + tpu_map = {} + + mock_locations = mock.MagicMock() + mock_locations.nodes.return_value = MockNodeClass(tpu_map) + + mock_project = mock.MagicMock() + mock_project.locations.return_value = mock_locations + + mock_client = mock.MagicMock() + mock_client.projects.return_value = mock_project + + return mock_client + + def testSimpleSuccessfulRetrieval(self): + tpu_map = { + 'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': { + 'ipAddress': '10.1.2.3', + 'port': '8470' + } + } + + tpu_cluster_resolver = TPUClusterResolver( + project='test-project', + zone='us-central1-c', + tpu_names=['test-tpu-1'], + credentials=None, + service=self.mock_service_client(tpu_map=tpu_map)) + + actual_cluster_spec = tpu_cluster_resolver.cluster_spec() + expected_proto = """ + job { name: 'tpu_worker' tasks { key: 0 value: '10.1.2.3:8470' } } + """ + self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto) + + def testMultipleSuccessfulRetrieval(self): + tpu_map = { + 'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': { + 'ipAddress': '10.1.2.3', + 'port': '8470' + }, + 'projects/test-project/locations/us-central1-c/nodes/test-tpu-2': { + 'ipAddress': '10.4.5.6', + 'port': '8470' + } + } + + tpu_cluster_resolver = TPUClusterResolver( + project='test-project', + zone='us-central1-c', + tpu_names=['test-tpu-2', 'test-tpu-1'], + credentials=None, + service=self.mock_service_client(tpu_map=tpu_map)) + + actual_cluster_spec = tpu_cluster_resolver.cluster_spec() + expected_proto = """ + job { name: 'tpu_worker' tasks { key: 0 value: '10.4.5.6:8470' } + tasks { key: 1 value: '10.1.2.3:8470' } } + """ + self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto) + + def testGetMasterMultipleEntries(self): + tpu_map = { + 'projects/test-project/locations/us-central1-c/nodes/test-tpu-1': { + 'ipAddress': '10.1.2.3', + 'port': '8470' + }, + 'projects/test-project/locations/us-central1-c/nodes/test-tpu-2': { + 'ipAddress': '10.4.5.6', + 'port': '8470' + } + } + + tpu_cluster_resolver = TPUClusterResolver( + project='test-project', + zone='us-central1-c', + tpu_names=['test-tpu-2', 'test-tpu-1'], + credentials=None, + service=self.mock_service_client(tpu_map=tpu_map)) + self.assertEqual('grpc://10.4.5.6:8470', tpu_cluster_resolver.get_master()) + + def testGetMasterNoEntries(self): + tpu_map = {} + + tpu_cluster_resolver = TPUClusterResolver( + project='test-project', + zone='us-central1-c', + tpu_names=[], + credentials=None, + service=self.mock_service_client(tpu_map=tpu_map)) + with self.assertRaises(ValueError): + tpu_cluster_resolver.get_master() + +if __name__ == '__main__': + test.main() + +# Authors: +# Petr Viktorin +# +# Copyright (C) 2013 Red Hat +# see file 'COPYING' for use and warranty information +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, 
either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +import six + +from ipalib import errors, api, _ +from ipapython.dn import DN +from ipatests.test_xmlrpc.xmlrpc_test import Declarative +from ipatests.test_xmlrpc.test_user_plugin import get_user_result +from ipatests.test_xmlrpc import objectclasses +import pytest + +if six.PY3: + unicode = str + +radius1 = u'testradius' +radius1_fqdn = u'testradius.test' +radius1_dn = DN(('cn=testradius'), ('cn=radiusproxy'), api.env.basedn) +user1 = u'tuser1' +password1 = u'very*secure123' +password1_bytes = password1.encode('ascii') + + +@pytest.mark.tier1 +class test_raduisproxy(Declarative): + + cleanup_commands = [ + ('radiusproxy_del', [radius1], {}), + ('user_del', [user1], {}), + ] + + tests = [ + + dict( + desc='Try to retrieve non-existent %r' % radius1, + command=('radiusproxy_show', [radius1], {}), + expected=errors.NotFound( + reason=u'%s: RADIUS proxy server not found' % radius1), + ), + + + dict( + desc='Try to update non-existent %r' % radius1, + command=('radiusproxy_mod', [radius1], {}), + expected=errors.NotFound( + reason=_('%s: RADIUS proxy server not found') % radius1), + ), + + + dict( + desc='Try to delete non-existent %r' % radius1, + command=('radiusproxy_del', [radius1], {}), + expected=errors.NotFound( + reason=_('%s: RADIUS proxy server not found') % radius1), + ), + + + dict( + desc='Create %r' % radius1, + command=('radiusproxy_add', [radius1], + dict( + ipatokenradiusserver=radius1_fqdn, + ipatokenradiussecret=password1, + ), + ), + expected=dict( + value=radius1, + summary=u'Added RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + dn=radius1_dn, + ipatokenradiussecret=[password1_bytes], + ipatokenradiusserver=[radius1_fqdn], + objectclass=objectclasses.radiusproxy, + + ), + ), + ), + + + dict( + desc='Try to create duplicate %r' % radius1, + command=('radiusproxy_add', [radius1], + dict( + ipatokenradiusserver=radius1_fqdn, + ipatokenradiussecret=password1, + ), + ), + expected=errors.DuplicateEntry(message=_('RADIUS proxy server ' + 'with name "%s" already exists') % radius1), + ), + + + dict( + desc='Retrieve %r' % radius1, + command=('radiusproxy_show', [radius1], {}), + expected=dict( + value=radius1, + summary=None, + result=dict( + cn=[radius1], + dn=radius1_dn, + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + + dict( + desc='Retrieve %r with all=True' % radius1, + command=('radiusproxy_show', [radius1], dict(all=True)), + expected=dict( + value=radius1, + summary=None, + result=dict( + cn=[radius1], + dn=radius1_dn, + ipatokenradiussecret=[password1_bytes], + ipatokenradiusserver=[radius1_fqdn], + objectclass=objectclasses.radiusproxy, + ), + ), + ), + + ] + [ + dict( + desc='Set timeout of %s to %s (valid)' % (radius1, num), + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiustimeout=num)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ipatokenradiustimeout=[unicode(num)], + ), + ), + ) + for num in (1, 100) + ] + [ + + dict( + desc='Set timeout of %s to 0 (invalid)' % radius1, 
+ command=('radiusproxy_mod', [radius1], + dict(ipatokenradiustimeout=0)), + expected=errors.ValidationError( + name='timeout', error=_('must be at least 1')), + ), + + dict( + desc='Unset timeout of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiustimeout=None)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + ] + [ + dict( + desc='Set retries of %s to %s (valid)' % (radius1, num), + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusretries=num)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ipatokenradiusretries=[unicode(num)], + ), + ), + ) + for num in (0, 4, 10) + ] + [ + dict( + desc='Set retries of %s to %s (invalid)' % (radius1, num), + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusretries=num)), + expected=errors.ValidationError( + name='retries', error=reason), + ) + for num, reason in ((-1, 'must be at least 0'), + (11, 'can be at most 10'), + (100, 'can be at most 10')) + ] + [ + + dict( + desc='Unset retries of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusretries=None)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + ] + [ + dict( + desc='Set server string of %s to %s (valid)' % (radius1, fqdn), + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusserver=fqdn)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[fqdn], + ), + ), + ) + for fqdn in (radius1_fqdn + u':12345', radius1_fqdn) + ] + [ + dict( + desc='Set server string of %s to %s (invalid)' % (radius1, fqdn), + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusserver=fqdn)), + expected=errors.ValidationError(name='ipatokenradiusserver', + error=error), + ) + for fqdn, error in ( + (radius1_fqdn + u':0x5a', 'invalid port number'), + (radius1_fqdn + u':1:2:3', + "only letters, numbers, '_', '-' are allowed. 
DNS label may not " + "start or end with '-'"), + (u'bogus', 'not fully qualified'), + ) + ] + [ + + dict( + desc='Try to unset server string of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(ipatokenradiusserver=None)), + expected=errors.RequirementError(name='server'), + ), + + dict( + desc='Set userattr of %s to %s (valid)' % (radius1, u'cn'), + command=('radiusproxy_mod', [radius1], + dict(ipatokenusermapattribute=u'cn')), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ipatokenusermapattribute=[u'cn'], + ), + ), + ), + + dict( + desc='Set userattr of %s to %s (invalid)' % (radius1, u'$%^&*'), + command=('radiusproxy_mod', [radius1], + dict(ipatokenusermapattribute=u'$%^&*')), + expected=errors.ValidationError(name='ipatokenusermapattribute', + error=u'invalid attribute name'), + ), + + dict( + desc='Unset userattr of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(ipatokenusermapattribute=None)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + dict( + desc='Set desc of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(description=u'a virtual radius server')), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + description=[u'a virtual radius server'], + ), + ), + ), + + dict( + desc='Unset desc of %s' % radius1, + command=('radiusproxy_mod', [radius1], + dict(description=None)), + expected=dict( + value=radius1, + summary=u'Modified RADIUS proxy server "%s"' % radius1, + result=dict( + cn=[radius1], + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + dict( + desc='Create "%s"' % user1, + command=( + 'user_add', [user1], dict(givenname=u'Test', sn=u'User1') + ), + expected=dict( + value=user1, + summary=u'Added user "%s"' % user1, + result=get_user_result(user1, u'Test', u'User1', 'add'), + ), + ), + + + dict( + desc='Set radiusconfiglink of %r' % user1, + command=('user_mod', [user1], + dict(ipatokenradiusconfiglink=radius1,)), + expected=dict( + result=get_user_result(user1, u'Test', u'User1', 'mod', + ipatokenradiusconfiglink=[radius1]), + value=user1, + summary='Modified user "%s"' % user1, + ), + ), + + dict( + desc='Retrieve %r to verify %s is output' % (radius1, user1), + command=('radiusproxy_show', [radius1], {}), + expected=dict( + value=radius1, + summary=None, + result=dict( + cn=[radius1], + dn=radius1_dn, + ipatokenradiusserver=[radius1_fqdn], + ), + ), + ), + + dict( + desc='Retrieve %r to verify %s is output' % (user1, radius1), + command=('user_show', [user1], {}), + expected=dict( + value=user1, + summary=None, + result=get_user_result(user1, u'Test', u'User1', 'show', + ipatokenradiusconfiglink=[radius1]), + ), + ), + + dict( + desc='Delete %r' % radius1, + command=('radiusproxy_del', [radius1], {}), + expected=dict( + value=[radius1], + summary=u'Deleted RADIUS proxy server "%s"' % radius1, + result=dict(failed=[]), + ), + ), + + dict( + desc='Retrieve %s to verify link is deleted' % user1, + command=('user_show', [user1], {}), + expected=dict( + value=user1, + summary=None, + result=get_user_result(user1, u'Test', u'User1', 'show'), + ), + ), + + ] + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from ducktape.mark import parametrize +from ducktape.mark.resource import cluster +from ducktape.tests.test import Test +from ducktape.utils.util import wait_until + +from kafkatest.services.kafka import KafkaService +from kafkatest.services.verifiable_producer import VerifiableProducer +from kafkatest.services.zookeeper import ZookeeperService +from kafkatest.utils import is_version +from kafkatest.version import LATEST_0_8_2, LATEST_0_9, LATEST_0_10_0, LATEST_0_10_1, DEV_BRANCH, KafkaVersion + + +class TestVerifiableProducer(Test): + """Sanity checks on verifiable producer service class.""" + def __init__(self, test_context): + super(TestVerifiableProducer, self).__init__(test_context) + + self.topic = "topic" + self.zk = ZookeeperService(test_context, num_nodes=1) + self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, + topics={self.topic: {"partitions": 1, "replication-factor": 1}}) + + self.num_messages = 1000 + # This will produce to source kafka cluster + self.producer = VerifiableProducer(test_context, num_nodes=1, kafka=self.kafka, topic=self.topic, + max_messages=self.num_messages, throughput=self.num_messages/5) + + def setUp(self): + self.zk.start() + self.kafka.start() + + @cluster(num_nodes=3) + @parametrize(producer_version=str(LATEST_0_8_2)) + @parametrize(producer_version=str(LATEST_0_9)) + @parametrize(producer_version=str(LATEST_0_10_0)) + @parametrize(producer_version=str(LATEST_0_10_1)) + @parametrize(producer_version=str(DEV_BRANCH)) + def test_simple_run(self, producer_version=DEV_BRANCH): + """ + Test that we can start VerifiableProducer on the current branch snapshot version or against the 0.8.2 jar, and + verify that we can produce a small number of messages. + """ + node = self.producer.nodes[0] + node.version = KafkaVersion(producer_version) + self.producer.start() + wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5, + err_msg="Producer failed to start in a reasonable amount of time.") + + # using version.vstring (distutils.version.LooseVersion) is a tricky way of ensuring + # that this check works with DEV_BRANCH + # When running VerifiableProducer 0.8.X, both the current branch version and 0.8.X should show up because of the + # way verifiable producer pulls in some development directories into its classpath + # + # If the test fails here because 'ps .. | grep' couldn't find the process it means + # the login and grep that is_version() performs is slower than + # the time it takes the producer to produce its messages. + # Easy fix is to decrease throughput= above, the good fix is to make the producer + # not terminate until explicitly killed in this case. 
+ if node.version <= LATEST_0_8_2: + assert is_version(node, [node.version.vstring, DEV_BRANCH.vstring], logger=self.logger) + else: + assert is_version(node, [node.version.vstring], logger=self.logger) + + self.producer.wait() + num_produced = self.producer.num_acked + assert num_produced == self.num_messages, "num_produced: %d, num_messages: %d" % (num_produced, self.num_messages) + + + +#! /usr/bin/env python +# encoding: utf-8 +# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file + +import os,shutil,sys,platform +from waflib import TaskGen,Task,Build,Options,Utils,Errors +from waflib.TaskGen import taskgen_method,feature,after_method,before_method +app_info=''' + + + + + CFBundlePackageType + APPL + CFBundleGetInfoString + Created by Waf + CFBundleSignature + ???? + NOTE + THIS IS A GENERATED FILE, DO NOT MODIFY + CFBundleExecutable + %s + + +''' +@feature('c','cxx') +def set_macosx_deployment_target(self): + if self.env['MACOSX_DEPLOYMENT_TARGET']: + os.environ['MACOSX_DEPLOYMENT_TARGET']=self.env['MACOSX_DEPLOYMENT_TARGET'] + elif'MACOSX_DEPLOYMENT_TARGET'not in os.environ: + if Utils.unversioned_sys_platform()=='darwin': + os.environ['MACOSX_DEPLOYMENT_TARGET']='.'.join(platform.mac_ver()[0].split('.')[:2]) +@taskgen_method +def create_bundle_dirs(self,name,out): + bld=self.bld + dir=out.parent.find_or_declare(name) + dir.mkdir() + macos=dir.find_or_declare(['Contents','MacOS']) + macos.mkdir() + return dir +def bundle_name_for_output(out): + name=out.name + k=name.rfind('.') + if k>=0: + name=name[:k]+'.app' + else: + name=name+'.app' + return name +@feature('cprogram','cxxprogram') +@after_method('apply_link') +def create_task_macapp(self): + if self.env['MACAPP']or getattr(self,'mac_app',False): + out=self.link_task.outputs[0] + name=bundle_name_for_output(out) + dir=self.create_bundle_dirs(name,out) + n1=dir.find_or_declare(['Contents','MacOS',out.name]) + self.apptask=self.create_task('macapp',self.link_task.outputs,n1) + inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/MacOS/'%name + self.bld.install_files(inst_to,n1,chmod=Utils.O755) + if getattr(self,'mac_resources',None): + res_dir=n1.parent.parent.make_node('Resources') + inst_to=getattr(self,'install_path','/Applications')+'/%s/Resources'%name + for x in self.to_list(self.mac_resources): + node=self.path.find_node(x) + if not node: + raise Errors.WafError('Missing mac_resource %r in %r'%(x,self)) + parent=node.parent + if os.path.isdir(node.abspath()): + nodes=node.ant_glob('**') + else: + nodes=[node] + for node in nodes: + rel=node.path_from(parent) + tsk=self.create_task('macapp',node,res_dir.make_node(rel)) + self.bld.install_as(inst_to+'/%s'%rel,node) + if getattr(self.bld,'is_install',None): + self.install_task.hasrun=Task.SKIP_ME +@feature('cprogram','cxxprogram') +@after_method('apply_link') +def create_task_macplist(self): + if self.env['MACAPP']or getattr(self,'mac_app',False): + out=self.link_task.outputs[0] + name=bundle_name_for_output(out) + dir=self.create_bundle_dirs(name,out) + n1=dir.find_or_declare(['Contents','Info.plist']) + self.plisttask=plisttask=self.create_task('macplist',[],n1) + if getattr(self,'mac_plist',False): + node=self.path.find_resource(self.mac_plist) + if node: + plisttask.inputs.append(node) + else: + plisttask.code=self.mac_plist + else: + plisttask.code=app_info%self.link_task.outputs[0].name + inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/'%name + self.bld.install_files(inst_to,n1) 
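# Illustrative sketch (not part of the original module): a hypothetical wscript
# fragment showing how the features above are typically driven. The target name
# 'myapp' and the file names are made up; 'mac_app', 'mac_plist' and
# 'mac_resources' are the task-generator attributes read by create_task_macapp
# and create_task_macplist.
#
#   def build(bld):
#       bld(
#           features='c cprogram',
#           source='main.c',
#           target='myapp',
#           mac_app=True,               # wrap the binary in myapp.app/Contents/MacOS
#           mac_plist='Info.plist',     # copied to Contents/Info.plist (else app_info is used)
#           mac_resources='myapp.icns', # installed under Contents/Resources
#       )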
+@feature('cshlib','cxxshlib') +@before_method('apply_link','propagate_uselib_vars') +def apply_bundle(self): + if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False): + self.env['LINKFLAGS_cshlib']=self.env['LINKFLAGS_cxxshlib']=[] + self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['macbundle_PATTERN'] + use=self.use=self.to_list(getattr(self,'use',[])) + if not'MACBUNDLE'in use: + use.append('MACBUNDLE') +app_dirs=['Contents','Contents/MacOS','Contents/Resources'] +class macapp(Task.Task): + color='PINK' + def run(self): + self.outputs[0].parent.mkdir() + shutil.copy2(self.inputs[0].srcpath(),self.outputs[0].abspath()) +class macplist(Task.Task): + color='PINK' + ext_in=['.bin'] + def run(self): + if getattr(self,'code',None): + txt=self.code + else: + txt=self.inputs[0].read() + self.outputs[0].write(txt) + +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""Convenience module importing everything from backports.configparser.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + + +from backports.configparser import ( + RawConfigParser, + ConfigParser, + SafeConfigParser, + SectionProxy, + + Interpolation, + BasicInterpolation, + ExtendedInterpolation, + LegacyInterpolation, + + Error, + NoSectionError, + DuplicateSectionError, + DuplicateOptionError, + NoOptionError, + InterpolationError, + InterpolationMissingOptionError, + InterpolationSyntaxError, + InterpolationDepthError, + ParsingError, + MissingSectionHeaderError, + ConverterMapping, + + _UNSET, + DEFAULTSECT, + MAX_INTERPOLATION_DEPTH, + _default_dict, + _ChainMap, +) + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +# NOTE: names missing from __all__ imported anyway for backwards compatibility. 
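# Hypothetical usage sketch (not part of the upstream module): the re-exported
# names behave like the stdlib configparser. Guarded so importing this
# convenience module stays side-effect free; the section and option names below
# are made up.
if __name__ == "__main__":
    parser = ConfigParser(interpolation=ExtendedInterpolation())
    parser.read_string(
        "[paths]\n"
        "home = /srv/app\n"
        "logs = ${home}/logs\n"
    )
    # ExtendedInterpolation resolves ${option} (and ${section:option}) references.
    print(parser["paths"]["logs"])  # /srv/app/logs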
+ +from __future__ import unicode_literals + +import difflib +import errno +import json +import os +import posixpath +import socket +import sys +import threading +import unittest +import warnings +from collections import Counter +from contextlib import contextmanager +from copy import copy +from functools import wraps +from unittest.util import safe_repr + +from django.apps import apps +from django.conf import settings +from django.core import mail +from django.core.exceptions import ImproperlyConfigured, ValidationError +from django.core.files import locks +from django.core.handlers.wsgi import WSGIHandler, get_path_info +from django.core.management import call_command +from django.core.management.color import no_style +from django.core.management.sql import emit_post_migrate_signal +from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer +from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction +from django.forms.fields import CharField +from django.http import QueryDict +from django.test.client import Client +from django.test.html import HTMLParseError, parse_html +from django.test.signals import setting_changed, template_rendered +from django.test.utils import ( + CaptureQueriesContext, ContextList, compare_xml, modify_settings, + override_settings, +) +from django.utils import six +from django.utils.decorators import classproperty +from django.utils.deprecation import RemovedInDjango20Warning +from django.utils.encoding import force_text +from django.utils.six.moves.urllib.parse import ( + unquote, urljoin, urlparse, urlsplit, urlunsplit, +) +from django.utils.six.moves.urllib.request import url2pathname +from django.views.static import serve + +__all__ = ('TestCase', 'TransactionTestCase', + 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') + + +def to_list(value): + """ + Puts value into a list if it's not already one. + Returns an empty list if value is None. + """ + if value is None: + value = [] + elif not isinstance(value, list): + value = [value] + return value + + +def assert_and_parse_html(self, html, user_msg, msg): + try: + dom = parse_html(html) + except HTMLParseError as e: + standardMsg = '%s\n%s' % (msg, e) + self.fail(self._formatMessage(user_msg, standardMsg)) + return dom + + +class _AssertNumQueriesContext(CaptureQueriesContext): + def __init__(self, test_case, num, connection): + self.test_case = test_case + self.num = num + super(_AssertNumQueriesContext, self).__init__(connection) + + def __exit__(self, exc_type, exc_value, traceback): + super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback) + if exc_type is not None: + return + executed = len(self) + self.test_case.assertEqual( + executed, self.num, + "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( + executed, self.num, + '\n'.join( + query['sql'] for query in self.captured_queries + ) + ) + ) + + +class _AssertTemplateUsedContext(object): + def __init__(self, test_case, template_name): + self.test_case = test_case + self.template_name = template_name + self.rendered_templates = [] + self.rendered_template_names = [] + self.context = ContextList() + + def on_template_render(self, sender, signal, template, context, **kwargs): + self.rendered_templates.append(template) + self.rendered_template_names.append(template.name) + self.context.append(copy(context)) + + def test(self): + return self.template_name in self.rendered_template_names + + def message(self): + return '%s was not rendered.' 
% self.template_name + + def __enter__(self): + template_rendered.connect(self.on_template_render) + return self + + def __exit__(self, exc_type, exc_value, traceback): + template_rendered.disconnect(self.on_template_render) + if exc_type is not None: + return + + if not self.test(): + message = self.message() + if len(self.rendered_templates) == 0: + message += ' No template was rendered.' + else: + message += ' Following templates were rendered: %s' % ( + ', '.join(self.rendered_template_names)) + self.test_case.fail(message) + + +class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): + def test(self): + return self.template_name not in self.rendered_template_names + + def message(self): + return '%s was rendered.' % self.template_name + + +class _CursorFailure(object): + def __init__(self, cls_name, wrapped): + self.cls_name = cls_name + self.wrapped = wrapped + + def __call__(self): + raise AssertionError( + "Database queries aren't allowed in SimpleTestCase. " + "Either use TestCase or TransactionTestCase to ensure proper test isolation or " + "set %s.allow_database_queries to True to silence this failure." % self.cls_name + ) + + +class SimpleTestCase(unittest.TestCase): + + # The class we'll use for the test client self.client. + # Can be overridden in derived classes. + client_class = Client + _overridden_settings = None + _modified_settings = None + + # Tests shouldn't be allowed to query the database since + # this base class doesn't enforce any isolation. + allow_database_queries = False + + @classmethod + def setUpClass(cls): + super(SimpleTestCase, cls).setUpClass() + if cls._overridden_settings: + cls._cls_overridden_context = override_settings(**cls._overridden_settings) + cls._cls_overridden_context.enable() + if cls._modified_settings: + cls._cls_modified_context = modify_settings(cls._modified_settings) + cls._cls_modified_context.enable() + if not cls.allow_database_queries: + for alias in connections: + connection = connections[alias] + connection.cursor = _CursorFailure(cls.__name__, connection.cursor) + + @classmethod + def tearDownClass(cls): + if not cls.allow_database_queries: + for alias in connections: + connection = connections[alias] + connection.cursor = connection.cursor.wrapped + if hasattr(cls, '_cls_modified_context'): + cls._cls_modified_context.disable() + delattr(cls, '_cls_modified_context') + if hasattr(cls, '_cls_overridden_context'): + cls._cls_overridden_context.disable() + delattr(cls, '_cls_overridden_context') + super(SimpleTestCase, cls).tearDownClass() + + def __call__(self, result=None): + """ + Wrapper around default __call__ method to perform common Django test + set up. This means that user-defined Test Cases aren't required to + include a call to super().setUp(). + """ + testMethod = getattr(self, self._testMethodName) + skipped = ( + getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False) + ) + + if not skipped: + try: + self._pre_setup() + except Exception: + result.addError(self, sys.exc_info()) + return + super(SimpleTestCase, self).__call__(result) + if not skipped: + try: + self._post_teardown() + except Exception: + result.addError(self, sys.exc_info()) + return + + def _pre_setup(self): + """Performs any pre-test setup. This includes: + + * Creating a test client. + * Clearing the mail test outbox. 
+ """ + self.client = self.client_class() + mail.outbox = [] + + def _post_teardown(self): + """Perform any post-test things.""" + pass + + def settings(self, **kwargs): + """ + A context manager that temporarily sets a setting and reverts to the original value when exiting the context. + """ + return override_settings(**kwargs) + + def modify_settings(self, **kwargs): + """ + A context manager that temporarily applies changes a list setting and + reverts back to the original value when exiting the context. + """ + return modify_settings(**kwargs) + + def assertRedirects(self, response, expected_url, status_code=302, + target_status_code=200, host=None, msg_prefix='', + fetch_redirect_response=True): + """Asserts that a response redirected to a specific URL, and that the + redirect URL can be loaded. + + Note that assertRedirects won't work for external links since it uses + TestClient to do a request (use fetch_redirect_response=False to check + such links without fetching them). + """ + if host is not None: + warnings.warn( + "The host argument is deprecated and no longer used by assertRedirects", + RemovedInDjango20Warning, stacklevel=2 + ) + + if msg_prefix: + msg_prefix += ": " + + if hasattr(response, 'redirect_chain'): + # The request was a followed redirect + self.assertTrue( + len(response.redirect_chain) > 0, + msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" + % (response.status_code, status_code) + ) + + self.assertEqual( + response.redirect_chain[0][1], status_code, + msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)" + % (response.redirect_chain[0][1], status_code) + ) + + url, status_code = response.redirect_chain[-1] + scheme, netloc, path, query, fragment = urlsplit(url) + + self.assertEqual( + response.status_code, target_status_code, + msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)" + % (response.status_code, target_status_code) + ) + + else: + # Not a followed redirect + self.assertEqual( + response.status_code, status_code, + msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" + % (response.status_code, status_code) + ) + + url = response.url + scheme, netloc, path, query, fragment = urlsplit(url) + + # Prepend the request path to handle relative path redirects. + if not path.startswith('/'): + url = urljoin(response.request['PATH_INFO'], url) + path = urljoin(response.request['PATH_INFO'], path) + + if fetch_redirect_response: + redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https')) + + # Get the redirection page, using the same client that was used + # to obtain the original response. + self.assertEqual( + redirect_response.status_code, target_status_code, + msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)" + % (path, redirect_response.status_code, target_status_code) + ) + + if url != expected_url: + # For temporary backwards compatibility, try to compare with a relative url + e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url) + relative_url = urlunsplit(('', '', e_path, e_query, e_fragment)) + if url == relative_url: + warnings.warn( + "assertRedirects had to strip the scheme and domain from the " + "expected URL, as it was always added automatically to URLs " + "before Django 1.9. 
Please update your expected URLs by " + "removing the scheme and domain.", + RemovedInDjango20Warning, stacklevel=2) + expected_url = relative_url + + self.assertEqual( + url, expected_url, + msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url) + ) + + def _assert_contains(self, response, text, status_code, msg_prefix, html): + # If the response supports deferred rendering and hasn't been rendered + # yet, then ensure that it does get rendered before proceeding further. + if hasattr(response, 'render') and callable(response.render) and not response.is_rendered: + response.render() + + if msg_prefix: + msg_prefix += ": " + + self.assertEqual( + response.status_code, status_code, + msg_prefix + "Couldn't retrieve content: Response code was %d" + " (expected %d)" % (response.status_code, status_code) + ) + + if response.streaming: + content = b''.join(response.streaming_content) + else: + content = response.content + if not isinstance(text, bytes) or html: + text = force_text(text, encoding=response.charset) + content = content.decode(response.charset) + text_repr = "'%s'" % text + else: + text_repr = repr(text) + if html: + content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:") + text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:") + real_count = content.count(text) + return (text_repr, real_count, msg_prefix) + + def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False): + """ + Asserts that a response indicates that some content was retrieved + successfully, (i.e., the HTTP status code was as expected), and that + ``text`` occurs ``count`` times in the content of the response. + If ``count`` is None, the count doesn't matter - the assertion is true + if the text occurs at least once in the response. + """ + text_repr, real_count, msg_prefix = self._assert_contains( + response, text, status_code, msg_prefix, html) + + if count is not None: + self.assertEqual( + real_count, count, + msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count) + ) + else: + self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr) + + def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False): + """ + Asserts that a response indicates that some content was retrieved + successfully, (i.e., the HTTP status code was as expected), and that + ``text`` doesn't occurs in the content of the response. + """ + text_repr, real_count, msg_prefix = self._assert_contains( + response, text, status_code, msg_prefix, html) + + self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr) + + def assertFormError(self, response, form, field, errors, msg_prefix=''): + """ + Asserts that a form used to render the response has a specific field + error. + """ + if msg_prefix: + msg_prefix += ": " + + # Put context(s) into a list to simplify processing. + contexts = to_list(response.context) + if not contexts: + self.fail(msg_prefix + "Response did not use any contexts to render the response") + + # Put error(s) into a list to simplify processing. + errors = to_list(errors) + + # Search all contexts for the error. 
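        # Illustration (assumed data): for a bound form whose EmailField failed
        # validation, context[form].errors would look roughly like
        # {'email': ['Enter a valid email address.']}, and each expected error
        # below is checked against that per-field list of messages.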
+ found_form = False + for i, context in enumerate(contexts): + if form not in context: + continue + found_form = True + for err in errors: + if field: + if field in context[form].errors: + field_errors = context[form].errors[field] + self.assertTrue( + err in field_errors, + msg_prefix + "The field '%s' on form '%s' in" + " context %d does not contain the error '%s'" + " (actual errors: %s)" % + (field, form, i, err, repr(field_errors)) + ) + elif field in context[form].fields: + self.fail( + msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" % + (field, form, i) + ) + else: + self.fail( + msg_prefix + "The form '%s' in context %d does not contain the field '%s'" % + (form, i, field) + ) + else: + non_field_errors = context[form].non_field_errors() + self.assertTrue( + err in non_field_errors, + msg_prefix + "The form '%s' in context %d does not" + " contain the non-field error '%s'" + " (actual errors: %s)" % + (form, i, err, non_field_errors) + ) + if not found_form: + self.fail(msg_prefix + "The form '%s' was not used to render the response" % form) + + def assertFormsetError(self, response, formset, form_index, field, errors, + msg_prefix=''): + """ + Asserts that a formset used to render the response has a specific error. + + For field errors, specify the ``form_index`` and the ``field``. + For non-field errors, specify the ``form_index`` and the ``field`` as + None. + For non-form errors, specify ``form_index`` as None and the ``field`` + as None. + """ + # Add punctuation to msg_prefix + if msg_prefix: + msg_prefix += ": " + + # Put context(s) into a list to simplify processing. + contexts = to_list(response.context) + if not contexts: + self.fail(msg_prefix + 'Response did not use any contexts to ' + 'render the response') + + # Put error(s) into a list to simplify processing. + errors = to_list(errors) + + # Search all contexts for the error. + found_formset = False + for i, context in enumerate(contexts): + if formset not in context: + continue + found_formset = True + for err in errors: + if field is not None: + if field in context[formset].forms[form_index].errors: + field_errors = context[formset].forms[form_index].errors[field] + self.assertTrue( + err in field_errors, + msg_prefix + "The field '%s' on formset '%s', " + "form %d in context %d does not contain the " + "error '%s' (actual errors: %s)" % + (field, formset, form_index, i, err, repr(field_errors)) + ) + elif field in context[formset].forms[form_index].fields: + self.fail( + msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors" + % (field, formset, form_index, i) + ) + else: + self.fail( + msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'" + % (formset, form_index, i, field) + ) + elif form_index is not None: + non_field_errors = context[formset].forms[form_index].non_field_errors() + self.assertFalse( + len(non_field_errors) == 0, + msg_prefix + "The formset '%s', form %d in context %d " + "does not contain any non-field errors." % (formset, form_index, i) + ) + self.assertTrue( + err in non_field_errors, + msg_prefix + "The formset '%s', form %d in context %d " + "does not contain the non-field error '%s' (actual errors: %s)" + % (formset, form_index, i, err, repr(non_field_errors)) + ) + else: + non_form_errors = context[formset].non_form_errors() + self.assertFalse( + len(non_form_errors) == 0, + msg_prefix + "The formset '%s' in context %d does not " + "contain any non-form errors." 
% (formset, i) + ) + self.assertTrue( + err in non_form_errors, + msg_prefix + "The formset '%s' in context %d does not " + "contain the non-form error '%s' (actual errors: %s)" + % (formset, i, err, repr(non_form_errors)) + ) + if not found_formset: + self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset) + + def _assert_template_used(self, response, template_name, msg_prefix): + + if response is None and template_name is None: + raise TypeError('response and/or template_name argument must be provided') + + if msg_prefix: + msg_prefix += ": " + + if template_name is not None and response is not None and not hasattr(response, 'templates'): + raise ValueError( + "assertTemplateUsed() and assertTemplateNotUsed() are only " + "usable on responses fetched using the Django test Client." + ) + + if not hasattr(response, 'templates') or (response is None and template_name): + if response: + template_name = response + response = None + # use this template with context manager + return template_name, None, msg_prefix + + template_names = [t.name for t in response.templates if t.name is not None] + return None, template_names, msg_prefix + + def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None): + """ + Asserts that the template with the provided name was used in rendering + the response. Also usable as context manager. + """ + context_mgr_template, template_names, msg_prefix = self._assert_template_used( + response, template_name, msg_prefix) + + if context_mgr_template: + # Use assertTemplateUsed as context manager. + return _AssertTemplateUsedContext(self, context_mgr_template) + + if not template_names: + self.fail(msg_prefix + "No templates used to render the response") + self.assertTrue( + template_name in template_names, + msg_prefix + "Template '%s' was not a template used to render" + " the response. Actual template(s) used: %s" + % (template_name, ', '.join(template_names)) + ) + + if count is not None: + self.assertEqual( + template_names.count(template_name), count, + msg_prefix + "Template '%s' was expected to be rendered %d " + "time(s) but was actually rendered %d time(s)." + % (template_name, count, template_names.count(template_name)) + ) + + def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): + """ + Asserts that the template with the provided name was NOT used in + rendering the response. Also usable as context manager. + """ + context_mgr_template, template_names, msg_prefix = self._assert_template_used( + response, template_name, msg_prefix + ) + if context_mgr_template: + # Use assertTemplateNotUsed as context manager. + return _AssertTemplateNotUsedContext(self, context_mgr_template) + + self.assertFalse( + template_name in template_names, + msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name + ) + + @contextmanager + def _assert_raises_message_cm(self, expected_exception, expected_message): + with self.assertRaises(expected_exception) as cm: + yield cm + self.assertIn(expected_message, str(cm.exception)) + + def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs): + """ + Asserts that expected_message is found in the the message of a raised + exception. + + Args: + expected_exception: Exception class expected to be raised. + expected_message: expected error message string value. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. 
+ """ + # callable_obj was a documented kwarg in Django 1.8 and older. + callable_obj = kwargs.pop('callable_obj', None) + if callable_obj: + warnings.warn( + 'The callable_obj kwarg is deprecated. Pass the callable ' + 'as a positional argument instead.', RemovedInDjango20Warning + ) + elif len(args): + callable_obj = args[0] + args = args[1:] + + cm = self._assert_raises_message_cm(expected_exception, expected_message) + # Assertion used in context manager fashion. + if callable_obj is None: + return cm + # Assertion was passed a callable. + with cm: + callable_obj(*args, **kwargs) + + def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, + field_kwargs=None, empty_value=''): + """ + Asserts that a form field behaves correctly with various inputs. + + Args: + fieldclass: the class of the field to be tested. + valid: a dictionary mapping valid inputs to their expected + cleaned values. + invalid: a dictionary mapping invalid inputs to one or more + raised error messages. + field_args: the args passed to instantiate the field + field_kwargs: the kwargs passed to instantiate the field + empty_value: the expected clean output for inputs in empty_values + """ + if field_args is None: + field_args = [] + if field_kwargs is None: + field_kwargs = {} + required = fieldclass(*field_args, **field_kwargs) + optional = fieldclass(*field_args, **dict(field_kwargs, required=False)) + # test valid inputs + for input, output in valid.items(): + self.assertEqual(required.clean(input), output) + self.assertEqual(optional.clean(input), output) + # test invalid inputs + for input, errors in invalid.items(): + with self.assertRaises(ValidationError) as context_manager: + required.clean(input) + self.assertEqual(context_manager.exception.messages, errors) + + with self.assertRaises(ValidationError) as context_manager: + optional.clean(input) + self.assertEqual(context_manager.exception.messages, errors) + # test required inputs + error_required = [force_text(required.error_messages['required'])] + for e in required.empty_values: + with self.assertRaises(ValidationError) as context_manager: + required.clean(e) + self.assertEqual(context_manager.exception.messages, error_required) + self.assertEqual(optional.clean(e), empty_value) + # test that max_length and min_length are always accepted + if issubclass(fieldclass, CharField): + field_kwargs.update({'min_length': 2, 'max_length': 20}) + self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) + + def assertHTMLEqual(self, html1, html2, msg=None): + """ + Asserts that two HTML snippets are semantically the same. + Whitespace in most cases is ignored, and attribute ordering is not + significant. The passed-in arguments must be valid HTML. 
+ """ + dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') + dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') + + if dom1 != dom2: + standardMsg = '%s != %s' % ( + safe_repr(dom1, True), safe_repr(dom2, True)) + diff = ('\n' + '\n'.join(difflib.ndiff( + six.text_type(dom1).splitlines(), + six.text_type(dom2).splitlines(), + ))) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertHTMLNotEqual(self, html1, html2, msg=None): + """Asserts that two HTML snippets are not semantically equivalent.""" + dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') + dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') + + if dom1 == dom2: + standardMsg = '%s == %s' % ( + safe_repr(dom1, True), safe_repr(dom2, True)) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertInHTML(self, needle, haystack, count=None, msg_prefix=''): + needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') + haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') + real_count = haystack.count(needle) + if count is not None: + self.assertEqual( + real_count, count, + msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count) + ) + else: + self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) + + def assertJSONEqual(self, raw, expected_data, msg=None): + """ + Asserts that the JSON fragments raw and expected_data are equal. + Usual JSON non-significant whitespace rules apply as the heavyweight + is delegated to the json library. + """ + try: + data = json.loads(raw) + except ValueError: + self.fail("First argument is not valid JSON: %r" % raw) + if isinstance(expected_data, six.string_types): + try: + expected_data = json.loads(expected_data) + except ValueError: + self.fail("Second argument is not valid JSON: %r" % expected_data) + self.assertEqual(data, expected_data, msg=msg) + + def assertJSONNotEqual(self, raw, expected_data, msg=None): + """ + Asserts that the JSON fragments raw and expected_data are not equal. + Usual JSON non-significant whitespace rules apply as the heavyweight + is delegated to the json library. + """ + try: + data = json.loads(raw) + except ValueError: + self.fail("First argument is not valid JSON: %r" % raw) + if isinstance(expected_data, six.string_types): + try: + expected_data = json.loads(expected_data) + except ValueError: + self.fail("Second argument is not valid JSON: %r" % expected_data) + self.assertNotEqual(data, expected_data, msg=msg) + + def assertXMLEqual(self, xml1, xml2, msg=None): + """ + Asserts that two XML snippets are semantically the same. + Whitespace in most cases is ignored, and attribute ordering is not + significant. The passed-in arguments must be valid XML. 
+ """ + try: + result = compare_xml(xml1, xml2) + except Exception as e: + standardMsg = 'First or second argument is not valid XML\n%s' % e + self.fail(self._formatMessage(msg, standardMsg)) + else: + if not result: + standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) + diff = ('\n' + '\n'.join( + difflib.ndiff( + six.text_type(xml1).splitlines(), + six.text_type(xml2).splitlines(), + ) + )) + standardMsg = self._truncateMessage(standardMsg, diff) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertXMLNotEqual(self, xml1, xml2, msg=None): + """ + Asserts that two XML snippets are not semantically equivalent. + Whitespace in most cases is ignored, and attribute ordering is not + significant. The passed-in arguments must be valid XML. + """ + try: + result = compare_xml(xml1, xml2) + except Exception as e: + standardMsg = 'First or second argument is not valid XML\n%s' % e + self.fail(self._formatMessage(msg, standardMsg)) + else: + if result: + standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) + self.fail(self._formatMessage(msg, standardMsg)) + + +class TransactionTestCase(SimpleTestCase): + + # Subclasses can ask for resetting of auto increment sequence before each + # test case + reset_sequences = False + + # Subclasses can enable only a subset of apps for faster tests + available_apps = None + + # Subclasses can define fixtures which will be automatically installed. + fixtures = None + + # If transactions aren't available, Django will serialize the database + # contents into a fixture during setup and flush and reload them + # during teardown (as flush does not restore data from migrations). + # This can be slow; this flag allows enabling on a per-case basis. + serialized_rollback = False + + # Since tests will be wrapped in a transaction, or serialized if they + # are not available, we allow queries to be run. + allow_database_queries = True + + def _pre_setup(self): + """Performs any pre-test setup. This includes: + + * If the class has an 'available_apps' attribute, restricting the app + registry to these applications, then firing post_migrate -- it must + run with the correct set of applications for the test case. + * If the class has a 'fixtures' attribute, installing these fixtures. + """ + super(TransactionTestCase, self)._pre_setup() + if self.available_apps is not None: + apps.set_available_apps(self.available_apps) + setting_changed.send( + sender=settings._wrapped.__class__, + setting='INSTALLED_APPS', + value=self.available_apps, + enter=True, + ) + for db_name in self._databases_names(include_mirrors=False): + emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) + try: + self._fixture_setup() + except Exception: + if self.available_apps is not None: + apps.unset_available_apps() + setting_changed.send( + sender=settings._wrapped.__class__, + setting='INSTALLED_APPS', + value=settings.INSTALLED_APPS, + enter=False, + ) + raise + + @classmethod + def _databases_names(cls, include_mirrors=True): + # If the test case has a multi_db=True flag, act on all databases, + # including mirrors or not. Otherwise, just on the default DB. 
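        # Illustration (assumed settings): with multi_db = True and
        # DATABASES = {'default': {...}, 'replica': {..., 'TEST': {'MIRROR': 'default'}}},
        # include_mirrors=False returns ['default'] while include_mirrors=True
        # returns ['default', 'replica'].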
+ if getattr(cls, 'multi_db', False): + return [ + alias for alias in connections + if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR'] + ] + else: + return [DEFAULT_DB_ALIAS] + + def _reset_sequences(self, db_name): + conn = connections[db_name] + if conn.features.supports_sequence_reset: + sql_list = conn.ops.sequence_reset_by_name_sql( + no_style(), conn.introspection.sequence_list()) + if sql_list: + with transaction.atomic(using=db_name): + cursor = conn.cursor() + for sql in sql_list: + cursor.execute(sql) + + def _fixture_setup(self): + for db_name in self._databases_names(include_mirrors=False): + # Reset sequences + if self.reset_sequences: + self._reset_sequences(db_name) + + # If we need to provide replica initial data from migrated apps, + # then do so. + if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"): + if self.available_apps is not None: + apps.unset_available_apps() + connections[db_name].creation.deserialize_db_from_string( + connections[db_name]._test_serialized_contents + ) + if self.available_apps is not None: + apps.set_available_apps(self.available_apps) + + if self.fixtures: + # We have to use this slightly awkward syntax due to the fact + # that we're using *args and **kwargs together. + call_command('loaddata', *self.fixtures, + **{'verbosity': 0, 'database': db_name}) + + def _should_reload_connections(self): + return True + + def _post_teardown(self): + """Performs any post-test things. This includes: + + * Flushing the contents of the database, to leave a clean slate. If + the class has an 'available_apps' attribute, post_migrate isn't fired. + * Force-closing the connection, so the next test gets a clean cursor. + """ + try: + self._fixture_teardown() + super(TransactionTestCase, self)._post_teardown() + if self._should_reload_connections(): + # Some DB cursors include SQL statements as part of cursor + # creation. If you have a test that does a rollback, the effect + # of these statements is lost, which can affect the operation of + # tests (e.g., losing a timezone setting causing objects to be + # created with the wrong time). To make sure this doesn't + # happen, get a clean connection at the start of every test. + for conn in connections.all(): + conn.close() + finally: + if self.available_apps is not None: + apps.unset_available_apps() + setting_changed.send(sender=settings._wrapped.__class__, + setting='INSTALLED_APPS', + value=settings.INSTALLED_APPS, + enter=False) + + def _fixture_teardown(self): + # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal + # when flushing only a subset of the apps + for db_name in self._databases_names(include_mirrors=False): + # Flush the database + inhibit_post_migrate = ( + self.available_apps is not None or + ( # Inhibit the post_migrate signal when using serialized + # rollback to avoid trying to recreate the serialized data. 
+ self.serialized_rollback and + hasattr(connections[db_name], '_test_serialized_contents') + ) + ) + call_command('flush', verbosity=0, interactive=False, + database=db_name, reset_sequences=False, + allow_cascade=self.available_apps is not None, + inhibit_post_migrate=inhibit_post_migrate) + + def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None): + items = six.moves.map(transform, qs) + if not ordered: + return self.assertEqual(Counter(items), Counter(values), msg=msg) + values = list(values) + # For example qs.iterator() could be passed as qs, but it does not + # have 'ordered' attribute. + if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered: + raise ValueError("Trying to compare non-ordered queryset " + "against more than one ordered values") + return self.assertEqual(list(items), values, msg=msg) + + def assertNumQueries(self, num, func=None, *args, **kwargs): + using = kwargs.pop("using", DEFAULT_DB_ALIAS) + conn = connections[using] + + context = _AssertNumQueriesContext(self, num, conn) + if func is None: + return context + + with context: + func(*args, **kwargs) + + +def connections_support_transactions(): + """ + Returns True if all connections support transactions. + """ + return all(conn.features.supports_transactions + for conn in connections.all()) + + +class TestCase(TransactionTestCase): + """ + Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve + test isolation. + + In most situations, TestCase should be preferred to TransactionTestCase as + it allows faster execution. However, there are some situations where using + TransactionTestCase might be necessary (e.g. testing some transactional + behavior). + + On database backends with no transaction support, TestCase behaves as + TransactionTestCase. 
+ """ + @classmethod + def _enter_atomics(cls): + """Helper method to open atomic blocks for multiple databases""" + atomics = {} + for db_name in cls._databases_names(): + atomics[db_name] = transaction.atomic(using=db_name) + atomics[db_name].__enter__() + return atomics + + @classmethod + def _rollback_atomics(cls, atomics): + """Rollback atomic blocks opened through the previous method""" + for db_name in reversed(cls._databases_names()): + transaction.set_rollback(True, using=db_name) + atomics[db_name].__exit__(None, None, None) + + @classmethod + def setUpClass(cls): + super(TestCase, cls).setUpClass() + if not connections_support_transactions(): + return + cls.cls_atomics = cls._enter_atomics() + + if cls.fixtures: + for db_name in cls._databases_names(include_mirrors=False): + try: + call_command('loaddata', *cls.fixtures, **{ + 'verbosity': 0, + 'commit': False, + 'database': db_name, + }) + except Exception: + cls._rollback_atomics(cls.cls_atomics) + raise + try: + cls.setUpTestData() + except Exception: + cls._rollback_atomics(cls.cls_atomics) + raise + + @classmethod + def tearDownClass(cls): + if connections_support_transactions(): + cls._rollback_atomics(cls.cls_atomics) + for conn in connections.all(): + conn.close() + super(TestCase, cls).tearDownClass() + + @classmethod + def setUpTestData(cls): + """Load initial data for the TestCase""" + pass + + def _should_reload_connections(self): + if connections_support_transactions(): + return False + return super(TestCase, self)._should_reload_connections() + + def _fixture_setup(self): + if not connections_support_transactions(): + # If the backend does not support transactions, we should reload + # class data before each test + self.setUpTestData() + return super(TestCase, self)._fixture_setup() + + assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' + self.atomics = self._enter_atomics() + + def _fixture_teardown(self): + if not connections_support_transactions(): + return super(TestCase, self)._fixture_teardown() + try: + for db_name in reversed(self._databases_names()): + if self._should_check_constraints(connections[db_name]): + connections[db_name].check_constraints() + finally: + self._rollback_atomics(self.atomics) + + def _should_check_constraints(self, connection): + return ( + connection.features.can_defer_constraint_checks and + not connection.needs_rollback and connection.is_usable() + ) + + +class CheckCondition(object): + """Descriptor class for deferred condition checking""" + def __init__(self, cond_func): + self.cond_func = cond_func + + def __get__(self, instance, cls=None): + return self.cond_func() + + +def _deferredSkip(condition, reason): + def decorator(test_func): + if not (isinstance(test_func, type) and + issubclass(test_func, unittest.TestCase)): + @wraps(test_func) + def skip_wrapper(*args, **kwargs): + if condition(): + raise unittest.SkipTest(reason) + return test_func(*args, **kwargs) + test_item = skip_wrapper + else: + # Assume a class is decorated + test_item = test_func + test_item.__unittest_skip__ = CheckCondition(condition) + test_item.__unittest_skip_why__ = reason + return test_item + return decorator + + +def skipIfDBFeature(*features): + """ + Skip a test if a database has at least one of the named features. 
+ """ + return _deferredSkip( + lambda: any(getattr(connection.features, feature, False) for feature in features), + "Database has feature(s) %s" % ", ".join(features) + ) + + +def skipUnlessDBFeature(*features): + """ + Skip a test unless a database has all the named features. + """ + return _deferredSkip( + lambda: not all(getattr(connection.features, feature, False) for feature in features), + "Database doesn't support feature(s): %s" % ", ".join(features) + ) + + +def skipUnlessAnyDBFeature(*features): + """ + Skip a test unless a database has any of the named features. + """ + return _deferredSkip( + lambda: not any(getattr(connection.features, feature, False) for feature in features), + "Database doesn't support any of the feature(s): %s" % ", ".join(features) + ) + + +class QuietWSGIRequestHandler(WSGIRequestHandler): + """ + Just a regular WSGIRequestHandler except it doesn't log to the standard + output any of the requests received, so as to not clutter the output for + the tests' results. + """ + + def log_message(*args): + pass + + +class FSFilesHandler(WSGIHandler): + """ + WSGI middleware that intercepts calls to a directory, as defined by one of + the *_ROOT settings, and serves those files, publishing them under *_URL. + """ + def __init__(self, application): + self.application = application + self.base_url = urlparse(self.get_base_url()) + super(FSFilesHandler, self).__init__() + + def _should_handle(self, path): + """ + Checks if the path should be handled. Ignores the path if: + + * the host is provided as part of the base_url + * the request's path isn't under the media path (or equal) + """ + return path.startswith(self.base_url[2]) and not self.base_url[1] + + def file_path(self, url): + """ + Returns the relative path to the file on disk for the given URL. + """ + relative_url = url[len(self.base_url[2]):] + return url2pathname(relative_url) + + def get_response(self, request): + from django.http import Http404 + + if self._should_handle(request.path): + try: + return self.serve(request) + except Http404: + pass + return super(FSFilesHandler, self).get_response(request) + + def serve(self, request): + os_rel_path = self.file_path(request.path) + os_rel_path = posixpath.normpath(unquote(os_rel_path)) + # Emulate behavior of django.contrib.staticfiles.views.serve() when it + # invokes staticfiles' finders functionality. + # TODO: Modify if/when that internal API is refactored + final_rel_path = os_rel_path.replace('\\', '/').lstrip('/') + return serve(request, final_rel_path, document_root=self.get_base_dir()) + + def __call__(self, environ, start_response): + if not self._should_handle(get_path_info(environ)): + return self.application(environ, start_response) + return super(FSFilesHandler, self).__call__(environ, start_response) + + +class _StaticFilesHandler(FSFilesHandler): + """ + Handler for serving static files. A private class that is meant to be used + solely as a convenience by LiveServerThread. + """ + + def get_base_dir(self): + return settings.STATIC_ROOT + + def get_base_url(self): + return settings.STATIC_URL + + +class _MediaFilesHandler(FSFilesHandler): + """ + Handler for serving the media files. A private class that is meant to be + used solely as a convenience by LiveServerThread. + """ + + def get_base_dir(self): + return settings.MEDIA_ROOT + + def get_base_url(self): + return settings.MEDIA_URL + + +class LiveServerThread(threading.Thread): + """ + Thread for running a live http server while the tests are running. 
+ """ + + def __init__(self, host, possible_ports, static_handler, connections_override=None): + self.host = host + self.port = None + self.possible_ports = possible_ports + self.is_ready = threading.Event() + self.error = None + self.static_handler = static_handler + self.connections_override = connections_override + super(LiveServerThread, self).__init__() + + def run(self): + """ + Sets up the live server and databases, and then loops over handling + http requests. + """ + if self.connections_override: + # Override this thread's database connections with the ones + # provided by the main thread. + for alias, conn in self.connections_override.items(): + connections[alias] = conn + try: + # Create the handler for serving static and media files + handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) + + # Go through the list of possible ports, hoping that we can find + # one that is free to use for the WSGI server. + for index, port in enumerate(self.possible_ports): + try: + self.httpd = self._create_server(port) + except socket.error as e: + if (index + 1 < len(self.possible_ports) and + e.errno == errno.EADDRINUSE): + # This port is already in use, so we go on and try with + # the next one in the list. + continue + else: + # Either none of the given ports are free or the error + # is something else than "Address already in use". So + # we let that error bubble up to the main thread. + raise + else: + # A free port was found. + self.port = port + break + + self.httpd.set_app(handler) + self.is_ready.set() + self.httpd.serve_forever() + except Exception as e: + self.error = e + self.is_ready.set() + + def _create_server(self, port): + return WSGIServer((self.host, port), QuietWSGIRequestHandler, allow_reuse_address=False) + + def terminate(self): + if hasattr(self, 'httpd'): + # Stop the WSGI server + self.httpd.shutdown() + self.httpd.server_close() + + +class LiveServerTestCase(TransactionTestCase): + """ + Does basically the same as TransactionTestCase but also launches a live + http server in a separate thread so that the tests may use another testing + framework, such as Selenium for example, instead of the built-in dummy + client. + Note that it inherits from TransactionTestCase instead of TestCase because + the threads do not share the same transactions (unless if using in-memory + sqlite) and each thread needs to commit all their transactions so that the + other thread can see the changes. + """ + + static_handler = _StaticFilesHandler + + @classproperty + def live_server_url(cls): + return 'http://%s:%s' % ( + cls.server_thread.host, cls.server_thread.port) + + @classmethod + def setUpClass(cls): + super(LiveServerTestCase, cls).setUpClass() + connections_override = {} + for conn in connections.all(): + # If using in-memory sqlite databases, pass the connections to + # the server thread. + if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']): + # Explicitly enable thread-shareability for this connection + conn.allow_thread_sharing = True + connections_override[conn.alias] = conn + + # Launch the live server's thread + specified_address = os.environ.get( + 'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081-8179') + + # The specified ports may be of the form '8000-8010,8080,9200-9300' + # i.e. a comma-separated list of ports or ranges of ports, so we break + # it down into a detailed list of all possible ports. 
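        # Illustration (hypothetical value): 'localhost:8081-8083,9000' parses into
        # host='localhost' and possible_ports=[8081, 8082, 8083, 9000].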
+ possible_ports = [] + try: + host, port_ranges = specified_address.split(':') + for port_range in port_ranges.split(','): + # A port range can be of either form: '8000' or '8000-8010'. + extremes = list(map(int, port_range.split('-'))) + assert len(extremes) in [1, 2] + if len(extremes) == 1: + # Port range of the form '8000' + possible_ports.append(extremes[0]) + else: + # Port range of the form '8000-8010' + for port in range(extremes[0], extremes[1] + 1): + possible_ports.append(port) + except Exception: + msg = 'Invalid address ("%s") for live server.' % specified_address + six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2]) + cls.server_thread = cls._create_server_thread(host, possible_ports, connections_override) + cls.server_thread.daemon = True + cls.server_thread.start() + + # Wait for the live server to be ready + cls.server_thread.is_ready.wait() + if cls.server_thread.error: + # Clean up behind ourselves, since tearDownClass won't get called in + # case of errors. + cls._tearDownClassInternal() + raise cls.server_thread.error + + @classmethod + def _create_server_thread(cls, host, possible_ports, connections_override): + return LiveServerThread( + host, + possible_ports, + cls.static_handler, + connections_override=connections_override, + ) + + @classmethod + def _tearDownClassInternal(cls): + # There may not be a 'server_thread' attribute if setUpClass() for some + # reasons has raised an exception. + if hasattr(cls, 'server_thread'): + # Terminate the live server's thread + cls.server_thread.terminate() + cls.server_thread.join() + + # Restore sqlite in-memory database connections' non-shareability + for conn in connections.all(): + if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']): + conn.allow_thread_sharing = False + + @classmethod + def tearDownClass(cls): + cls._tearDownClassInternal() + super(LiveServerTestCase, cls).tearDownClass() + + +class SerializeMixin(object): + """ + Mixin to enforce serialization of TestCases that share a common resource. + + Define a common 'lockfile' for each set of TestCases to serialize. This + file must exist on the filesystem. + + Place it early in the MRO in order to isolate setUpClass / tearDownClass. + """ + + lockfile = None + + @classmethod + def setUpClass(cls): + if cls.lockfile is None: + raise ValueError( + "{}.lockfile isn't set. Set it to a unique value " + "in the base class.".format(cls.__name__)) + cls._lockfile = open(cls.lockfile) + locks.lock(cls._lockfile, locks.LOCK_EX) + super(SerializeMixin, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(SerializeMixin, cls).tearDownClass() + cls._lockfile.close() + +# -*- coding: utf-8 -*- +""" + werkzeug.testsuite.local + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Local and local proxy tests. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" +import time +import unittest +from threading import Thread + +from werkzeug.testsuite import WerkzeugTestCase + +from werkzeug import local + + +class LocalTestCase(WerkzeugTestCase): + + def test_basic_local(self): + l = local.Local() + l.foo = 0 + values = [] + def value_setter(idx): + time.sleep(0.01 * idx) + l.foo = idx + time.sleep(0.02) + values.append(l.foo) + threads = [Thread(target=value_setter, args=(x,)) + for x in [1, 2, 3]] + for thread in threads: + thread.start() + time.sleep(0.2) + assert sorted(values) == [1, 2, 3] + + def delfoo(): + del l.foo + delfoo() + self.assert_raises(AttributeError, lambda: l.foo) + self.assert_raises(AttributeError, delfoo) + + local.release_local(l) + + def test_local_release(self): + loc = local.Local() + loc.foo = 42 + local.release_local(loc) + assert not hasattr(loc, 'foo') + + ls = local.LocalStack() + ls.push(42) + local.release_local(ls) + assert ls.top is None + + def test_local_proxy(self): + foo = [] + ls = local.LocalProxy(lambda: foo) + ls.append(42) + ls.append(23) + ls[1:] = [1, 2, 3] + assert foo == [42, 1, 2, 3] + assert repr(foo) == repr(ls) + assert foo[0] == 42 + foo += [1] + assert list(foo) == [42, 1, 2, 3, 1] + + def test_local_proxy_operations_math(self): + foo = 2 + ls = local.LocalProxy(lambda: foo) + assert ls + 1 == 3 + assert 1 + ls == 3 + assert ls - 1 == 1 + assert 1 - ls == -1 + assert ls * 1 == 2 + assert 1 * ls == 2 + assert ls / 1 == 2 + assert 1.0 / ls == 0.5 + assert ls // 1.0 == 2.0 + assert 1.0 // ls == 0.0 + assert ls % 2 == 0 + assert 2 % ls == 0 + + def test_local_proxy_operations_strings(self): + foo = "foo" + ls = local.LocalProxy(lambda: foo) + assert ls + "bar" == "foobar" + assert "bar" + ls == "barfoo" + assert ls * 2 == "foofoo" + + foo = "foo %s" + assert ls % ("bar",) == "foo bar" + + def test_local_stack(self): + ident = local.get_ident() + + ls = local.LocalStack() + assert ident not in ls._local.__storage__ + assert ls.top is None + ls.push(42) + assert ident in ls._local.__storage__ + assert ls.top == 42 + ls.push(23) + assert ls.top == 23 + ls.pop() + assert ls.top == 42 + ls.pop() + assert ls.top is None + assert ls.pop() is None + assert ls.pop() is None + + proxy = ls() + ls.push([1, 2]) + assert proxy == [1, 2] + ls.push((1, 2)) + assert proxy == (1, 2) + ls.pop() + ls.pop() + assert repr(proxy) == '' + + assert ident not in ls._local.__storage__ + + def test_local_proxies_with_callables(self): + foo = 42 + ls = local.LocalProxy(lambda: foo) + assert ls == 42 + foo = [23] + ls.append(42) + assert ls == [23, 42] + assert foo == [23, 42] + + def test_custom_idents(self): + ident = 0 + loc = local.Local() + stack = local.LocalStack() + mgr = local.LocalManager([loc, stack], ident_func=lambda: ident) + + loc.foo = 42 + stack.push({'foo': 42}) + ident = 1 + loc.foo = 23 + stack.push({'foo': 23}) + ident = 0 + assert loc.foo == 42 + assert stack.top['foo'] == 42 + stack.pop() + assert stack.top is None + ident = 1 + assert loc.foo == 23 + assert stack.top['foo'] == 23 + stack.pop() + assert stack.top is None + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(LocalTestCase)) + return suite + +""" +Base IO code for all datasets +""" + +# Copyright (c) 2007 David Cournapeau +# 2010 Fabian Pedregosa +# 2010 Olivier Grisel +# License: BSD 3 clause + +import os +import csv +import shutil +import warnings +from os import environ +from os.path import dirname +from os.path import join +from os.path import exists +from os.path import expanduser +from os.path 
import isdir +from os import listdir +from os import makedirs + +import numpy as np + +from ..utils import check_random_state + + +class Bunch(dict): + """Container object for datasets: dictionary-like object that + exposes its keys as attributes.""" + + def __init__(self, **kwargs): + dict.__init__(self, kwargs) + self.__dict__ = self + + +def get_data_home(data_home=None): + """Return the path of the scikit-learn data dir. + + This folder is used by some large dataset loaders to avoid + downloading the data several times. + + By default the data dir is set to a folder named 'scikit_learn_data' + in the user home folder. + + Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment + variable or programmatically by giving an explit folder path. The + '~' symbol is expanded to the user home folder. + + If the folder does not already exist, it is automatically created. + """ + if data_home is None: + data_home = environ.get('SCIKIT_LEARN_DATA', + join('~', 'scikit_learn_data')) + data_home = expanduser(data_home) + if not exists(data_home): + makedirs(data_home) + return data_home + + +def clear_data_home(data_home=None): + """Delete all the content of the data home cache.""" + data_home = get_data_home(data_home) + shutil.rmtree(data_home) + + +def load_files(container_path, description=None, categories=None, + load_content=True, shuffle=True, encoding=None, + charset=None, charset_error=None, + decode_error='strict', random_state=0): + """Load text files with categories as subfolder names. + + Individual samples are assumed to be files stored a two levels folder + structure such as the following: + + container_folder/ + category_1_folder/ + file_1.txt + file_2.txt + ... + file_42.txt + category_2_folder/ + file_43.txt + file_44.txt + ... + + The folder names are used has supervised signal label names. The + individual file names are not important. + + This function does not try to extract features into a numpy array or + scipy sparse matrix. In addition, if load_content is false it + does not try to load the files in memory. + + To use text files in a scikit-learn classification or clustering + algorithm, you will need to use the `sklearn.feature_extraction.text` + module to build a feature extraction transformer that suits your + problem. + + If you set load_content=True, you should also specify the encoding of + the text using the 'encoding' parameter. For many modern text files, + 'utf-8' will be the correct encoding. If you leave encoding equal to None, + then the content will be made of bytes instead of Unicode, and you will + not be able to use most functions in `sklearn.feature_extraction.text`. + + Similar feature extractors should be built for other kind of unstructured + data input such as images, audio, video, ... + + Parameters + ---------- + container_path : string or unicode + Path to the main folder holding one subfolder per category + + description: string or unicode, optional (default=None) + A paragraph describing the characteristic of the dataset: its source, + reference, etc. + + categories : A collection of strings or None, optional (default=None) + If None (default), load all the categories. + If not None, list of category names to load (other categories ignored). + + load_content : boolean, optional (default=True) + Whether to load or not the content of the different files. If + true a 'data' attribute containing the text information is present + in the data structure returned. If not, a filenames attribute + gives the path to the files. 
+ + encoding : string or None (default is None) + If None, do not try to decode the content of the files (e.g. for + images or other non-text content). + If not None, encoding to use to decode text files to Unicode if + load_content is True. + + decode_error: {'strict', 'ignore', 'replace'}, optional + Instruction on what to do if a byte sequence is given to analyze that + contains characters not of the given `encoding`. Passed as keyword + argument 'errors' to bytes.decode. + + shuffle : bool, optional (default=True) + Whether or not to shuffle the data: might be important for models that + make the assumption that the samples are independent and identically + distributed (i.i.d.), such as stochastic gradient descent. + + random_state : int, RandomState instance or None, optional (default=0) + If int, random_state is the seed used by the random number generator; + If RandomState instance, random_state is the random number generator; + If None, the random number generator is the RandomState instance used + by `np.random`. + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: either + data, the raw text data to learn, or 'filenames', the files + holding it, 'target', the classification labels (integer index), + 'target_names', the meaning of the labels, and 'DESCR', the full + description of the dataset. + """ + if charset is not None: + warnings.warn("The charset parameter is deprecated as of version " + "0.14 and will be removed in 0.16. Use encode instead.", + DeprecationWarning) + encoding = charset + + if charset_error is not None: + warnings.warn("The charset_error parameter is deprecated as of " + "version 0.14 and will be removed in 0.16. Use " + "decode_error instead.", + DeprecationWarning) + decode_error = charset_error + + target = [] + target_names = [] + filenames = [] + + folders = [f for f in sorted(listdir(container_path)) + if isdir(join(container_path, f))] + + if categories is not None: + folders = [f for f in folders if f in categories] + + for label, folder in enumerate(folders): + target_names.append(folder) + folder_path = join(container_path, folder) + documents = [join(folder_path, d) + for d in sorted(listdir(folder_path))] + target.extend(len(documents) * [label]) + filenames.extend(documents) + + # convert to array for fancy indexing + filenames = np.array(filenames) + target = np.array(target) + + if shuffle: + random_state = check_random_state(random_state) + indices = np.arange(filenames.shape[0]) + random_state.shuffle(indices) + filenames = filenames[indices] + target = target[indices] + + if load_content: + data = [open(filename, 'rb').read() for filename in filenames] + if encoding is not None: + data = [d.decode(encoding, decode_error) for d in data] + return Bunch(data=data, + filenames=filenames, + target_names=target_names, + target=target, + DESCR=description) + + return Bunch(filenames=filenames, + target_names=target_names, + target=target, + DESCR=description) + + +def load_iris(): + """Load and return the iris dataset (classification). + + The iris dataset is a classic and very easy multi-class classification + dataset. 
+ + ================= ============== + Classes 3 + Samples per class 50 + Samples total 150 + Dimensionality 4 + Features real, positive + ================= ============== + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: + 'data', the data to learn, 'target', the classification labels, + 'target_names', the meaning of the labels, 'feature_names', the + meaning of the features, and 'DESCR', the + full description of the dataset. + + Examples + -------- + Let's say you are interested in the samples 10, 25, and 50, and want to + know their class name. + + >>> from sklearn.datasets import load_iris + >>> data = load_iris() + >>> data.target[[10, 25, 50]] + array([0, 0, 1]) + >>> list(data.target_names) + ['setosa', 'versicolor', 'virginica'] + """ + module_path = dirname(__file__) + data_file = csv.reader(open(join(module_path, 'data', 'iris.csv'))) + fdescr = open(join(module_path, 'descr', 'iris.rst')) + temp = next(data_file) + n_samples = int(temp[0]) + n_features = int(temp[1]) + target_names = np.array(temp[2:]) + data = np.empty((n_samples, n_features)) + target = np.empty((n_samples,), dtype=np.int) + + for i, ir in enumerate(data_file): + data[i] = np.asarray(ir[:-1], dtype=np.float) + target[i] = np.asarray(ir[-1], dtype=np.int) + + return Bunch(data=data, target=target, + target_names=target_names, + DESCR=fdescr.read(), + feature_names=['sepal length (cm)', 'sepal width (cm)', + 'petal length (cm)', 'petal width (cm)']) + + +def load_digits(n_class=10): + """Load and return the digits dataset (classification). + + Each datapoint is a 8x8 image of a digit. + + ================= ============== + Classes 10 + Samples per class ~180 + Samples total 1797 + Dimensionality 64 + Features integers 0-16 + ================= ============== + + + Parameters + ---------- + n_class : integer, between 0 and 10, optional (default=10) + The number of classes to return. + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: + 'data', the data to learn, 'images', the images corresponding + to each sample, 'target', the classification labels for each + sample, 'target_names', the meaning of the labels, and 'DESCR', + the full description of the dataset. + + Examples + -------- + To load the data and visualize the images:: + + >>> from sklearn.datasets import load_digits + >>> digits = load_digits() + >>> print(digits.data.shape) + (1797, 64) + >>> import pylab as pl #doctest: +SKIP + >>> pl.gray() #doctest: +SKIP + >>> pl.matshow(digits.images[0]) #doctest: +SKIP + >>> pl.show() #doctest: +SKIP + """ + module_path = dirname(__file__) + data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'), + delimiter=',') + descr = open(join(module_path, 'descr', 'digits.rst')).read() + target = data[:, -1] + flat_data = data[:, :-1] + images = flat_data.view() + images.shape = (-1, 8, 8) + + if n_class < 10: + idx = target < n_class + flat_data, target = flat_data[idx], target[idx] + images = images[idx] + + return Bunch(data=flat_data, + target=target.astype(np.int), + target_names=np.arange(10), + images=images, + DESCR=descr) + + +def load_diabetes(): + """Load and return the diabetes dataset (regression). 
+ + ============== ================== + Samples total 442 + Dimensionality 10 + Features real, -.2 < x < .2 + Targets integer 25 - 346 + ============== ================== + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: + 'data', the data to learn and 'target', the regression target for each + sample. + """ + base_dir = join(dirname(__file__), 'data') + data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz')) + target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz')) + return Bunch(data=data, target=target) + + +def load_linnerud(): + """Load and return the linnerud dataset (multivariate regression). + + Samples total: 20 + Dimensionality: 3 for both data and targets + Features: integer + Targets: integer + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: 'data' and + 'targets', the two multivariate datasets, with 'data' corresponding to + the exercise and 'targets' corresponding to the physiological + measurements, as well as 'feature_names' and 'target_names'. + """ + base_dir = join(dirname(__file__), 'data/') + # Read data + data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1) + data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv', + skiprows=1) + # Read header + with open(base_dir + 'linnerud_exercise.csv') as f: + header_exercise = f.readline().split() + with open(base_dir + 'linnerud_physiological.csv') as f: + header_physiological = f.readline().split() + with open(dirname(__file__) + '/descr/linnerud.rst') as f: + descr = f.read() + + return Bunch(data=data_exercise, feature_names=header_exercise, + target=data_physiological, + target_names=header_physiological, + DESCR=descr) + + +def load_boston(): + """Load and return the boston house-prices dataset (regression). + + ============== ============== + Samples total 506 + Dimensionality 13 + Features real, positive + Targets real 5. - 50. + ============== ============== + + Returns + ------- + data : Bunch + Dictionary-like object, the interesting attributes are: + 'data', the data to learn, 'target', the regression targets, + 'target_names', the meaning of the labels, and 'DESCR', the + full description of the dataset. + + Examples + -------- + >>> from sklearn.datasets import load_boston + >>> boston = load_boston() + >>> print(boston.data.shape) + (506, 13) + """ + module_path = dirname(__file__) + data_file = csv.reader(open(join(module_path, 'data', + 'boston_house_prices.csv'))) + fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst')) + temp = next(data_file) + n_samples = int(temp[0]) + n_features = int(temp[1]) + data = np.empty((n_samples, n_features)) + target = np.empty((n_samples,)) + temp = next(data_file) # names of features + feature_names = np.array(temp) + + for i, d in enumerate(data_file): + data[i] = np.asarray(d[:-1], dtype=np.float) + target[i] = np.asarray(d[-1], dtype=np.float) + + return Bunch(data=data, + target=target, + feature_names=feature_names, + DESCR=fdescr.read()) + + +def load_sample_images(): + """Load sample images for image manipulation. + Loads both, ``china`` and ``flower``. + + Returns + ------- + data : Bunch + Dictionary-like object with the following attributes : + 'images', the two sample images, 'filenames', the file + names for the images, and 'DESCR' + the full description of the dataset. 
+ + Examples + -------- + To load the data and visualize the images: + + >>> from sklearn.datasets import load_sample_images + >>> dataset = load_sample_images() #doctest: +SKIP + >>> len(dataset.images) #doctest: +SKIP + 2 + >>> first_img_data = dataset.images[0] #doctest: +SKIP + >>> first_img_data.shape #doctest: +SKIP + (427, 640, 3) + >>> first_img_data.dtype #doctest: +SKIP + dtype('uint8') + """ + # Try to import imread from scipy. We do this lazily here to prevent + # this module from depending on PIL. + try: + try: + from scipy.misc import imread + except ImportError: + from scipy.misc.pilutil import imread + except ImportError: + raise ImportError("The Python Imaging Library (PIL) " + "is required to load data from jpeg files") + module_path = join(dirname(__file__), "images") + with open(join(module_path, 'README.txt')) as f: + descr = f.read() + filenames = [join(module_path, filename) + for filename in os.listdir(module_path) + if filename.endswith(".jpg")] + # Load image data for each image in the source folder. + images = [imread(filename) for filename in filenames] + + return Bunch(images=images, + filenames=filenames, + DESCR=descr) + + +def load_sample_image(image_name): + """Load the numpy array of a single sample image + + Parameters + ----------- + image_name: {`china.jpg`, `flower.jpg`} + The name of the sample image loaded + + Returns + ------- + img: 3D array + The image as a numpy array: height x width x color + + Examples + --------- + + >>> from sklearn.datasets import load_sample_image + >>> china = load_sample_image('china.jpg') # doctest: +SKIP + >>> china.dtype # doctest: +SKIP + dtype('uint8') + >>> china.shape # doctest: +SKIP + (427, 640, 3) + >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP + >>> flower.dtype # doctest: +SKIP + dtype('uint8') + >>> flower.shape # doctest: +SKIP + (427, 640, 3) + """ + images = load_sample_images() + index = None + for i, filename in enumerate(images.filenames): + if filename.endswith(image_name): + index = i + break + if index is None: + raise AttributeError("Cannot find sample image: %s" % image_name) + return images.images[index] + +#!/usr/bin/env python +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import string +import sys + +HEADER = """\ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file automatically generated by testing/generate_gmock_mutant.py. +// DO NOT EDIT. + +#ifndef TESTING_GMOCK_MUTANT_H_ +#define TESTING_GMOCK_MUTANT_H_ + +// The intention of this file is to make possible using GMock actions in +// all of its syntactic beauty. Classes and helper functions can be used as +// more generic variants of Task and Callback classes (see base/task.h) +// Mutant supports both pre-bound arguments (like Task) and call-time +// arguments (like Callback) - hence the name. :-) +// +// DispatchToMethod/Function supports two sets of arguments: pre-bound (P) and +// call-time (C). The arguments as well as the return type are templatized. +// DispatchToMethod/Function will also try to call the selected method or +// function even if provided pre-bound arguments does not match exactly with +// the function signature hence the X1, X2 ... XN parameters in CreateFunctor. 
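+// For example (an illustrative case, assuming one pre-bound and two call-time
+// arguments), the corresponding generated DispatchToMethod overload simply does
+//   return (obj->*method)(p.a, c.a, c.b);
+// i.e. the pre-bound arguments are passed first, followed by the call-time ones.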
+// DispatchToMethod will try to invoke method that may not belong to the +// object's class itself but to the object's class base class. +// +// Additionally you can bind the object at calltime by binding a pointer to +// pointer to the object at creation time - before including this file you +// have to #define GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING. +// +// TODO(stoyan): It's yet not clear to me should we use T& and T&* instead +// of T* and T** when we invoke CreateFunctor to match the EXPECT_CALL style. +// +// +// Sample usage with gMock: +// +// struct Mock : public ObjectDelegate { +// MOCK_METHOD2(string, OnRequest(int n, const string& request)); +// MOCK_METHOD1(void, OnQuit(int exit_code)); +// MOCK_METHOD2(void, LogMessage(int level, const string& message)); +// +// string HandleFlowers(const string& reply, int n, const string& request) { +// string result = SStringPrintf("In request of %d %s ", n, request); +// for (int i = 0; i < n; ++i) result.append(reply) +// return result; +// } +// +// void DoLogMessage(int level, const string& message) { +// } +// +// void QuitMessageLoop(int seconds) { +// MessageLoop* loop = MessageLoop::current(); +// loop->PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(), +// 1000 * seconds); +// } +// }; +// +// Mock mock; +// // Will invoke mock.HandleFlowers("orchids", n, request) +// // "orchids" is a pre-bound argument, and and are call-time +// // arguments - they are not known until the OnRequest mock is invoked. +// EXPECT_CALL(mock, OnRequest(Ge(5), StartsWith("flower")) +// .Times(1) +// .WillOnce(Invoke(CreateFunctor(&mock, &Mock::HandleFlowers, +// string("orchids")))); +// +// +// // No pre-bound arguments, two call-time arguments passed +// // directly to DoLogMessage +// EXPECT_CALL(mock, OnLogMessage(_, _)) +// .Times(AnyNumber()) +// .WillAlways(Invoke(CreateFunctor, &mock, &Mock::DoLogMessage)); +// +// +// // In this case we have a single pre-bound argument - 3. We ignore +// // all of the arguments of OnQuit. +// EXCEPT_CALL(mock, OnQuit(_)) +// .Times(1) +// .WillOnce(InvokeWithoutArgs(CreateFunctor( +// &mock, &Mock::QuitMessageLoop, 3))); +// +// MessageLoop loop; +// loop.Run(); +// +// +// // Here is another example of how we can set an action that invokes +// // method of an object that is not yet created. +// struct Mock : public ObjectDelegate { +// MOCK_METHOD1(void, DemiurgeCreated(Demiurge*)); +// MOCK_METHOD2(void, OnRequest(int count, const string&)); +// +// void StoreDemiurge(Demiurge* w) { +// demiurge_ = w; +// } +// +// Demiurge* demiurge; +// } +// +// EXPECT_CALL(mock, DemiurgeCreated(_)).Times(1) +// .WillOnce(Invoke(CreateFunctor(&mock, &Mock::StoreDemiurge))); +// +// EXPECT_CALL(mock, OnRequest(_, StrEq("Moby Dick"))) +// .Times(AnyNumber()) +// .WillAlways(WithArgs<0>(Invoke( +// CreateFunctor(&mock->demiurge_, &Demiurge::DecreaseMonsters)))); +// + +#include "base/memory/linked_ptr.h" +#include "base/tuple.h" // for Tuple + +namespace testing {""" + +MUTANT = """\ + +// Interface that is exposed to the consumer, that does the actual calling +// of the method. +template +class MutantRunner { + public: + virtual R RunWithParams(const Params& params) = 0; + virtual ~MutantRunner() {} +}; + +// Mutant holds pre-bound arguments (like Task). Like Callback +// allows call-time arguments. You bind a pointer to the object +// at creation time. 
+template +class Mutant : public MutantRunner { + public: + Mutant(T* obj, Method method, const PreBound& pb) + : obj_(obj), method_(method), pb_(pb) { + } + + // MutantRunner implementation + virtual R RunWithParams(const Params& params) { + return DispatchToMethod(this->obj_, this->method_, pb_, params); + } + + T* obj_; + Method method_; + PreBound pb_; +}; + +template +class MutantFunction : public MutantRunner { + public: + MutantFunction(Function function, const PreBound& pb) + : function_(function), pb_(pb) { + } + + // MutantRunner implementation + virtual R RunWithParams(const Params& params) { + return DispatchToFunction(function_, pb_, params); + } + + Function function_; + PreBound pb_; +}; + +#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING +// MutantLateBind is like Mutant, but you bind a pointer to a pointer +// to the object. This way you can create actions for an object +// that is not yet created (has only storage for a pointer to it). +template +class MutantLateObjectBind : public MutantRunner { + public: + MutantLateObjectBind(T** obj, Method method, const PreBound& pb) + : obj_(obj), method_(method), pb_(pb) { + } + + // MutantRunner implementation. + virtual R RunWithParams(const Params& params) { + EXPECT_THAT(*this->obj_, testing::NotNull()); + if (NULL == *this->obj_) + return R(); + return DispatchToMethod( *this->obj_, this->method_, pb_, params); + } + + T** obj_; + Method method_; + PreBound pb_; +}; +#endif + +// Simple MutantRunner<> wrapper acting as a functor. +// Redirects operator() to MutantRunner::Run() +template +struct MutantFunctor { + explicit MutantFunctor(MutantRunner* cb) : impl_(cb) { + } + + ~MutantFunctor() { + } + + inline R operator()() { + return impl_->RunWithParams(Tuple0()); + } + + template + inline R operator()(const Arg1& a) { + return impl_->RunWithParams(Params(a)); + } + + template + inline R operator()(const Arg1& a, const Arg2& b) { + return impl_->RunWithParams(Params(a, b)); + } + + template + inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c) { + return impl_->RunWithParams(Params(a, b, c)); + } + + template + inline R operator()(const Arg1& a, const Arg2& b, const Arg3& c, + const Arg4& d) { + return impl_->RunWithParams(Params(a, b, c, d)); + } + + private: + // We need copy constructor since MutantFunctor is copied few times + // inside GMock machinery, hence no DISALLOW_EVIL_CONTRUCTORS + MutantFunctor(); + linked_ptr > impl_; +}; +""" + +FOOTER = """\ +} // namespace testing + +#endif // TESTING_GMOCK_MUTANT_H_""" + +# Templates for DispatchToMethod/DispatchToFunction functions. +# template_params - typename P1, typename P2.. typename C1.. +# prebound - TupleN +# calltime - TupleN +# args - p.a, p.b.., c.a, c.b.. +DISPATCH_TO_METHOD_TEMPLATE = """\ +template +inline R DispatchToMethod(T* obj, Method method, + const %(prebound)s& p, + const %(calltime)s& c) { + return (obj->*method)(%(args)s); +} +""" + +DISPATCH_TO_FUNCTION_TEMPLATE = """\ +template +inline R DispatchToFunction(Function function, + const %(prebound)s& p, + const %(calltime)s& c) { + return (*function)(%(args)s); +} +""" + +# Templates for CreateFunctor functions. +# template_params - typename P1, typename P2.. typename C1.. typename X1.. +# prebound - TupleN +# calltime - TupleN +# params - X1,.. , A1, .. +# args - const P1& p1 .. +# call_args - p1, p2, p3.. 
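+# As an illustrative example (hypothetical case: prebound=1, calltime=2), the
+# substitution dict built by GenerateCreateFunctor() below fills these
+# placeholders as:
+#   template_params - typename P1, typename A1, typename A2, typename X1
+#   prebound        - Tuple1<P1>
+#   calltime        - Tuple2<A1, A2>
+#   params          - X1, A1, A2
+#   args            - const P1& p1
+#   call_args       - p1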
+CREATE_METHOD_FUNCTOR_TEMPLATE = """\ +template +inline MutantFunctor +CreateFunctor(T* obj, R (U::*method)(%(params)s), %(args)s) { + MutantRunner* t = + new Mutant + (obj, method, MakeTuple(%(call_args)s)); + return MutantFunctor(t); +} +""" + +CREATE_FUNCTION_FUNCTOR_TEMPLATE = """\ +template +inline MutantFunctor +CreateFunctor(R (*function)(%(params)s), %(args)s) { + MutantRunner* t = + new MutantFunction + (function, MakeTuple(%(call_args)s)); + return MutantFunctor(t); +} +""" + +def SplitLine(line, width): + """Splits a single line at comma, at most |width| characters long.""" + if len(line) < width: + return (line, None) + n = 1 + line[:width].rfind(",") + if n == 0: # If comma cannot be found give up and return the entire line. + return (line, None) + # Assume there is a space after the comma + assert line[n] == " " + return (line[:n], line[n + 1:]) + + +def Wrap(s, width, subsequent_offset=4): + """Wraps a single line |s| at commas so every line is at most |width| + characters long. + """ + w = [] + spaces = " " * subsequent_offset + while s: + (f, s) = SplitLine(s, width) + w.append(f) + if s: + s = spaces + s + return "\n".join(w) + + +def Clean(s): + """Cleans artifacts from generated C++ code. + + Our simple string formatting/concatenation may introduce extra commas. + """ + s = s.replace("<>", "") + s = s.replace(", >", ">") + s = s.replace(", )", ")") + s = s.replace(">>", "> >") + return s + + +def ExpandPattern(pattern, it): + """Return list of expanded pattern strings. + + Each string is created by replacing all '%' in |pattern| with element of |it|. + """ + return [pattern.replace("%", x) for x in it] + + +def Gen(pattern, n): + """Expands pattern replacing '%' with sequential integers. + + Expanded patterns will be joined with comma separator. + GenAlphs("X%", 3) will return "X1, X2, X3". + """ + it = string.hexdigits[1:n + 1] + return ", ".join(ExpandPattern(pattern, it)) + + +def GenAlpha(pattern, n): + """Expands pattern replacing '%' with sequential small ASCII letters. + + Expanded patterns will be joined with comma separator. + GenAlphs("X%", 3) will return "Xa, Xb, Xc". + """ + it = string.ascii_lowercase[0:n] + return ", ".join(ExpandPattern(pattern, it)) + + +def Merge(a): + return ", ".join(filter(len, a)) + + +def GenTuple(pattern, n): + return Clean("Tuple%d<%s>" % (n, Gen(pattern, n))) + + +def FixCode(s): + lines = Clean(s).splitlines() + # Wrap sometimes very long 1st and 3rd line at 80th column. 
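+  # (lines[0] is the 'template <...>' parameter list and lines[2] the other
+  # potentially long line - the prebound tuple parameter in the Dispatch
+  # templates, or the CreateFunctor signature in the Create templates - which
+  # is why they use different continuation offsets.)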
+ lines[0] = Wrap(lines[0], 80, 10) + lines[2] = Wrap(lines[2], 80, 4) + return "\n".join(lines) + + +def GenerateDispatch(prebound, calltime): + print "\n// %d - %d" % (prebound, calltime) + args = { + "template_params": Merge([Gen("typename P%", prebound), + Gen("typename C%", calltime)]), + "prebound": GenTuple("P%", prebound), + "calltime": GenTuple("C%", calltime), + "args": Merge([GenAlpha("p.%", prebound), GenAlpha("c.%", calltime)]), + } + + print FixCode(DISPATCH_TO_METHOD_TEMPLATE % args) + print FixCode(DISPATCH_TO_FUNCTION_TEMPLATE % args) + + +def GenerateCreateFunctor(prebound, calltime): + print "// %d - %d" % (prebound, calltime) + args = { + "calltime": GenTuple("A%", calltime), + "prebound": GenTuple("P%", prebound), + "params": Merge([Gen("X%", prebound), Gen("A%", calltime)]), + "args": Gen("const P%& p%", prebound), + "call_args": Gen("p%", prebound), + "template_params": Merge([Gen("typename P%", prebound), + Gen("typename A%", calltime), + Gen("typename X%", prebound)]) + } + + mutant = FixCode(CREATE_METHOD_FUNCTOR_TEMPLATE % args) + print mutant + + # Slightly different version for free function call. + print "\n", FixCode(CREATE_FUNCTION_FUNCTOR_TEMPLATE % args) + + # Functor with pointer to a pointer of the object. + print "\n#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING" + mutant2 = mutant.replace("CreateFunctor(T* obj,", "CreateFunctor(T** obj,") + mutant2 = mutant2.replace("new Mutant", "new MutantLateObjectBind") + mutant2 = mutant2.replace(" " * 17 + "Tuple", " " * 31 + "Tuple") + print mutant2 + print "#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING\n" + + # OS_WIN specific. Same functors but with stdcall calling conventions. + # These are not for WIN64 (x86_64) because there is only one calling + # convention in WIN64. + # Functor for method with __stdcall calling conventions. + print "#if defined (OS_WIN) && !defined (ARCH_CPU_X86_64)" + stdcall_method = CREATE_METHOD_FUNCTOR_TEMPLATE + stdcall_method = stdcall_method.replace("U::", "__stdcall U::") + stdcall_method = FixCode(stdcall_method % args) + print stdcall_method + # Functor for free function with __stdcall calling conventions. + stdcall_function = CREATE_FUNCTION_FUNCTOR_TEMPLATE + stdcall_function = stdcall_function.replace("R (*", "R (__stdcall *") + print "\n", FixCode(stdcall_function % args) + + print "#ifdef GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING" + stdcall2 = stdcall_method + stdcall2 = stdcall2.replace("CreateFunctor(T* obj,", "CreateFunctor(T** obj,") + stdcall2 = stdcall2.replace("new Mutant", "new MutantLateObjectBind") + stdcall2 = stdcall2.replace(" " * 17 + "Tuple", " " * 31 + "Tuple") + print stdcall2 + print "#endif // GMOCK_MUTANT_INCLUDE_LATE_OBJECT_BINDING" + print "#endif // defined (OS_WIN) && !defined (ARCH_CPU_X86_64)\n" + + +def main(): + print HEADER + for prebound in xrange(0, 6 + 1): + for args in xrange(0, 6 + 1): + GenerateDispatch(prebound, args) + print MUTANT + for prebound in xrange(0, 6 + 1): + for args in xrange(0, 6 + 1): + GenerateCreateFunctor(prebound, args) + print FOOTER + return 0 + + +if __name__ == "__main__": + sys.exit(main()) + +# -*- coding: utf-8 -*- + +# Form implementation generated from reading ui file 'load_data_ds.ui' +# +# Created: Fri Jun 12 17:18:39 2015 +# by: PyQt4 UI code generator 4.11.3 +# +# WARNING! All changes made in this file will be lost! 
+ +from PyQt4 import QtCore, QtGui + +try: + _fromUtf8 = QtCore.QString.fromUtf8 +except AttributeError: + def _fromUtf8(s): + return s + +try: + _encoding = QtGui.QApplication.UnicodeUTF8 + def _translate(context, text, disambig): + return QtGui.QApplication.translate(context, text, disambig, _encoding) +except AttributeError: + def _translate(context, text, disambig): + return QtGui.QApplication.translate(context, text, disambig) + +class Ui_LoadDataDs(object): + def setupUi(self, LoadDataDs): + LoadDataDs.setObjectName(_fromUtf8("LoadDataDs")) + LoadDataDs.resize(1076, 520) + self.horizontalLayout = QtGui.QHBoxLayout(LoadDataDs) + self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) + self.verticalLayout = QtGui.QVBoxLayout() + self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) + self.SubAName = QtGui.QLabel(LoadDataDs) + self.SubAName.setObjectName(_fromUtf8("SubAName")) + self.verticalLayout.addWidget(self.SubAName) + self.SubAText = QtGui.QPlainTextEdit(LoadDataDs) + self.SubAText.setObjectName(_fromUtf8("SubAText")) + self.verticalLayout.addWidget(self.SubAText) + self.horizontalLayout.addLayout(self.verticalLayout) + self.verticalLayout_2 = QtGui.QVBoxLayout() + self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2")) + self.SubBName = QtGui.QLabel(LoadDataDs) + self.SubBName.setObjectName(_fromUtf8("SubBName")) + self.verticalLayout_2.addWidget(self.SubBName) + self.verticalWidget_2 = QtGui.QWidget(LoadDataDs) + self.verticalWidget_2.setObjectName(_fromUtf8("verticalWidget_2")) + self.verticalLayout_6 = QtGui.QVBoxLayout(self.verticalWidget_2) + self.verticalLayout_6.setMargin(0) + self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6")) + self.SubBText = QtGui.QDoubleSpinBox(self.verticalWidget_2) + self.SubBText.setDecimals(5) + self.SubBText.setMaximum(100000.0) + self.SubBText.setProperty("value", 1.0) + self.SubBText.setObjectName(_fromUtf8("SubBText")) + self.verticalLayout_6.addWidget(self.SubBText) + spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) + self.verticalLayout_6.addItem(spacerItem) + self.verticalLayout_2.addWidget(self.verticalWidget_2) + self.line_2 = QtGui.QFrame(LoadDataDs) + self.line_2.setFrameShape(QtGui.QFrame.HLine) + self.line_2.setFrameShadow(QtGui.QFrame.Sunken) + self.line_2.setObjectName(_fromUtf8("line_2")) + self.verticalLayout_2.addWidget(self.line_2) + self.verticalWidget = QtGui.QWidget(LoadDataDs) + self.verticalWidget.setObjectName(_fromUtf8("verticalWidget")) + self.verticalLayout_5 = QtGui.QVBoxLayout(self.verticalWidget) + self.verticalLayout_5.setMargin(0) + self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5")) + self.label = QtGui.QLabel(self.verticalWidget) + self.label.setObjectName(_fromUtf8("label")) + self.verticalLayout_5.addWidget(self.label) + self.InitConc = QtGui.QDoubleSpinBox(self.verticalWidget) + self.InitConc.setDecimals(5) + self.InitConc.setMaximum(999999.0) + self.InitConc.setProperty("value", 1.0) + self.InitConc.setObjectName(_fromUtf8("InitConc")) + self.verticalLayout_5.addWidget(self.InitConc) + self.label_2 = QtGui.QLabel(self.verticalWidget) + self.label_2.setObjectName(_fromUtf8("label_2")) + self.verticalLayout_5.addWidget(self.label_2) + self.TimeStep = QtGui.QDoubleSpinBox(self.verticalWidget) + self.TimeStep.setDecimals(5) + self.TimeStep.setMaximum(999999.0) + self.TimeStep.setProperty("value", 1.0) + self.TimeStep.setObjectName(_fromUtf8("TimeStep")) + self.verticalLayout_5.addWidget(self.TimeStep) + 
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) + self.verticalLayout_5.addItem(spacerItem1) + self.verticalLayout_2.addWidget(self.verticalWidget) + self.horizontalLayout.addLayout(self.verticalLayout_2) + self.verticalLayout_3 = QtGui.QVBoxLayout() + self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3")) + self.label_3 = QtGui.QLabel(LoadDataDs) + self.label_3.setObjectName(_fromUtf8("label_3")) + self.verticalLayout_3.addWidget(self.label_3) + self.RateText = QtGui.QPlainTextEdit(LoadDataDs) + self.RateText.setObjectName(_fromUtf8("RateText")) + self.verticalLayout_3.addWidget(self.RateText) + self.horizontalLayout.addLayout(self.verticalLayout_3) + self.verticalLayout_4 = QtGui.QVBoxLayout() + self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4")) + self.AddRep = QtGui.QPushButton(LoadDataDs) + self.AddRep.setObjectName(_fromUtf8("AddRep")) + self.verticalLayout_4.addWidget(self.AddRep) + spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) + self.verticalLayout_4.addItem(spacerItem2) + self.SwitchSet = QtGui.QPushButton(LoadDataDs) + self.SwitchSet.setObjectName(_fromUtf8("SwitchSet")) + self.verticalLayout_4.addWidget(self.SwitchSet) + self.SwitchRoles = QtGui.QPushButton(LoadDataDs) + self.SwitchRoles.setObjectName(_fromUtf8("SwitchRoles")) + self.verticalLayout_4.addWidget(self.SwitchRoles) + self.line = QtGui.QFrame(LoadDataDs) + self.line.setFrameShape(QtGui.QFrame.HLine) + self.line.setFrameShadow(QtGui.QFrame.Sunken) + self.line.setObjectName(_fromUtf8("line")) + self.verticalLayout_4.addWidget(self.line) + self.formLayout = QtGui.QFormLayout() + self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow) + self.formLayout.setObjectName(_fromUtf8("formLayout")) + self.label_7 = QtGui.QLabel(LoadDataDs) + self.label_7.setObjectName(_fromUtf8("label_7")) + self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_7) + self.SetNu = QtGui.QLabel(LoadDataDs) + self.SetNu.setObjectName(_fromUtf8("SetNu")) + self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.SetNu) + self.label_5 = QtGui.QLabel(LoadDataDs) + self.label_5.setObjectName(_fromUtf8("label_5")) + self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_5) + self.RepNu = QtGui.QLabel(LoadDataDs) + self.RepNu.setObjectName(_fromUtf8("RepNu")) + self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.RepNu) + self.verticalLayout_4.addLayout(self.formLayout) + spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Ignored) + self.verticalLayout_4.addItem(spacerItem3) + self.OKButton = QtGui.QPushButton(LoadDataDs) + self.OKButton.setObjectName(_fromUtf8("OKButton")) + self.verticalLayout_4.addWidget(self.OKButton) + self.CancelButton = QtGui.QPushButton(LoadDataDs) + self.CancelButton.setObjectName(_fromUtf8("CancelButton")) + self.verticalLayout_4.addWidget(self.CancelButton) + self.horizontalLayout.addLayout(self.verticalLayout_4) + + self.retranslateUi(LoadDataDs) + QtCore.QMetaObject.connectSlotsByName(LoadDataDs) + + def retranslateUi(self, LoadDataDs): + LoadDataDs.setWindowTitle(_translate("LoadDataDs", "Dialog", None)) + self.SubAName.setText(_translate("LoadDataDs", "SubsA", None)) + self.SubBName.setText(_translate("LoadDataDs", "SubsB", None)) + self.label.setText(_translate("LoadDataDs", "Concentration", None)) + self.label_2.setText(_translate("LoadDataDs", "Injection spacing (time)", None)) + 
self.label_3.setText(_translate("LoadDataDs", "Reaction rate", None)) + self.AddRep.setText(_translate("LoadDataDs", "Append replicate to set", None)) + self.SwitchSet.setText(_translate("LoadDataDs", "Next set", None)) + self.SwitchRoles.setText(_translate("LoadDataDs", "Switch substrate roles", None)) + self.label_7.setText(_translate("LoadDataDs", "Adding to set:", None)) + self.SetNu.setText(_translate("LoadDataDs", "1", None)) + self.label_5.setText(_translate("LoadDataDs", "Replicates present:", None)) + self.RepNu.setText(_translate("LoadDataDs", "0", None)) + self.OKButton.setText(_translate("LoadDataDs", "Import", None)) + self.CancelButton.setText(_translate("LoadDataDs", "Cancel", None)) + + +from __future__ import division, print_function, absolute_import + +import threading +import warnings +from . import _minpack + +import numpy as np +from numpy import (atleast_1d, dot, take, triu, shape, eye, + transpose, zeros, prod, greater, array, + all, where, isscalar, asarray, inf, abs, + finfo, inexact, issubdtype, dtype) +from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError +from scipy._lib._util import _asarray_validated, _lazywhere +from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning +from ._lsq import least_squares +from ._lsq.common import make_strictly_feasible +from ._lsq.least_squares import prepare_bounds + +error = _minpack.error + +__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] + + +def _check_func(checker, argname, thefunc, x0, args, numinputs, + output_shape=None): + res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) + if (output_shape is not None) and (shape(res) != output_shape): + if (output_shape[0] != 1): + if len(output_shape) > 1: + if output_shape[1] == 1: + return shape(res) + msg = "%s: there is a mismatch between the input and output " \ + "shape of the '%s' argument" % (checker, argname) + func_name = getattr(thefunc, '__name__', None) + if func_name: + msg += " '%s'." % func_name + else: + msg += "." + msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res)) + raise TypeError(msg) + if issubdtype(res.dtype, inexact): + dt = res.dtype + else: + dt = dtype(float) + return shape(res), dt + + +def fsolve(func, x0, args=(), fprime=None, full_output=0, + col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, + epsfcn=None, factor=100, diag=None): + """ + Find the roots of a function. + + Return the roots of the (non-linear) equations defined by + ``func(x) = 0`` given a starting estimate. + + Parameters + ---------- + func : callable ``f(x, *args)`` + A function that takes at least one (possibly vector) argument, + and returns a value of the same length. + x0 : ndarray + The starting estimate for the roots of ``func(x) = 0``. + args : tuple, optional + Any extra arguments to `func`. + fprime : callable ``f(x, *args)``, optional + A function to compute the Jacobian of `func` with derivatives + across the rows. By default, the Jacobian will be estimated. + full_output : bool, optional + If True, return optional outputs. + col_deriv : bool, optional + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float, optional + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int, optional + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. 
+ band : tuple, optional + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + epsfcn : float, optional + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). If + `epsfcn` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the + variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for + an unsuccessful call). + infodict : dict + A dictionary of optional outputs with the keys: + + ``nfev`` + number of function calls + ``njev`` + number of Jacobian calls + ``fvec`` + function evaluated at the output + ``fjac`` + the orthogonal matrix, q, produced by the QR + factorization of the final approximate Jacobian + matrix, stored column wise + ``r`` + upper triangular matrix produced by QR factorization + of the same matrix + ``qtf`` + the vector ``(transpose(q) * fvec)`` + + ier : int + An integer flag. Set to 1 if a solution was found, otherwise refer + to `mesg` for more information. + mesg : str + If no solution is found, `mesg` details the cause of failure. + + See Also + -------- + root : Interface to root finding algorithms for multivariate + functions. See the ``method=='hybr'`` in particular. + + Notes + ----- + ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. + + """ + options = {'col_deriv': col_deriv, + 'xtol': xtol, + 'maxfev': maxfev, + 'band': band, + 'eps': epsfcn, + 'factor': factor, + 'diag': diag} + + res = _root_hybr(func, x0, args, jac=fprime, **options) + if full_output: + x = res['x'] + info = dict((k, res.get(k)) + for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res) + info['fvec'] = res['fun'] + return x, info, res['status'], res['message'] + else: + status = res['status'] + msg = res['message'] + if status == 0: + raise TypeError(msg) + elif status == 1: + pass + elif status in [2, 3, 4, 5]: + warnings.warn(msg, RuntimeWarning) + else: + raise TypeError(msg) + return res['x'] + + +def _root_hybr(func, x0, args=(), jac=None, + col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, + factor=100, diag=None, **unknown_options): + """ + Find the roots of a multivariate function using MINPACK's hybrd and + hybrj routines (modified Powell method). + + Options + ------- + col_deriv : bool + Specify whether the Jacobian function computes derivatives down + the columns (faster, because there is no transpose operation). + xtol : float + The calculation will terminate if the relative error between two + consecutive iterates is at most `xtol`. + maxfev : int + The maximum number of calls to the function. If zero, then + ``100*(N+1)`` is the maximum where N is the number of elements + in `x0`. + band : tuple + If set to a two-sequence containing the number of sub- and + super-diagonals within the band of the Jacobi matrix, the + Jacobi matrix is considered banded (only for ``fprime=None``). + eps : float + A suitable step length for the forward-difference + approximation of the Jacobian (for ``fprime=None``). 
If + `eps` is less than the machine precision, it is assumed + that the relative errors in the functions are of the order of + the machine precision. + factor : float + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in the interval + ``(0.1, 100)``. + diag : sequence + N positive entries that serve as a scale factors for the + variables. + + """ + _check_unknown_options(unknown_options) + epsfcn = eps + + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) + if epsfcn is None: + epsfcn = finfo(dtype).eps + Dfun = jac + if Dfun is None: + if band is None: + ml, mu = -10, -10 + else: + ml, mu = band[:2] + if maxfev == 0: + maxfev = 200 * (n + 1) + retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, + ml, mu, epsfcn, factor, diag) + else: + _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) + if (maxfev == 0): + maxfev = 100 * (n + 1) + retval = _minpack._hybrj(func, Dfun, x0, args, 1, + col_deriv, xtol, maxfev, factor, diag) + + x, status = retval[0], retval[-1] + + errors = {0: "Improper input parameters were entered.", + 1: "The solution converged.", + 2: "The number of calls to function has " + "reached maxfev = %d." % maxfev, + 3: "xtol=%f is too small, no further improvement " + "in the approximate\n solution " + "is possible." % xtol, + 4: "The iteration is not making good progress, as measured " + "by the \n improvement from the last five " + "Jacobian evaluations.", + 5: "The iteration is not making good progress, " + "as measured by the \n improvement from the last " + "ten iterations.", + 'unknown': "An error occurred."} + + info = retval[1] + info['fun'] = info.pop('fvec') + sol = OptimizeResult(x=x, success=(status == 1), status=status) + sol.update(info) + try: + sol['message'] = errors[status] + except KeyError: + sol['message'] = errors['unknown'] + + return sol + + +LEASTSQ_SUCCESS = [1, 2, 3, 4] +LEASTSQ_FAILURE = [5, 6, 7, 8] + + +def leastsq(func, x0, args=(), Dfun=None, full_output=0, + col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + """ + Minimize the sum of squares of a set of equations. + + :: + + x = arg min(sum(func(y)**2,axis=0)) + y + + Parameters + ---------- + func : callable + should take at least one (possibly length N vector) argument and + returns M floating point numbers. It must not return NaNs or + fitting might fail. + x0 : ndarray + The starting estimate for the minimization. + args : tuple, optional + Any extra arguments to func are placed in this tuple. + Dfun : callable, optional + A function or method to compute the Jacobian of func with derivatives + across the rows. If this is None, the Jacobian will be estimated. + full_output : bool, optional + non-zero to return all optional outputs. + col_deriv : bool, optional + non-zero to specify that the Jacobian function computes derivatives + down the columns (faster, because there is no transpose operation). + ftol : float, optional + Relative error desired in the sum of squares. + xtol : float, optional + Relative error desired in the approximate solution. + gtol : float, optional + Orthogonality desired between the function vector and the columns of + the Jacobian. + maxfev : int, optional + The maximum number of calls to the function. 
If `Dfun` is provided + then the default `maxfev` is 100*(N+1) where N is the number of elements + in x0, otherwise the default `maxfev` is 200*(N+1). + epsfcn : float, optional + A variable used in determining a suitable step length for the forward- + difference approximation of the Jacobian (for Dfun=None). + Normally the actual step length will be sqrt(epsfcn)*x + If epsfcn is less than the machine precision, it is assumed that the + relative errors are of the order of the machine precision. + factor : float, optional + A parameter determining the initial step bound + (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. + diag : sequence, optional + N positive entries that serve as a scale factors for the variables. + + Returns + ------- + x : ndarray + The solution (or the result of the last iteration for an unsuccessful + call). + cov_x : ndarray + The inverse of the Hessian. `fjac` and `ipvt` are used to construct an + estimate of the Hessian. A value of None indicates a singular matrix, + which means the curvature in parameters `x` is numerically flat. To + obtain the covariance matrix of the parameters `x`, `cov_x` must be + multiplied by the variance of the residuals -- see curve_fit. + infodict : dict + a dictionary of optional outputs with the keys: + + ``nfev`` + The number of function calls + ``fvec`` + The function evaluated at the output + ``fjac`` + A permutation of the R matrix of a QR + factorization of the final approximate + Jacobian matrix, stored column wise. + Together with ipvt, the covariance of the + estimate can be approximated. + ``ipvt`` + An integer array of length N which defines + a permutation matrix, p, such that + fjac*p = q*r, where r is upper triangular + with diagonal elements of nonincreasing + magnitude. Column j of p is column ipvt(j) + of the identity matrix. + ``qtf`` + The vector (transpose(q) * fvec). + + mesg : str + A string message giving information about the cause of failure. + ier : int + An integer flag. If it is equal to 1, 2, 3 or 4, the solution was + found. Otherwise, the solution was not found. In either case, the + optional output variable 'mesg' gives more information. + + See Also + -------- + least_squares : Newer interface to solve nonlinear least-squares problems + with bounds on the variables. See ``method=='lm'`` in particular. + + Notes + ----- + "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. + + cov_x is a Jacobian approximation to the Hessian of the least squares + objective function. + This approximation assumes that the objective function is based on the + difference between some observed target data (ydata) and a (non-linear) + function of the parameters `f(xdata, params)` :: + + func(params) = ydata - f(xdata, params) + + so that the objective function is :: + + min sum((ydata - f(xdata, params))**2, axis=0) + params + + The solution, `x`, is always a 1D array, regardless of the shape of `x0`, + or whether `x0` is a scalar. 
+ """ + x0 = asarray(x0).flatten() + n = len(x0) + if not isinstance(args, tuple): + args = (args,) + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] + + if n > m: + raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) + + if epsfcn is None: + epsfcn = finfo(dtype).eps + + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100 * (n + 1) + retval = _minpack._lmder(func, Dfun, x0, args, full_output, + col_deriv, ftol, xtol, gtol, maxfev, + factor, diag) + + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + "in the sum of squares\n are at most %f" % ftol, None], + 2: ["The relative error between two consecutive " + "iterates is at most %f" % xtol, None], + 3: ["Both actual and predicted relative reductions in " + "the sum of squares\n are at most %f and the " + "relative error between two consecutive " + "iterates is at \n most %f" % (ftol, xtol), None], + 4: ["The cosine of the angle between func(x) and any " + "column of the\n Jacobian is at most %f in " + "absolute value" % gtol, None], + 5: ["Number of calls to function has reached " + "maxfev = %d." % maxfev, ValueError], + 6: ["ftol=%f is too small, no further reduction " + "in the sum of squares\n is possible." % ftol, + ValueError], + 7: ["xtol=%f is too small, no further improvement in " + "the approximate\n solution is possible." % xtol, + ValueError], + 8: ["gtol=%f is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine " + "precision." 
% gtol, ValueError]} + + # The FORTRAN return value (possible return values are >= 0 and <= 8) + info = retval[-1] + + if full_output: + cov_x = None + if info in LEASTSQ_SUCCESS: + from numpy.dual import inv + perm = take(eye(n), retval[1]['ipvt'] - 1, 0) + r = triu(transpose(retval[1]['fjac'])[:n, :]) + R = dot(r, perm) + try: + cov_x = inv(dot(transpose(R), R)) + except (LinAlgError, ValueError): + pass + return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) + else: + if info in LEASTSQ_FAILURE: + warnings.warn(errors[info][0], RuntimeWarning) + elif info == 0: + raise errors[info][1](errors[info][0]) + return retval[0], info + + +def _wrap_func(func, xdata, ydata, transform): + if transform is None: + def func_wrapped(params): + return func(xdata, *params) - ydata + elif transform.ndim == 1: + def func_wrapped(params): + return transform * (func(xdata, *params) - ydata) + else: + # Chisq = (y - yd)^T C^{-1} (y-yd) + # transform = L such that C = L L^T + # C^{-1} = L^{-T} L^{-1} + # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) + # Define (y-yd)' = L^{-1} (y-yd) + # by solving + # L (y-yd)' = (y-yd) + # and minimize (y-yd)'^T (y-yd)' + def func_wrapped(params): + return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) + return func_wrapped + + +def _wrap_jac(jac, xdata, transform): + if transform is None: + def jac_wrapped(params): + return jac(xdata, *params) + elif transform.ndim == 1: + def jac_wrapped(params): + return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) + else: + def jac_wrapped(params): + return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True) + return jac_wrapped + + +def _initialize_feasible(lb, ub): + p0 = np.ones_like(lb) + lb_finite = np.isfinite(lb) + ub_finite = np.isfinite(ub) + + mask = lb_finite & ub_finite + p0[mask] = 0.5 * (lb[mask] + ub[mask]) + + mask = lb_finite & ~ub_finite + p0[mask] = lb[mask] + 1 + + mask = ~lb_finite & ub_finite + p0[mask] = ub[mask] - 1 + + return p0 + + +def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, + check_finite=True, bounds=(-np.inf, np.inf), method=None, + jac=None, **kwargs): + """ + Use non-linear least squares to fit a function, f, to data. + + Assumes ``ydata = f(xdata, *params) + eps`` + + Parameters + ---------- + f : callable + The model function, f(x, ...). It must take the independent + variable as the first argument and the parameters to fit as + separate remaining arguments. + xdata : array_like or object + The independent variable where the data is measured. + Should usually be an M-length sequence or an (k,M)-shaped array for + functions with k predictors, but can actually be any object. + ydata : array_like + The dependent data, a length M array - nominally ``f(xdata, ...)``. + p0 : array_like, optional + Initial guess for the parameters (length N). If None, then the + initial values will all be 1 (if the number of parameters for the + function can be determined using introspection, otherwise a + ValueError is raised). + sigma : None or M-length sequence or MxM array, optional + Determines the uncertainty in `ydata`. If we define residuals as + ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` + depends on its number of dimensions: + + - A 1-d `sigma` should contain values of standard deviations of + errors in `ydata`. In this case, the optimized function is + ``chisq = sum((r / sigma) ** 2)``. + + - A 2-d `sigma` should contain the covariance matrix of + errors in `ydata`. 
In this case, the optimized function is + ``chisq = r.T @ inv(sigma) @ r``. + + .. versionadded:: 0.19 + + None (default) is equivalent of 1-d `sigma` filled with ones. + absolute_sigma : bool, optional + If True, `sigma` is used in an absolute sense and the estimated parameter + covariance `pcov` reflects these absolute values. + + If False, only the relative magnitudes of the `sigma` values matter. + The returned parameter covariance matrix `pcov` is based on scaling + `sigma` by a constant factor. This constant is set by demanding that the + reduced `chisq` for the optimal parameters `popt` when using the + *scaled* `sigma` equals unity. In other words, `sigma` is scaled to + match the sample variance of the residuals after the fit. + Mathematically, + ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` + check_finite : bool, optional + If True, check that the input arrays do not contain nans of infs, + and raise a ValueError if they do. Setting this parameter to + False may silently produce nonsensical results if the input arrays + do contain nans. Default is True. + bounds : 2-tuple of array_like, optional + Lower and upper bounds on parameters. Defaults to no bounds. + Each element of the tuple must be either an array with the length equal + to the number of parameters, or a scalar (in which case the bound is + taken to be the same for all parameters.) Use ``np.inf`` with an + appropriate sign to disable bounds on all or some parameters. + + .. versionadded:: 0.17 + method : {'lm', 'trf', 'dogbox'}, optional + Method to use for optimization. See `least_squares` for more details. + Default is 'lm' for unconstrained problems and 'trf' if `bounds` are + provided. The method 'lm' won't work when the number of observations + is less than the number of variables, use 'trf' or 'dogbox' in this + case. + + .. versionadded:: 0.17 + jac : callable, string or None, optional + Function with signature ``jac(x, ...)`` which computes the Jacobian + matrix of the model function with respect to parameters as a dense + array_like structure. It will be scaled according to provided `sigma`. + If None (default), the Jacobian will be estimated numerically. + String keywords for 'trf' and 'dogbox' methods can be used to select + a finite difference scheme, see `least_squares`. + + .. versionadded:: 0.18 + kwargs + Keyword arguments passed to `leastsq` for ``method='lm'`` or + `least_squares` otherwise. + + Returns + ------- + popt : array + Optimal values for the parameters so that the sum of the squared + residuals of ``f(xdata, *popt) - ydata`` is minimized + pcov : 2d array + The estimated covariance of popt. The diagonals provide the variance + of the parameter estimate. To compute one standard deviation errors + on the parameters use ``perr = np.sqrt(np.diag(pcov))``. + + How the `sigma` parameter affects the estimated covariance + depends on `absolute_sigma` argument, as described above. + + If the Jacobian matrix at the solution doesn't have a full rank, then + 'lm' method returns a matrix filled with ``np.inf``, on the other hand + 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute + the covariance matrix. + + Raises + ------ + ValueError + if either `ydata` or `xdata` contain NaNs, or if incompatible options + are used. + + RuntimeError + if the least-squares minimization fails. + + OptimizeWarning + if covariance of the parameters can not be estimated. + + See Also + -------- + least_squares : Minimize the sum of squares of nonlinear functions. 
+ scipy.stats.linregress : Calculate a linear least squares regression for + two sets of measurements. + + Notes + ----- + With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm + through `leastsq`. Note that this algorithm can only deal with + unconstrained problems. + + Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to + the docstring of `least_squares` for more information. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy.optimize import curve_fit + + >>> def func(x, a, b, c): + ... return a * np.exp(-b * x) + c + + Define the data to be fit with some noise: + + >>> xdata = np.linspace(0, 4, 50) + >>> y = func(xdata, 2.5, 1.3, 0.5) + >>> np.random.seed(1729) + >>> y_noise = 0.2 * np.random.normal(size=xdata.size) + >>> ydata = y + y_noise + >>> plt.plot(xdata, ydata, 'b-', label='data') + + Fit for the parameters a, b, c of the function `func`: + + >>> popt, pcov = curve_fit(func, xdata, ydata) + >>> popt + array([ 2.55423706, 1.35190947, 0.47450618]) + >>> plt.plot(xdata, func(xdata, *popt), 'r-', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + Constrain the optimization to the region of ``0 <= a <= 3``, + ``0 <= b <= 1`` and ``0 <= c <= 0.5``: + + >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) + >>> popt + array([ 2.43708906, 1. , 0.35015434]) + >>> plt.plot(xdata, func(xdata, *popt), 'g--', + ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) + + >>> plt.xlabel('x') + >>> plt.ylabel('y') + >>> plt.legend() + >>> plt.show() + + """ + if p0 is None: + # determine number of parameters by inspecting the function + from scipy._lib._util import getargspec_no_self as _getargspec + args, varargs, varkw, defaults = _getargspec(f) + if len(args) < 2: + raise ValueError("Unable to determine number of fit parameters.") + n = len(args) - 1 + else: + p0 = np.atleast_1d(p0) + n = p0.size + + lb, ub = prepare_bounds(bounds, n) + if p0 is None: + p0 = _initialize_feasible(lb, ub) + + bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) + if method is None: + if bounded_problem: + method = 'trf' + else: + method = 'lm' + + if method == 'lm' and bounded_problem: + raise ValueError("Method 'lm' only works for unconstrained problems. " + "Use 'trf' or 'dogbox' instead.") + + # optimization may produce garbage for float32 inputs, cast them to float64 + + # NaNs can not be handled + if check_finite: + ydata = np.asarray_chkfinite(ydata, float) + else: + ydata = np.asarray(ydata, float) + + if isinstance(xdata, (list, tuple, np.ndarray)): + # `xdata` is passed straight to the user-defined `f`, so allow + # non-array_like `xdata`. 
+ if check_finite: + xdata = np.asarray_chkfinite(xdata, float) + else: + xdata = np.asarray(xdata, float) + + if ydata.size == 0: + raise ValueError("`ydata` must not be empty!") + + # Determine type of sigma + if sigma is not None: + sigma = np.asarray(sigma) + + # if 1-d, sigma are errors, define transform = 1/sigma + if sigma.shape == (ydata.size, ): + transform = 1.0 / sigma + # if 2-d, sigma is the covariance matrix, + # define transform = L such that L L^T = C + elif sigma.shape == (ydata.size, ydata.size): + try: + # scipy.linalg.cholesky requires lower=True to return L L^T = A + transform = cholesky(sigma, lower=True) + except LinAlgError: + raise ValueError("`sigma` must be positive definite.") + else: + raise ValueError("`sigma` has incorrect shape.") + else: + transform = None + + func = _wrap_func(f, xdata, ydata, transform) + if callable(jac): + jac = _wrap_jac(jac, xdata, transform) + elif jac is None and method != 'lm': + jac = '2-point' + + if 'args' in kwargs: + # The specification for the model function `f` does not support + # additional arguments. Refer to the `curve_fit` docstring for + # acceptable call signatures of `f`. + raise ValueError("'args' is not a supported keyword argument.") + + if method == 'lm': + # Remove full_output from kwargs, otherwise we're passing it in twice. + return_full = kwargs.pop('full_output', False) + res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) + popt, pcov, infodict, errmsg, ier = res + ysize = len(infodict['fvec']) + cost = np.sum(infodict['fvec'] ** 2) + if ier not in [1, 2, 3, 4]: + raise RuntimeError("Optimal parameters not found: " + errmsg) + else: + # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. + if 'max_nfev' not in kwargs: + kwargs['max_nfev'] = kwargs.pop('maxfev', None) + + res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, + **kwargs) + + if not res.success: + raise RuntimeError("Optimal parameters not found: " + res.message) + + ysize = len(res.fun) + cost = 2 * res.cost # res.cost is half sum of squares! + popt = res.x + + # Do Moore-Penrose inverse discarding zero singular values. + _, s, VT = svd(res.jac, full_matrices=False) + threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] + s = s[s > threshold] + VT = VT[:s.size] + pcov = np.dot(VT.T / s**2, VT) + return_full = False + + warn_cov = False + if pcov is None: + # indeterminate covariance + pcov = zeros((len(popt), len(popt)), dtype=float) + pcov.fill(inf) + warn_cov = True + elif not absolute_sigma: + if ysize > p0.size: + s_sq = cost / (ysize - p0.size) + pcov = pcov * s_sq + else: + pcov.fill(inf) + warn_cov = True + + if warn_cov: + warnings.warn('Covariance of the parameters could not be estimated', + category=OptimizeWarning) + + if return_full: + return popt, pcov, infodict, errmsg, ier + else: + return popt, pcov + + +def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): + """Perform a simple check on the gradient for correctness. 
+ + """ + + x = atleast_1d(x0) + n = len(x) + x = x.reshape((n,)) + fvec = atleast_1d(fcn(x, *args)) + m = len(fvec) + fvec = fvec.reshape((m,)) + ldfjac = m + fjac = atleast_1d(Dfcn(x, *args)) + fjac = fjac.reshape((m, n)) + if col_deriv == 0: + fjac = transpose(fjac) + + xp = zeros((n,), float) + err = zeros((m,), float) + fvecp = None + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) + + fvecp = atleast_1d(fcn(xp, *args)) + fvecp = fvecp.reshape((m,)) + _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) + + good = (prod(greater(err, 0.5), axis=0)) + + return (good, err) + + +def _del2(p0, p1, d): + return p0 - np.square(p1 - p0) / d + + +def _relerr(actual, desired): + return (actual - desired) / desired + + +def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): + p0 = x0 + for i in range(maxiter): + p1 = func(p0, *args) + if use_accel: + p2 = func(p1, *args) + d = p2 - 2.0 * p1 + p0 + p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) + else: + p = p1 + relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) + if np.all(np.abs(relerr) < xtol): + return p + p0 = p + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): + """ + Find a fixed point of the function. + + Given a function of one or more variables and a starting point, find a + fixed-point of the function: i.e. where ``func(x0) == x0``. + + Parameters + ---------- + func : function + Function to evaluate. + x0 : array_like + Fixed point of function. + args : tuple, optional + Extra arguments to `func`. + xtol : float, optional + Convergence tolerance, defaults to 1e-08. + maxiter : int, optional + Maximum number of iterations, defaults to 500. + method : {"del2", "iteration"}, optional + Method of finding the fixed-point, defaults to "del2" + which uses Steffensen's Method with Aitken's ``Del^2`` + convergence acceleration [1]_. The "iteration" method simply iterates + the function until convergence is detected, without attempting to + accelerate the convergence. + + References + ---------- + .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 + + Examples + -------- + >>> from scipy import optimize + >>> def func(x, c1, c2): + ... return np.sqrt(c1/(x+c2)) + >>> c1 = np.array([10,12.]) + >>> c2 = np.array([3, 5.]) + >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) + array([ 1.4920333 , 1.37228132]) + + """ + use_accel = {'del2': True, 'iteration': False}[method] + x0 = _asarray_validated(x0, as_inexact=True) + return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel) + +from django.template.defaultfilters import default +from django.test import SimpleTestCase +from django.utils.safestring import mark_safe + +from ..utils import setup + + +class DefaultTests(SimpleTestCase): + """ + Literal string arguments to the default filter are always treated as + safe strings, regardless of the auto-escaping state. + + Note: we have to use {"a": ""} here, otherwise the invalid template + variable string interferes with the test result. 
+ """ + + @setup({'default01': '{{ a|default:"x<" }}'}) + def test_default01(self): + output = self.engine.render_to_string('default01', {"a": ""}) + self.assertEqual(output, "x<") + + @setup({'default02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) + def test_default02(self): + output = self.engine.render_to_string('default02', {"a": ""}) + self.assertEqual(output, "x<") + + @setup({'default03': '{{ a|default:"x<" }}'}) + def test_default03(self): + output = self.engine.render_to_string('default03', {"a": mark_safe("x>")}) + self.assertEqual(output, "x>") + + @setup({'default04': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) + def test_default04(self): + output = self.engine.render_to_string('default04', {"a": mark_safe("x>")}) + self.assertEqual(output, "x>") + + +class DefaultIfNoneTests(SimpleTestCase): + + @setup({'default_if_none01': '{{ a|default:"x<" }}'}) + def test_default_if_none01(self): + output = self.engine.render_to_string('default_if_none01', {"a": None}) + self.assertEqual(output, "x<") + + @setup({'default_if_none02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) + def test_default_if_none02(self): + output = self.engine.render_to_string('default_if_none02', {"a": None}) + self.assertEqual(output, "x<") + + +class FunctionTests(SimpleTestCase): + + def test_value(self): + self.assertEqual(default('val', 'default'), 'val') + + def test_none(self): + self.assertEqual(default(None, 'default'), 'default') + + def test_empty_string(self): + self.assertEqual(default('', 'default'), 'default') + +# -*- coding: utf-8 -*- + +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. + +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. + :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) Float describing the timeout of the request in seconds. + :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'http://httpbin.org/get') + + """ + + session = sessions.Session() + return session.request(method=method, url=url, **kwargs) + + +def get(url, **kwargs): + """Sends a GET request. Returns :class:`Response` object. 
+ + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + kwargs.setdefault('allow_redirects', True) + return request('get', url, **kwargs) + + +def options(url, **kwargs): + """Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + kwargs.setdefault('allow_redirects', True) + return request('options', url, **kwargs) + + +def head(url, **kwargs): + """Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + kwargs.setdefault('allow_redirects', False) + return request('head', url, **kwargs) + + +def post(url, data=None, **kwargs): + """Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + return request('post', url, data=data, **kwargs) + + +def put(url, data=None, **kwargs): + """Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + return request('put', url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + """Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + """ + + return request('patch', url, data=data, **kwargs) + + +def delete(url, **kwargs): + """Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ """ + + return request('delete', url, **kwargs) + +# -*- coding: utf-8 -*- +# @Author: chandan +# @Date: 2017-07-08 00:32:09 +# @Last Modified by: chandan +# @Last Modified time: 2017-07-08 11:13:46 + +from data_utils import read_file +from config import DATA_DIR, SCORE_COLUMNS +import os +from model import train_model, test_model + +import pandas as pd +import numpy as np +from sklearn.preprocessing import MinMaxScaler +from sklearn.model_selection import train_test_split + +import os.path as osp + +ACC_FILE = 'RAW_ACCELEROMETERS.txt' +GPS_FILE = 'RAW_GPS.txt' +VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt' +SCORE_FILE = 'SEMANTIC_ONLINE.txt' + + +def main(): + # read acc, gps, veh det for multiple drivers, scenes + X_dfs, Y_dfs = [], [] + driver_dir = 'D1' + + for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)): + drive_path = osp.join(DATA_DIR, driver_dir, drive_dir) + print drive_path + + acc = read_file(osp.join(drive_path, ACC_FILE)) + gps = read_file(osp.join(drive_path, GPS_FILE)) + veh = read_file(osp.join(drive_path, VEHDET_FILE)) + + score = read_file(osp.join(drive_path, SCORE_FILE)) + datasets = [acc, gps, veh, score] + n_rows = min(map(len, datasets)) + + # sample high frequency data to lowest frequency + for i in range(len(datasets)): + # drop time column + datasets[i].drop(0, 1, inplace=True) + + if len(datasets[i]) > n_rows: + step = len(datasets[i]) / n_rows + ndx = xrange(0, n_rows * step, step) + datasets[i] = datasets[i].ix[ndx] + datasets[i] = datasets[i].reset_index(drop=True) + + score_df = datasets[-1] + datasets = datasets[:-1] + Y_df = score.ix[:, SCORE_COLUMNS] + + # create dataset + X_df = pd.concat(datasets, axis=1, ignore_index=True) + X_df.fillna(0, inplace=True) + print "X:", X_df.shape + print "Y:", score_df.shape + + X_dfs.append(X_df) + Y_dfs.append(Y_df) + + # preprocess + X_df = pd.concat(X_dfs, ignore_index=True) + X = X_df.values.astype('float32') + Y = pd.concat(Y_dfs, ignore_index=True).values + + print "X shape:", X.shape + print "Y shape:", Y.shape + + scaler = MinMaxScaler(feature_range=(0, 1)) + X = scaler.fit_transform(X) + + X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2) + + # train + print "X Train shape:", X_tr.shape + print "Y Train shape:", Y_tr.shape + + print "X test shape:", X_ts.shape + print "Y test shape:", Y_ts.shape + + seq_len = 16 + + X_tr_seq = X_to_seq(X, seq_len, 1) + Y_tr = Y_tr[seq_len:] + + X_ts_seq = X_to_seq(X_ts, seq_len, 1) + Y_ts = Y_ts[seq_len:] + + #train_model(X_tr, Y_tr) + + loss = test_model(X_ts_seq, Y_ts) + print loss + +def X_to_seq(X, seq_len=16, stride=1): + X_seqs = [] + + for start_ndx in range(0, len(X) - seq_len, stride): + X_seqs.append(X[start_ndx : start_ndx + seq_len]) + + return np.array(X_seqs) + +if __name__ == '__main__': + main() +#!/usr/bin/env python3 + +################################################################################# +# # +# Copyright (c) 2016 Allen Majewski (altoidnerd) # +# Permission is hereby granted, free of charge, to any person obtaining a # +# copy of this software and associated documentation files (the "Software"), # +# to deal in the Software without restriction, including without limitation # +# the rights to use, copy, modify, merge, publish, distribute, sublicense, # +# and/or sell copies of the Software, and to permit persons to whom the # +# Software is furnished to do so, subject to the following conditions: # +# # +# The above copyright notice and this permission notice shall be included # +# in all copies or substantial portions of the 
Software. # +# # +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # +# THE SOFTWARE. # +# # +################################################################################# + +import numpy as np +import os +import sys +import numpy.linalg as la + + + +################### +# basic math tools# +################### + +def norm(arr): + sum = 0 + for i in arr: + sum += float(i)**2 + return sum**.5 + + +def angle3(p1,p2,p3): + """ + Returns the bond angle corresponding + to three atomic positions. + You need to pass it numpy arrays + which is natural if you already + transformed the coordinates with + the lattice vectors + ... returns in degrees + """ + v1=p2-p1 + v2=p3-p2 + dot = v1@v2 + costheta = dot/(norm(v1)*norm(v2)) + return np.arccos(costheta)*180/np.pi-180 + +def angle2(r1,r2): + """ + Returns the angle between + two vectors. Pass numpy + arrays. + ... returns in RADIANS + """ + dot = r1@r2 + costheta = dot/(norm(r1)*norm(r2)) + return np.arccos(costheta) + +def rotx(theta): + """ + Returns a rotations matrix + that rotates a vector by an + angle theta about the x-axis. + """ + cos = np.cos + sin = np.sin + rotmat = [] + r1 = [ 1 , 0 , 0 ] + r2 = [ 0 , cos(theta),-sin(theta)] + r3 = [ 0 , sin(theta), cos(theta)] + rows=[r1,r2,r3] + + for row in rows: + rotmat.append(np.array(row)) + + return rotmat + + +def roty(theta): + """ + Returns a rotations matrix + that rotates a vector by an + angle theta about the y-axis. + """ + cos = np.cos + sin = np.sin + rotmat = [] + r1 = [ cos(theta), 0 , sin(theta)] + r2 = [ 0 , 1 , 0 ] + r3 = [-sin(theta), 0 , cos(theta)] + rows=[r1,r2,r3] + + for row in rows: + rotmat.append(np.array(row)) + + return rotmat + +def rotz(theta): + """ + Returns a rotations matrix + that rotates a vector by an + angle theta about the z-axis. + """ + cos = np.cos + sin = np.sin + rotmat = [] + r1 = [ cos(theta),-sin(theta), 0 ] + r2 = [ sin(theta), cos(theta), 0 ] + r3 = [ 0 , 0 , 1 ] + rows=[r1,r2,r3] + for row in rows: + rotmat.append(np.array(row)) + + return rotmat + +# for testing +# unit vectors +xhat=np.array([1,0,0]) +yhat=np.array([0,1,0]) +zhat=np.array([0,0,1]) +# common angles +t0 =2*np.pi +t30 = np.pi/6 +t60 = np.pi/3 +t90 = np.pi/2 +t180= np.pi +t270=3*np.pi/2 +t360=t0 + +################### +# scf.in parsiing # +################### + +def get_pwi_latvecs(pwi_file=None): + """ + Opens a pw.x input file and returns a np.matrix + of the CELL_PARAMETERS card. Recall + = latvecs @ + """ + if pwi_file is None: + pwi_file = smart_picker('pwi', os.getcwd()) + pwi = open(pwi_file, 'r').readlines() + + cell_params_start = min( + [ pwi.index(line) for line in pwi + if 'CELL_PARAM' in line ] + ) + params = [] + c = cell_params_start + return np.array([ line.split() for line in pwi[c+1:c+4] ], float).T + + +def get_pwi_crystal_coords(pwi_file=None, names=False): + """ + Opens a pw.x input file + and returns a numpy array of coordinates. 
+ WARNING: it is zero indexed unline in PWSCF + and the get_efg_tensors() function + """ + if pwi_file is None: + pwi_file = smart_picker('pwi', os.getcwd()) + pwi = open(pwi_file, 'r').readlines() + + nat = int(sanitize_ends("".join([line for line in pwi if 'nat=' in line]))) + positions_card_startline = min( + [ pwi.index(line) for line in pwi + if 'ATOMIC_POSITIONS' in line] + ) + p = positions_card_startline + if not names: + return np.array([[0,0,0]]+ [ line.split()[1:] for line in pwi[ p: p+nat+1 ]][1:], float) + return [ line.split() for line in pwi[ p: p+nat+1 ]] + + + +def get_pwi_atomic_species(pwi_file=None, coordinates=False, tostring=False): + if pwi_file is None: + pwi_file = smart_picker('pwi', os.getcwd()) + pwi = open(pwi_file,'r').readlines() + + nat = int(sanitize_ends("".join([line for line in pwi if 'nat=' in line]))) + positions_card_startline = min( + [ pwi.index(line) for line in pwi + if 'ATOMIC_POSITIONS' in line] + ) + p = positions_card_startline + if not coordinates: + return [ line.split()[0] for line in pwi[ p: p+nat+1 ]] + if not tostring: + return [ line.split() for line in pwi[ p: p+nat+1 ]] + return [ line for line in pwi[ p: p+nat+1 ]] + +def get_pwi_alat_coords(pwi_file=None, tostring=False): + """ + Retrurns the coordinates in alat units + """ + latvecs = get_pwi_latvecs(pwi_file) + if not tostring: + return np.array([ np.dot(latvecs,vec).tolist() for vec in get_pwi_crystal_coords() ]) + else: + return [ ' '.join( list( map( str, np.dot(latvecs, vec)))) for vec in get_pwi_crystal_coords() ] + + +def get_pwi_pseudos(pwi_file=None): + """ + Returns a list of the pseudopotentials + used in a pwscf input. + """ + if pwi_file is None: + pwi_file = smart_picker('pwi', os.getcwd()) + pwi = open(pwi_file, 'r').readlines() + +# pseudo-validation - not yet +# pseudos_wordlist=[ "UPF","psl","pz","vwm","pbe","blyp","pw91","tpss","coulomb","ae","mt","bhs","vbc","van""rrkj","rrkjus","kjpaw","bpaw"] + pwi = open(pwi_file, 'r').readlines() + atomic_species_startline = min( + [ pwi.index(line) for line in pwi + if 'SPECIES' in line ] + ) + a = atomic_species_startline + ntyp = int(sanitize_ends("".join([line for line in pwi if 'ntyp=' in line]))) + species_list = [] + n = a + 1 + while len(species_list) < ntyp: + if not(set(pwi[n]).issubset({'\n','','\t','\r','!','/'})): + species_list.append(pwi[n]) + n += 1 + else: + n += 1 + + if len(species_list) == ntyp: + return [ li.split()[2] for li in species_list ] + + + + + + +################### +# magres parsing # +################### + +def get_efg_tensors(magres_file=None): + """ + Arguement is a magres format efg outfile. + Returns a list of EFG matrices (numpy.ndarray), + 1-indexed as site number in pwscf + (the zeroth position is empty). + """ + if magres_file is None: + magres_file = [ fil for fil in os.listdir('.') if fil.endswith('magres') ][0] + print('magres_file not specified. 
Openeing: {}'.format(magres_file)) + magres = open(magres_file,'r').readlines() + return [ np.array([line.split()[3:6], line.split()[6:9], line.split()[9:12]], float) for line in magres if 'efg' in line ] + + +def get_raw_efg_eigvecs(magres_file=None): + return np.array( [[]] + [ eigvecs(thing) for thing in get_efg_tensors(magres_file)[1:] ] ) + +def get_raw_efg_eigvals(magres_file=None): + return np.array( [[]] + [ eigvals(thing) for thing in get_efg_tensors(magres_file)[1:] ] ) + +# +# We may like to brush the dust off our linear algebra instinct +# +# efgs = get_efg_tensors() +# eigenvals = get_raw_efg_eigvals() +# eigenvecs = get_raw_efg_eigvecs() +# then we have, where is the nuclesr site ndex: 0 <= i <= nat; k is x,y,z so 0 <= k <= 2 +# ( efgs[i] @ eigenvecs[i].T[k] ) / eigenvals[i][k] == eigenvecs[i].T[k] +# +# though it will not always evaluate to true due to some floating point errors. +# + +def get_eigenparis(magres_file=None): + """ + get_eigenparis()[i][j][k]: + i in { 1..nat }; j in {0,1}; k in {0,1,2} + i: {1..nat} -> atomic specie + j: {0,1} -> {eigenvalues, eigenvectos/axes} + k: {0,1,2} -> {x,y,z}/{xx,yy,zz} + """ + return np.array( [[]] + [ (eigvals(thing), eigvecs(thing)) for thing in get_efg_tensors(magres_file)[1:] ] ) + + +def eigenmachine(magres_file=None): + """ + eigen_machine()[i][k]: + i in {0, 1}-> {VALS, VECS} + k in {0, nat -1} -> specie + NOTE: NOT 1-INDEXED!!! ZERO INDEXED FUNCTION + """ + return la.eigh(get_efg_tensors(magres_file)[1:]) + + +def get_efgs_dict(magres_file=None, nat=24): + """ + get_efgs_dict('infile') + -> dict(k,v) where k is an int + atom index e.g. 1, 2, 3 + and v is a dict of + efg tensor parameters + specify option getlist=True + to return a list instead + """ + + efgs_dict = dict() + for i in range(1, nat+1): + efgs_dict[i] = dict() + + spec_data = [[]] + [ la.eigh(get_efg_tensors(magres_file)[k]) for k in range(1,nat+1) ] + + for k in range(1,nat+1): + tmpdict = dict() + data = spec_data[k] + + mygenvals = data[0] + lmygenvals = mygenvals.tolist() + sort_genvals = np.sort( np.abs( spec_data[k][0] )).tolist() + + + vzzpm = sort_genvals.pop() + vyypm = sort_genvals.pop() + vxxpm = sort_genvals.pop() + + # print('vzzpm, vyypm, vzzpm', vzzpm, vyypm, vzzpm) + + mygenvecs = data[1].T + lmygenvecs = mygenvecs.tolist() + + if vzzpm in data[0]: + VZZ = vzzpm + else: + VZZ = -vzzpm + if vyypm in data[0]: + VYY = vyypm + else: + VYY = -vyypm + if vxxpm in data[0]: + VXX = vxxpm + else: + VXX = -vxxpm + + efgs_dict[k]['Vzz'] = VZZ + efgs_dict[k]['Vyy'] = VYY + efgs_dict[k]['Vxx'] = VXX + + + efgs_dict[k]['z-axis'] = lmygenvecs[lmygenvals.index(VZZ)] + efgs_dict[k]['y-axis'] = lmygenvecs[lmygenvals.index(VYY)] + efgs_dict[k]['x-axis'] = lmygenvecs[lmygenvals.index(VXX)] + + + return efgs_dict + + + + +#################### +# efg.out parsing # +#################### + + + +def get_axes(infile, keyword=None): + """ +get_axes('efg.*.out') -> array-like +argument is an efg output file + +kw -> e.g. 'Cl 1' + +Returns an array containing the +primed principal axes components. +Override the default keyword +using the kw argument. 
+
+get_axes(infile)[0] <- X'
+get_axes(infile)[1] <- Y'
+get_axes(infile)[2] <- Z'
+    """
+    #Vxx, X =-1.6267, np.array([ -0.310418, -0.435918, 0.844758 ])
+    #Vyy, Y =-1.9819, np.array([ 0.522549, 0.664099, 0.534711 ])
+    #Vzz, Z = 3.6086, np.array([ -0.794093, 0.607411, 0.021640 ])
+    #keyword = 'Cl 1'
+
+    if keyword is None:
+        keyword = 'Cl 1'
+
+    f = open(infile,'r').readlines()
+    relevant = [ line.strip().replace(')','').replace('(','') for line in f if keyword in line and 'axis' in line ]
+    axes_list = [ line.split()[5:] for line in relevant ]
+    axes_list = np.array([ list(map(float, axis)) for axis in axes_list ])
+    # require the same signs as the reference set of axes
+    if axes_list[0][0] > 0:
+        axes_list[0] = -1*axes_list[0]
+    if axes_list[1][0] < 0:
+        axes_list[1] = -1*axes_list[1]
+    if axes_list[2][0] > 0:
+        axes_list[2] = -1*axes_list[2]
+    return axes_list
+
+#this_X = get_axes(sys.argv[1])[0]
+#this_Y = get_axes(sys.argv[1])[1]
+#this_Z = get_axes(sys.argv[1])[2]
+
+#print(this_X,this_Y,this_Z)
+
+#get_axes(sys.argv[1])
+
+def get_Vijs(infile, keyword='Cl 1'):
+    # keyword defaults to 'Cl 1' as in get_axes(); returning the parsed
+    # components is assumed, since the original body ended without a return.
+    f = open(infile,'r').readlines()
+    relevant = [ line.strip().replace(')','').replace('(','') for line in f if keyword in line and 'axis' in line ]
+    axes_list = [ line.split()[5:] for line in relevant ]
+    axes_list = np.array([ list(map(float, axis)) for axis in axes_list ])
+    return axes_list
+
+def get_angles(infile, tensor=None):
+    """
+get_angles('efg.*.out') -> array-like
+argument is an efg output file
+
+Returns an array containing the
+euler angles for the given
+EFG principal axes relative
+to the fixed axes (hard coded).
+
+get_angles(infile)[0] <- theta_X
+get_angles(infile)[1] <- theta_Y
+    """
+    if tensor is None:
+        Vxx, X =-1.6267, np.array([ -0.310418, -0.435918, 0.844758 ])
+        Vyy, Y =-1.9819, np.array([ 0.522549, 0.664099, 0.534711 ])
+        Vzz, Z = 3.6086, np.array([ -0.794093, 0.607411, 0.021640 ])
+
+
+    this_X = get_axes(infile)[0]
+    this_Y = get_axes(infile)[1]
+    this_Z = get_axes(infile)[2]
+    theta_X = np.arcsin((this_Z@Y)/np.linalg.norm(Y))
+    theta_Y = np.arcsin((this_Z@X)/(np.linalg.norm(X)*np.cos(theta_X)))
+    return np.array( [ theta_X, theta_Y ])
+
+
+
+#####################
+# pwscf.out parsing #
+#####################
+
+
+def get_pwo_forces(pwo_file=None):
+    if pwo_file is None:
+        pwo_file = [ fil for fil in os.listdir('.') if (fil.endswith('out') or fil.endswith('pwo')) and ('scf' in fil or 'relax' in fil or 'md' in fil ) ][0]
+        print('No input specified: opening {}'.format(pwo_file))
+
+    pwo = open(pwo_file,'r').readlines()
+    force_lines = [ line for line in pwo if 'force =' in line ]
+    numlines = len(force_lines)
+    nat = int(numlines/7)
+    return (force_lines[:nat])
+
+
+
+####################
+# util/helpers     #
+####################
+
+def smart_picker(find_type, path='.'):
+    if find_type == 'pwi':
+        choice = [ fil for fil in os.listdir('.')
+                   if ( (fil.endswith('in') or fil.endswith('pwi'))
+                        or 'inp' in fil)
+                   and ('scf' in fil
+                        or 'relax' in fil
+                        or 'md' in fil) ][0]
+    if find_type == 'magres':
+        choice = [ fil for fil in os.listdir('.')
+                   if fil.endswith('magres') ][0]
+    if find_type == 'pwo':
+        choice = [ fil for fil in os.listdir('.')
+                   if (fil.endswith('out') or fil.endswith('pwo'))
+                   and ('scf' in fil or 'relax' in fil or 'md' in fil ) ][0]
+    print("No input specified. Opening: {}".format(choice))
+    return choice
+
+def sanitize_ends(s, targets=' \n\tabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"`}{[]\\|/=<>,.?!'):
+    # The tail of the default character set and the body were lost in this dump;
+    # '=<>,.?!' and a two-sided strip are assumed here, since callers do
+    # int(sanitize_ends(...)) on namelist lines such as ' nat = 24,'.
+    return s.strip(targets)
+
+
+def monoclinic_latvecs(a, b, c, beta):
+    # The original 'def' line was also lost; the name used here is assumed from
+    # the docstring, which describes monoclinic (ibrav=-12) lattice vectors.
+    """
+    monoclinic_latvecs(a, b, c, beta) -> np.array([ v1, v2, v3 ])
+
+    ...
+ + From PW_INPUT.html: + -12 Monoclinic P, unique axis b + celldm(2)=b/a + celldm(3)=c/a, + celldm(5)=cos(ac) + v1 = (a,0,0), v2 = (0,b,0), v3 = (c*cos(beta),0,c*sin(beta)) + where beta is the angle between axis a and c + + + """ + v1 = [a,0,0] + v2 = [0,b,0] + v3 = [c*np.cos(beta),0,c*np.sin(beta)] + + return np.array([v1,v2,v3]) + + + +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2017, F5 Networks Inc. +# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'certified'} + +DOCUMENTATION = r''' +--- +module: bigip_gtm_monitor_https +short_description: Manages F5 BIG-IP GTM https monitors +description: + - Manages F5 BIG-IP GTM https monitors. +version_added: 2.6 +options: + name: + description: + - Monitor name. + type: str + required: True + parent: + description: + - The parent template of this monitor template. Once this value has + been set, it cannot be changed. By default, this value is the C(tcp) + parent on the C(Common) partition. + type: str + default: /Common/https + send: + description: + - The send string for the monitor call. + - When creating a new monitor, if this parameter is not provided, the + default of C(GET /\r\n) will be used. + type: str + receive: + description: + - The receive string for the monitor call. + type: str + ip: + description: + - IP address part of the IP/port definition. If this parameter is not + provided when creating a new monitor, then the default value will be + '*'. + - If this value is an IP address, then a C(port) number must be specified. + type: str + port: + description: + - Port address part of the IP/port definition. If this parameter is not + provided when creating a new monitor, then the default value will be + '*'. Note that if specifying an IP address, a value between 1 and 65535 + must be specified. + type: str + interval: + description: + - The interval specifying how frequently the monitor instance of this + template will run. + - If this parameter is not provided when creating a new monitor, then + the default value will be 30. + - This value B(must) be less than the C(timeout) value. + type: int + timeout: + description: + - The number of seconds in which the node or service must respond to + the monitor request. If the target responds within the set time + period, it is considered up. If the target does not respond within + the set time period, it is considered down. You can change this + number to any number you want, however, it should be 3 times the + interval number of seconds plus 1 second. + - If this parameter is not provided when creating a new monitor, then the + default value will be 120. + type: int + partition: + description: + - Device partition to manage resources on. + type: str + default: Common + state: + description: + - When C(present), ensures that the monitor exists. + - When C(absent), ensures the monitor is removed. + type: str + choices: + - present + - absent + default: present + probe_timeout: + description: + - Specifies the number of seconds after which the system times out the probe request + to the system. + - When creating a new monitor, if this parameter is not provided, then the default + value will be C(5). + type: int + ignore_down_response: + description: + - Specifies that the monitor allows more than one probe attempt per interval. 
+ - When C(yes), specifies that the monitor ignores down responses for the duration of + the monitor timeout. Once the monitor timeout is reached without the system receiving + an up response, the system marks the object down. + - When C(no), specifies that the monitor immediately marks an object down when it + receives a down response. + - When creating a new monitor, if this parameter is not provided, then the default + value will be C(no). + type: bool + transparent: + description: + - Specifies whether the monitor operates in transparent mode. + - A monitor in transparent mode directs traffic through the associated pool members + or nodes (usually a router or firewall) to the aliased destination (that is, it + probes the C(ip)-C(port) combination specified in the monitor). + - If the monitor cannot successfully reach the aliased destination, the pool member + or node through which the monitor traffic was sent is marked down. + - When creating a new monitor, if this parameter is not provided, then the default + value will be C(no). + type: bool + reverse: + description: + - Instructs the system to mark the target resource down when the test is successful. + This setting is useful, for example, if the content on your web site home page is + dynamic and changes frequently, you may want to set up a reverse ECV service check + that looks for the string Error. + - A match for this string means that the web server was down. + - To use this option, you must specify values for C(send) and C(receive). + type: bool + target_username: + description: + - Specifies the user name, if the monitored target requires authentication. + type: str + target_password: + description: + - Specifies the password, if the monitored target requires authentication. + type: str + update_password: + description: + - C(always) will update passwords if the C(target_password) is specified. + - C(on_create) will only set the password for newly created monitors. + type: str + choices: + - always + - on_create + default: always + cipher_list: + description: + - Specifies the list of ciphers for this monitor. + - The items in the cipher list are separated with the colon C(:) symbol. + - When creating a new monitor, if this parameter is not specified, the default + list is C(DEFAULT:+SHA:+3DES:+kEDH). + type: str + compatibility: + description: + - Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to B(all). + - When creating a new monitor, if this value is not specified, the default is + C(yes) + type: bool + client_cert: + description: + - Specifies a fully-qualified path for a client certificate that the monitor sends to + the target SSL server. + type: str + client_key: + description: + - Specifies a key for a client certificate that the monitor sends to the target SSL server. 
+ type: str +extends_documentation_fragment: f5 +author: + - Tim Rupp (@caphrim007) + - Wojciech Wypior (@wojtek0806) +''' + +EXAMPLES = r''' +- name: Create a GTM HTTPS monitor + bigip_gtm_monitor_https: + name: my_monitor + ip: 1.1.1.1 + port: 80 + send: my send string + receive: my receive string + state: present + provider: + user: admin + password: secret + server: lb.mydomain.com + delegate_to: localhost + +- name: Remove HTTPS Monitor + bigip_gtm_monitor_https: + name: my_monitor + state: absent + provider: + user: admin + password: secret + server: lb.mydomain.com + delegate_to: localhost + +- name: Add HTTPS monitor for all addresses, port 514 + bigip_gtm_monitor_https: + name: my_monitor + provider: + user: admin + password: secret + server: lb.mydomain.com + port: 514 + delegate_to: localhost +''' + +RETURN = r''' +parent: + description: New parent template of the monitor. + returned: changed + type: str + sample: https +ip: + description: The new IP of IP/port definition. + returned: changed + type: str + sample: 10.12.13.14 +port: + description: The new port the monitor checks the resource on. + returned: changed + type: str + sample: 8080 +interval: + description: The new interval in which to run the monitor check. + returned: changed + type: int + sample: 2 +timeout: + description: The new timeout in which the remote system must respond to the monitor. + returned: changed + type: int + sample: 10 +ignore_down_response: + description: Whether to ignore the down response or not. + returned: changed + type: bool + sample: True +send: + description: The new send string for this monitor. + returned: changed + type: str + sample: tcp string to send +receive: + description: The new receive string for this monitor. + returned: changed + type: str + sample: tcp string to receive +probe_timeout: + description: The new timeout in which the system will timeout the monitor probe. + returned: changed + type: int + sample: 10 +reverse: + description: The new value for whether the monitor operates in reverse mode. + returned: changed + type: bool + sample: False +transparent: + description: The new value for whether the monitor operates in transparent mode. + returned: changed + type: bool + sample: False +cipher_list: + description: The new value for the cipher list. + returned: changed + type: str + sample: +3DES:+kEDH +compatibility: + description: The new SSL compatibility setting. + returned: changed + type: bool + sample: True +client_cert: + description: The new client cert setting. + returned: changed + type: str + sample: /Common/default +client_key: + description: The new client key setting. 
+ returned: changed + type: str + sample: /Common/default +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import env_fallback + +try: + from library.module_utils.network.f5.bigip import F5RestClient + from library.module_utils.network.f5.common import F5ModuleError + from library.module_utils.network.f5.common import AnsibleF5Parameters + from library.module_utils.network.f5.common import fq_name + from library.module_utils.network.f5.common import f5_argument_spec + from library.module_utils.network.f5.common import transform_name + from library.module_utils.network.f5.icontrol import module_provisioned + from library.module_utils.network.f5.ipaddress import is_valid_ip +except ImportError: + from ansible.module_utils.network.f5.bigip import F5RestClient + from ansible.module_utils.network.f5.common import F5ModuleError + from ansible.module_utils.network.f5.common import AnsibleF5Parameters + from ansible.module_utils.network.f5.common import fq_name + from ansible.module_utils.network.f5.common import f5_argument_spec + from ansible.module_utils.network.f5.common import transform_name + from ansible.module_utils.network.f5.icontrol import module_provisioned + from ansible.module_utils.network.f5.ipaddress import is_valid_ip + + +class Parameters(AnsibleF5Parameters): + api_map = { + 'defaultsFrom': 'parent', + 'ignoreDownResponse': 'ignore_down_response', + 'probeTimeout': 'probe_timeout', + 'recv': 'receive', + 'username': 'target_username', + 'password': 'target_password', + 'cipherlist': 'cipher_list', + 'cert': 'client_cert', + 'key': 'client_key', + } + + api_attributes = [ + 'defaultsFrom', + 'interval', + 'timeout', + 'destination', + 'transparent', + 'probeTimeout', + 'ignoreDownResponse', + 'reverse', + 'send', + 'recv', + 'username', + 'password', + 'cipherlist', + 'compatibility', + 'cert', + 'key', + ] + + returnables = [ + 'parent', + 'ip', + 'port', + 'interval', + 'timeout', + 'transparent', + 'probe_timeout', + 'ignore_down_response', + 'send', + 'receive', + 'reverse', + 'cipher_list', + 'compatibility', + 'client_cert', + 'client_key', + ] + + updatables = [ + 'destination', + 'interval', + 'timeout', + 'transparent', + 'probe_timeout', + 'ignore_down_response', + 'send', + 'receive', + 'reverse', + 'ip', + 'port', + 'target_username', + 'target_password', + 'cipher_list', + 'compatibility', + 'client_cert', + 'client_key', + ] + + +class ApiParameters(Parameters): + @property + def ip(self): + ip, port = self._values['destination'].split(':') + return ip + + @property + def port(self): + ip, port = self._values['destination'].split(':') + try: + return int(port) + except ValueError: + return port + + @property + def ignore_down_response(self): + if self._values['ignore_down_response'] is None: + return None + if self._values['ignore_down_response'] == 'disabled': + return False + return True + + @property + def transparent(self): + if self._values['transparent'] is None: + return None + if self._values['transparent'] == 'disabled': + return False + return True + + @property + def reverse(self): + if self._values['reverse'] is None: + return None + if self._values['reverse'] == 'disabled': + return False + return True + + @property + def compatibility(self): + if self._values['compatibility'] is None: + return None + if self._values['compatibility'] == 'disabled': + return False + return True + + +class ModuleParameters(Parameters): + @property + def interval(self): + if self._values['interval'] is None: + return None + if 
1 > int(self._values['interval']) > 86400: + raise F5ModuleError( + "Interval value must be between 1 and 86400" + ) + return int(self._values['interval']) + + @property + def timeout(self): + if self._values['timeout'] is None: + return None + return int(self._values['timeout']) + + @property + def ip(self): + if self._values['ip'] is None: + return None + if self._values['ip'] in ['*', '0.0.0.0']: + return '*' + elif is_valid_ip(self._values['ip']): + return self._values['ip'] + else: + raise F5ModuleError( + "The provided 'ip' parameter is not an IP address." + ) + + @property + def parent(self): + if self._values['parent'] is None: + return None + result = fq_name(self.partition, self._values['parent']) + return result + + @property + def port(self): + if self._values['port'] is None: + return None + elif self._values['port'] == '*': + return '*' + return int(self._values['port']) + + @property + def destination(self): + if self.ip is None and self.port is None: + return None + destination = '{0}:{1}'.format(self.ip, self.port) + return destination + + @destination.setter + def destination(self, value): + ip, port = value.split(':') + self._values['ip'] = ip + self._values['port'] = port + + @property + def probe_timeout(self): + if self._values['probe_timeout'] is None: + return None + return int(self._values['probe_timeout']) + + @property + def type(self): + return 'https' + + @property + def client_cert(self): + if self._values['client_cert'] is None: + return None + if self._values['client_cert'] == '': + return '' + result = fq_name(self.partition, self._values['client_cert']) + if not result.endswith('.crt'): + result += '.crt' + return result + + @property + def client_key(self): + if self._values['client_key'] is None: + return None + if self._values['client_key'] == '': + return '' + result = fq_name(self.partition, self._values['client_key']) + if not result.endswith('.key'): + result += '.key' + return result + + +class Changes(Parameters): + def to_return(self): + result = {} + try: + for returnable in self.returnables: + result[returnable] = getattr(self, returnable) + result = self._filter_params(result) + except Exception: + pass + return result + + +class UsableChanges(Changes): + @property + def transparent(self): + if self._values['transparent'] is None: + return None + elif self._values['transparent'] is True: + return 'enabled' + return 'disabled' + + @property + def ignore_down_response(self): + if self._values['ignore_down_response'] is None: + return None + elif self._values['ignore_down_response'] is True: + return 'enabled' + return 'disabled' + + @property + def reverse(self): + if self._values['reverse'] is None: + return None + elif self._values['reverse'] is True: + return 'enabled' + return 'disabled' + + @property + def compatibility(self): + if self._values['compatibility'] is None: + return None + elif self._values['compatibility'] is True: + return 'enabled' + return 'disabled' + + +class ReportableChanges(Changes): + @property + def ip(self): + ip, port = self._values['destination'].split(':') + return ip + + @property + def port(self): + ip, port = self._values['destination'].split(':') + return int(port) + + @property + def transparent(self): + if self._values['transparent'] == 'enabled': + return True + return False + + @property + def ignore_down_response(self): + if self._values['ignore_down_response'] == 'enabled': + return True + return False + + @property + def reverse(self): + if self._values['reverse'] == 'enabled': + return True + return 
False + + @property + def compatibility(self): + if self._values['compatibility'] == 'enabled': + return True + return False + + +class Difference(object): + def __init__(self, want, have=None): + self.want = want + self.have = have + + def compare(self, param): + try: + result = getattr(self, param) + return result + except AttributeError: + return self.__default(param) + + def __default(self, param): + attr1 = getattr(self.want, param) + try: + attr2 = getattr(self.have, param) + if attr1 != attr2: + return attr1 + except AttributeError: + return attr1 + + @property + def parent(self): + if self.want.parent != self.have.parent: + raise F5ModuleError( + "The parent monitor cannot be changed" + ) + + @property + def destination(self): + if self.want.ip is None and self.want.port is None: + return None + if self.want.port is None: + self.want.update({'port': self.have.port}) + if self.want.ip is None: + self.want.update({'ip': self.have.ip}) + + if self.want.port in [None, '*'] and self.want.ip != '*': + raise F5ModuleError( + "Specifying an IP address requires that a port number be specified" + ) + + if self.want.destination != self.have.destination: + return self.want.destination + + @property + def interval(self): + if self.want.timeout is not None and self.want.interval is not None: + if self.want.interval >= self.want.timeout: + raise F5ModuleError( + "Parameter 'interval' must be less than 'timeout'." + ) + elif self.want.timeout is not None: + if self.have.interval >= self.want.timeout: + raise F5ModuleError( + "Parameter 'interval' must be less than 'timeout'." + ) + elif self.want.interval is not None: + if self.want.interval >= self.have.timeout: + raise F5ModuleError( + "Parameter 'interval' must be less than 'timeout'." + ) + if self.want.interval != self.have.interval: + return self.want.interval + + @property + def target_password(self): + if self.want.target_password != self.have.target_password: + if self.want.update_password == 'always': + result = self.want.target_password + return result + + @property + def client_cert(self): + if self.have.client_cert is None and self.want.client_cert == '': + return None + if self.have.client_cert != self.want.client_cert: + return self.want.client_cert + + @property + def client_key(self): + if self.have.client_key is None and self.want.client_key == '': + return None + if self.have.client_key != self.want.client_key: + return self.want.client_key + + +class ModuleManager(object): + def __init__(self, *args, **kwargs): + self.module = kwargs.get('module', None) + self.client = F5RestClient(**self.module.params) + self.want = ModuleParameters(params=self.module.params) + self.have = ApiParameters() + self.changes = UsableChanges() + + def _set_changed_options(self): + changed = {} + for key in Parameters.returnables: + if getattr(self.want, key) is not None: + changed[key] = getattr(self.want, key) + if changed: + self.changes = UsableChanges(params=changed) + + def _update_changed_options(self): + diff = Difference(self.want, self.have) + updatables = Parameters.updatables + changed = dict() + for k in updatables: + change = diff.compare(k) + if change is None: + continue + else: + if isinstance(change, dict): + changed.update(change) + else: + changed[k] = change + if changed: + self.changes = UsableChanges(params=changed) + return True + return False + + def should_update(self): + result = self._update_changed_options() + if result: + return True + return False + + def _announce_deprecations(self, result): + warnings = 
result.pop('__warnings', []) + for warning in warnings: + self.client.module.deprecate( + msg=warning['msg'], + version=warning['version'] + ) + + def _set_default_creation_values(self): + if self.want.timeout is None: + self.want.update({'timeout': 120}) + if self.want.interval is None: + self.want.update({'interval': 30}) + if self.want.probe_timeout is None: + self.want.update({'probe_timeout': 5}) + if self.want.ip is None: + self.want.update({'ip': '*'}) + if self.want.port is None: + self.want.update({'port': '*'}) + if self.want.ignore_down_response is None: + self.want.update({'ignore_down_response': False}) + if self.want.transparent is None: + self.want.update({'transparent': False}) + if self.want.send is None: + self.want.update({'send': 'GET /\r\n'}) + if self.want.cipher_list is None: + self.want.update({'cipher_list': 'DEFAULT:+SHA:+3DES:+kEDH'}) + if self.want.compatibility is None: + self.want.update({'compatibility': True}) + + def exec_module(self): + if not module_provisioned(self.client, 'gtm'): + raise F5ModuleError( + "GTM must be provisioned to use this module." + ) + changed = False + result = dict() + state = self.want.state + + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + + reportable = ReportableChanges(params=self.changes.to_return()) + changes = reportable.to_return() + result.update(**changes) + result.update(dict(changed=changed)) + self._announce_deprecations(result) + return result + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + if self.exists(): + return self.remove() + return False + + def update(self): + self.have = self.read_current_from_device() + if not self.should_update(): + return False + if self.module.check_mode: + return True + self.update_on_device() + return True + + def remove(self): + if self.module.check_mode: + return True + self.remove_from_device() + if self.exists(): + raise F5ModuleError("Failed to delete the resource.") + return True + + def create(self): + self._set_default_creation_values() + self._set_changed_options() + if self.module.check_mode: + return True + self.create_on_device() + return True + + def exists(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name), + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError: + return False + if resp.status == 404 or 'code' in response and response['code'] == 404: + return False + return True + + def create_on_device(self): + params = self.changes.api_params() + params['name'] = self.want.name + params['partition'] = self.want.partition + uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/".format( + self.client.provider['server'], + self.client.provider['server_port'], + ) + resp = self.client.api.post(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] in [400, 403]: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return response['selfLink'] + + def update_on_device(self): + params = self.changes.api_params() + uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name), + ) + resp 
= self.client.api.patch(uri, json=params) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + + def remove_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name), + ) + response = self.client.api.delete(uri) + if response.status == 200: + return True + raise F5ModuleError(response.content) + + def read_current_from_device(self): + uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format( + self.client.provider['server'], + self.client.provider['server_port'], + transform_name(self.want.partition, self.want.name), + ) + resp = self.client.api.get(uri) + try: + response = resp.json() + except ValueError as ex: + raise F5ModuleError(str(ex)) + + if 'code' in response and response['code'] == 400: + if 'message' in response: + raise F5ModuleError(response['message']) + else: + raise F5ModuleError(resp.content) + return ApiParameters(params=response) + + +class ArgumentSpec(object): + def __init__(self): + self.supports_check_mode = True + argument_spec = dict( + name=dict(required=True), + parent=dict(default='/Common/https'), + send=dict(), + receive=dict(), + ip=dict(), + port=dict(), + interval=dict(type='int'), + timeout=dict(type='int'), + ignore_down_response=dict(type='bool'), + transparent=dict(type='bool'), + probe_timeout=dict(type='int'), + reverse=dict(type='bool'), + target_username=dict(), + target_password=dict(no_log=True), + cipher_list=dict(), + compatibility=dict(type='bool'), + client_cert=dict(), + client_key=dict(), + update_password=dict( + default='always', + choices=['always', 'on_create'] + ), + state=dict( + default='present', + choices=['present', 'absent'] + ), + partition=dict( + default='Common', + fallback=(env_fallback, ['F5_PARTITION']) + ) + ) + self.argument_spec = {} + self.argument_spec.update(f5_argument_spec) + self.argument_spec.update(argument_spec) + + +def main(): + spec = ArgumentSpec() + + module = AnsibleModule( + argument_spec=spec.argument_spec, + supports_check_mode=spec.supports_check_mode, + ) + + try: + mm = ModuleManager(module=module) + results = mm.exec_module() + module.exit_json(**results) + except F5ModuleError as ex: + module.fail_json(msg=str(ex)) + + +if __name__ == '__main__': + main() + +"""Extended file operations available in POSIX. + +f = posixfile.open(filename, [mode, [bufsize]]) + will create a new posixfile object + +f = posixfile.fileopen(fileobject) + will create a posixfile object from a builtin file object + +f.file() + will return the original builtin file object + +f.dup() + will return a new file object based on a new filedescriptor + +f.dup2(fd) + will return a new file object based on the given filedescriptor + +f.flags(mode) + will turn on the associated flag (merge) + mode can contain the following characters: + + (character representing a flag) + a append only flag + c close on exec flag + n no delay flag + s synchronization flag + (modifiers) + ! turn flags 'off' instead of default 'on' + = copy flags 'as is' instead of default 'merge' + ? return a string in which the characters represent the flags + that are set + + note: - the '!' and '=' modifiers are mutually exclusive. + - the '?' 
modifier will return the status of the flags after they + have been changed by other characters in the mode string + +f.lock(mode [, len [, start [, whence]]]) + will (un)lock a region + mode can contain the following characters: + + (character representing type of lock) + u unlock + r read lock + w write lock + (modifiers) + | wait until the lock can be granted + ? return the first lock conflicting with the requested lock + or 'None' if there is no conflict. The lock returned is in the + format (mode, len, start, whence, pid) where mode is a + character representing the type of lock ('r' or 'w') + + note: - the '?' modifier prevents a region from being locked; it is + query only +""" +import warnings +warnings.warn("The posixfile module is deprecated; " + "fcntl.lockf() provides better locking", DeprecationWarning, 2) + +class _posixfile_: + """File wrapper class that provides extra POSIX file routines.""" + + states = ['open', 'closed'] + + # + # Internal routines + # + def __repr__(self): + file = self._file_ + return "<%s posixfile '%s', mode '%s' at %s>" % \ + (self.states[file.closed], file.name, file.mode, \ + hex(id(self))[2:]) + + # + # Initialization routines + # + def open(self, name, mode='r', bufsize=-1): + import __builtin__ + return self.fileopen(__builtin__.open(name, mode, bufsize)) + + def fileopen(self, file): + import types + if repr(type(file)) != "": + raise TypeError, 'posixfile.fileopen() arg must be file object' + self._file_ = file + # Copy basic file methods + for maybemethod in dir(file): + if not maybemethod.startswith('_'): + attr = getattr(file, maybemethod) + if isinstance(attr, types.BuiltinMethodType): + setattr(self, maybemethod, attr) + return self + + # + # New methods + # + def file(self): + return self._file_ + + def dup(self): + import posix + + if not hasattr(posix, 'fdopen'): + raise AttributeError, 'dup() method unavailable' + + return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode) + + def dup2(self, fd): + import posix + + if not hasattr(posix, 'fdopen'): + raise AttributeError, 'dup() method unavailable' + + posix.dup2(self._file_.fileno(), fd) + return posix.fdopen(fd, self._file_.mode) + + def flags(self, *which): + import fcntl, os + + if which: + if len(which) > 1: + raise TypeError, 'Too many arguments' + which = which[0] + else: which = '?' + + l_flags = 0 + if 'n' in which: l_flags = l_flags | os.O_NDELAY + if 'a' in which: l_flags = l_flags | os.O_APPEND + if 's' in which: l_flags = l_flags | os.O_SYNC + + file = self._file_ + + if '=' not in which: + cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) + if '!' in which: l_flags = cur_fl & ~ l_flags + else: l_flags = cur_fl | l_flags + + l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags) + + if 'c' in which: + arg = ('!' not in which) # 0 is don't, 1 is do close on exec + l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg) + + if '?' in which: + which = '' # Return current flags + l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) + if os.O_APPEND & l_flags: which = which + 'a' + if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1: + which = which + 'c' + if os.O_NDELAY & l_flags: which = which + 'n' + if os.O_SYNC & l_flags: which = which + 's' + return which + + def lock(self, how, *args): + import struct, fcntl + + if 'w' in how: l_type = fcntl.F_WRLCK + elif 'r' in how: l_type = fcntl.F_RDLCK + elif 'u' in how: l_type = fcntl.F_UNLCK + else: raise TypeError, 'no type of lock specified' + + if '|' in how: cmd = fcntl.F_SETLKW + elif '?' 
in how: cmd = fcntl.F_GETLK + else: cmd = fcntl.F_SETLK + + l_whence = 0 + l_start = 0 + l_len = 0 + + if len(args) == 1: + l_len = args[0] + elif len(args) == 2: + l_len, l_start = args + elif len(args) == 3: + l_len, l_start, l_whence = args + elif len(args) > 3: + raise TypeError, 'too many arguments' + + # Hack by davem@magnet.com to get locking to go on freebsd; + # additions for AIX by Vladimir.Marangozov@imag.fr + import sys, os + if sys.platform in ('netbsd1', + 'openbsd2', + 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', + 'freebsd6', 'freebsd7', 'freebsd8', + 'bsdos2', 'bsdos3', 'bsdos4'): + flock = struct.pack('lxxxxlxxxxlhh', \ + l_start, l_len, os.getpid(), l_type, l_whence) + elif sys.platform in ('aix3', 'aix4'): + flock = struct.pack('hhlllii', \ + l_type, l_whence, l_start, l_len, 0, 0, 0) + else: + flock = struct.pack('hhllhh', \ + l_type, l_whence, l_start, l_len, 0, 0) + + flock = fcntl.fcntl(self._file_.fileno(), cmd, flock) + + if '?' in how: + if sys.platform in ('netbsd1', + 'openbsd2', + 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', + 'bsdos2', 'bsdos3', 'bsdos4'): + l_start, l_len, l_pid, l_type, l_whence = \ + struct.unpack('lxxxxlxxxxlhh', flock) + elif sys.platform in ('aix3', 'aix4'): + l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \ + struct.unpack('hhlllii', flock) + elif sys.platform == "linux2": + l_type, l_whence, l_start, l_len, l_pid, l_sysid = \ + struct.unpack('hhllhh', flock) + else: + l_type, l_whence, l_start, l_len, l_sysid, l_pid = \ + struct.unpack('hhllhh', flock) + + if l_type != fcntl.F_UNLCK: + if l_type == fcntl.F_RDLCK: + return 'r', l_len, l_start, l_whence, l_pid + else: + return 'w', l_len, l_start, l_whence, l_pid + +def open(name, mode='r', bufsize=-1): + """Public routine to open a file as a posixfile object.""" + return _posixfile_().open(name, mode, bufsize) + +def fileopen(file): + """Public routine to get a posixfile object from a Python file object.""" + return _posixfile_().fileopen(file) + +# +# Constants +# +SEEK_SET = 0 +SEEK_CUR = 1 +SEEK_END = 2 + +# +# End of posixfile.py +# + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2010 Tiny SPRL (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +############################################################################## + +from osv import osv +from osv import fields +from tools.translate import _ + +class product_product(osv.osv): + _name = 'product.product' + _inherit = 'product.product' + + _columns = { + 'list_price_copy': fields.related('list_price', type="float", readonly=True, store=False, string='Sale Price', + help='Base price for computing the customer price. 
Sometimes called the catalog price.'), + 'can_modify_prices': fields.boolean('Can modify prices', + help='If checked all users can modify the price of this product in a sale order or invoice.'), + } + + _defaults = { + 'can_modify_prices': False, + } + + def onchange_list_price(self, cr, uid, ids, list_price): + return {'value': {'list_price_copy': list_price}} + + def fields_get(self, cr, uid, allfields=None, context=None): + if not context: + context = {} + group_obj = self.pool.get('res.groups') + if group_obj.user_in_group(cr, uid, uid, 'dt_price_security.can_modify_prices', context=context): + context['can_modify_prices'] = True + else: + context['can_modify_prices'] = False + + ret = super(product_product, self).fields_get(cr, uid, allfields=allfields, context=context) + + if group_obj.user_in_group(cr, uid, uid, 'dt_price_security.can_modify_prices', context=context): + if 'list_price_copy' in ret: + ret['list_price_copy']['invisible'] = True + else: + if 'list_price' in ret: + ret['list_price']['invisible'] = True + + if group_obj.user_in_group(cr, uid, uid, 'price_security.hide_purchase_prices', context=context): + if 'standard_price' in ret: + ret['standard_price']['invisible'] = True + if 'cost_method' in ret: + ret['cost_method']['invisible'] = True + + return ret + + def write(self, cr, uid, ids, vals, context=None): + if 'list_price' in vals: + group_obj = self.pool.get('res.groups') + + if not group_obj.user_in_group(cr, uid, uid, 'dt_price_security.can_modify_prices', context=context): + title = _('Violation of permissions') + message = _('You do not have the necesary permissions to modify the price of the products') + raise osv.except_osv(title, message) + + return super(product_product, self).write(cr, uid, ids, vals, context=context) + + + +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! 
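+
+A minimal illustrative use of create_connection() follows; the host, port and
+request bytes are placeholders, not part of this module:
+
+    sock = create_connection(("www.example.org", 80), timeout=5)
+    sock.sendall(b"GET / HTTP/1.0\r\nHost: www.example.org\r\n\r\n")
+    data = sock.recv(4096)
+    sock.close()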
+ +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +Integer constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +""" + +import _socket +from _socket import * + +import os, sys, io + +try: + import errno +except ImportError: + errno = None +EBADF = getattr(errno, 'EBADF', 9) +EAGAIN = getattr(errno, 'EAGAIN', 11) +EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) + +__all__ = ["getfqdn", "create_connection"] +__all__.extend(os._get_exports_list(_socket)) + + +_realsocket = socket + +# WSA error codes +if sys.platform.lower().startswith("win"): + errorTab = {} + errorTab[10004] = "The operation was interrupted." + errorTab[10009] = "A bad file handle was passed." + errorTab[10013] = "Permission denied." + errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT + errorTab[10022] = "An invalid operation was attempted." + errorTab[10035] = "The socket operation would block" + errorTab[10036] = "A blocking operation is already in progress." + errorTab[10048] = "The network address is in use." + errorTab[10054] = "The connection has been reset." + errorTab[10058] = "The network has been shut down." + errorTab[10060] = "The operation timed out." + errorTab[10061] = "Connection refused." + errorTab[10063] = "The name is too long." + errorTab[10064] = "The host is down." + errorTab[10065] = "The host is unreachable." + __all__.append("errorTab") + + +class socket(_socket.socket): + + """A subclass of _socket.socket adding the makefile() method.""" + + __slots__ = ["__weakref__", "_io_refs", "_closed"] + + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None): + _socket.socket.__init__(self, family, type, proto, fileno) + self._io_refs = 0 + self._closed = False + + def __enter__(self): + return self + + def __exit__(self, *args): + if not self._closed: + self.close() + + def __repr__(self): + """Wrap __repr__() to reveal the real class name.""" + s = _socket.socket.__repr__(self) + if s.startswith(" socket object + + Return a new socket object connected to the same system resource. + """ + fd = dup(self.fileno()) + sock = self.__class__(self.family, self.type, self.proto, fileno=fd) + sock.settimeout(self.gettimeout()) + return sock + + def accept(self): + """accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + """ + fd, addr = self._accept() + sock = socket(self.family, self.type, self.proto, fileno=fd) + # Issue #7995: if no default timeout is set and the listening + # socket had a (non-zero) timeout, force the new socket in blocking + # mode to override platform-specific socket flags inheritance. + if getdefaulttimeout() is None and self.gettimeout(): + sock.setblocking(True) + return sock, addr + + def makefile(self, mode="r", buffering=None, *, + encoding=None, errors=None, newline=None): + """makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, + except the only mode characters supported are 'r', 'w' and 'b'. + The semantics are similar too. (XXX refactor to share code?) 
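+
+        A small illustrative sketch (the address and request are placeholders):
+
+            sock = create_connection(("www.example.org", 80))
+            f = sock.makefile("rwb")
+            f.write(b"GET / HTTP/1.0\r\n\r\n")
+            f.flush()
+            status_line = f.readline()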
+ """ + for c in mode: + if c not in {"r", "w", "b"}: + raise ValueError("invalid mode %r (only r, w, b allowed)") + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._io_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text + + def _decref_socketios(self): + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def _real_close(self, _ss=_socket.socket): + # This function should not reference any globals. See issue #808164. + _ss.close(self) + + def close(self): + # This function should not reference any globals. See issue #808164. + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + +def fromfd(fd, family, type, proto=0): + """ fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + """ + nfd = dup(fd) + return socket(family, type, proto, nfd) + +if hasattr(_socket.socket, "share"): + def fromshare(info): + """ fromshare(info) -> socket object + + Create a socket object from a the bytes object returned by + socket.share(pid). + """ + return socket(0, 0, 0, info) + +if hasattr(_socket, "socketpair"): + + def socketpair(family=None, type=SOCK_STREAM, proto=0): + """socketpair([family[, type[, proto]]]) -> (socket object, socket object) + + Create a pair of socket objects from the sockets returned by the platform + socketpair() function. + The arguments are the same as for socket() except the default family is + AF_UNIX if defined on the platform; otherwise, the default is AF_INET. + """ + if family is None: + try: + family = AF_UNIX + except NameError: + family = AF_INET + a, b = _socket.socketpair(family, type, proto) + a = socket(family, type, proto, a.detach()) + b = socket(family, type, proto, b.detach()) + return a, b + + +_blocking_errnos = { EAGAIN, EWOULDBLOCK } + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. 
There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't used read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise IOError("cannot read from timed out object") + while True: + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + except InterruptedError: + continue + except error as e: + if e.args[0] in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.args[0] in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + """ + name = name.strip() + if not name or name == '0.0.0.0': + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' 
in name: + break + else: + name = hostname + return name + + +_GLOBAL_DEFAULT_TIMEOUT = object() + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. + """ + + host, port = address + err = None + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + else: + raise error("getaddrinfo returns an empty list") + +from sqlalchemy import schema as sa_schema, types as sqltypes, sql +import logging +from .. import compat +from ..ddl.base import _table_for_constraint, _fk_spec +import re +from ..compat import string_types + +log = logging.getLogger(__name__) + +MAX_PYTHON_ARGS = 255 + +try: + from sqlalchemy.sql.naming import conv + + def _render_gen_name(autogen_context, name): + if isinstance(name, conv): + return _f_name(_alembic_autogenerate_prefix(autogen_context), name) + else: + return name +except ImportError: + def _render_gen_name(autogen_context, name): + return name + + +class _f_name(object): + + def __init__(self, prefix, name): + self.prefix = prefix + self.name = name + + def __repr__(self): + return "%sf(%r)" % (self.prefix, _ident(self.name)) + + +def _ident(name): + """produce a __repr__() object for a string identifier that may + use quoted_name() in SQLAlchemy 0.9 and greater. + + The issue worked around here is that quoted_name() doesn't have + very good repr() behavior by itself when unicode is involved. 
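+
+    For example (illustrative), a quoted_name(u'user') value ends up rendered
+    as a plain 'user' literal in the generated script rather than as a u''
+    literal.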
+ + """ + if name is None: + return name + elif compat.sqla_09 and isinstance(name, sql.elements.quoted_name): + if compat.py2k: + # the attempt to encode to ascii here isn't super ideal, + # however we are trying to cut down on an explosion of + # u'' literals only when py2k + SQLA 0.9, in particular + # makes unit tests testing code generation very difficult + try: + return name.encode('ascii') + except UnicodeError: + return compat.text_type(name) + else: + return compat.text_type(name) + elif isinstance(name, compat.string_types): + return name + + +def _render_potential_expr(value, autogen_context, wrap_in_text=True): + if isinstance(value, sql.ClauseElement): + if compat.sqla_08: + compile_kw = dict(compile_kwargs={'literal_binds': True}) + else: + compile_kw = {} + + if wrap_in_text: + template = "%(prefix)stext(%(sql)r)" + else: + template = "%(sql)r" + + return template % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "sql": compat.text_type( + value.compile(dialect=autogen_context['dialect'], + **compile_kw) + ) + } + + else: + return repr(value) + + +def _add_table(table, autogen_context): + args = [col for col in + [_render_column(col, autogen_context) for col in table.c] + if col] + \ + sorted([rcons for rcons in + [_render_constraint(cons, autogen_context) for cons in + table.constraints] + if rcons is not None + ]) + + if len(args) > MAX_PYTHON_ARGS: + args = '*[' + ',\n'.join(args) + ']' + else: + args = ',\n'.join(args) + + text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { + 'tablename': _ident(table.name), + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'args': args, + } + if table.schema: + text += ",\nschema=%r" % _ident(table.schema) + for k in sorted(table.kwargs): + text += ",\n%s=%r" % (k.replace(" ", "_"), table.kwargs[k]) + text += "\n)" + return text + + +def _drop_table(table, autogen_context): + text = "%(prefix)sdrop_table(%(tname)r" % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": _ident(table.name) + } + if table.schema: + text += ", schema=%r" % _ident(table.schema) + text += ")" + return text + + +def _get_index_rendered_expressions(idx, autogen_context): + if compat.sqla_08: + return [repr(_ident(getattr(exp, "name", None))) + if isinstance(exp, sa_schema.Column) + else _render_potential_expr(exp, autogen_context) + for exp in idx.expressions] + else: + return [ + repr(_ident(getattr(col, "name", None))) for col in idx.columns] + + +def _add_index(index, autogen_context): + """ + Generate Alembic operations for the CREATE INDEX of an + :class:`~sqlalchemy.schema.Index` instance. 
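+
+    Typically rendered as an op.create_index() call, e.g. (names here are
+    illustrative):
+
+        op.create_index('ix_user_name', 'user', ['name'], unique=False)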
+ """ + + has_batch = 'batch_prefix' in autogen_context + + if has_batch: + tmpl = "%(prefix)screate_index(%(name)r, [%(columns)s], "\ + "unique=%(unique)r%(kwargs)s)" + else: + tmpl = "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], "\ + "unique=%(unique)r%(schema)s%(kwargs)s)" + + text = tmpl % { + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'name': _render_gen_name(autogen_context, index.name), + 'table': _ident(index.table.name), + 'columns': ", ".join( + _get_index_rendered_expressions(index, autogen_context)), + 'unique': index.unique or False, + 'schema': (", schema=%r" % _ident(index.table.schema)) + if index.table.schema else '', + 'kwargs': ( + ', ' + + ', '.join( + ["%s=%s" % + (key, _render_potential_expr(val, autogen_context)) + for key, val in index.kwargs.items()])) + if len(index.kwargs) else '' + } + return text + + +def _drop_index(index, autogen_context): + """ + Generate Alembic operations for the DROP INDEX of an + :class:`~sqlalchemy.schema.Index` instance. + """ + has_batch = 'batch_prefix' in autogen_context + + if has_batch: + tmpl = "%(prefix)sdrop_index(%(name)r)" + else: + tmpl = "%(prefix)sdrop_index(%(name)r, "\ + "table_name=%(table_name)r%(schema)s)" + + text = tmpl % { + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'name': _render_gen_name(autogen_context, index.name), + 'table_name': _ident(index.table.name), + 'schema': ((", schema=%r" % _ident(index.table.schema)) + if index.table.schema else '') + } + return text + + +def _render_unique_constraint(constraint, autogen_context): + rendered = _user_defined_render("unique", constraint, autogen_context) + if rendered is not False: + return rendered + + return _uq_constraint(constraint, autogen_context, False) + + +def _add_unique_constraint(constraint, autogen_context): + """ + Generate Alembic operations for the ALTER TABLE .. ADD CONSTRAINT ... + UNIQUE of a :class:`~sqlalchemy.schema.UniqueConstraint` instance. 
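+
+    Typically rendered as an op.create_unique_constraint() call, e.g. (names
+    here are illustrative):
+
+        op.create_unique_constraint('uq_user_email', 'user', ['email'])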
+ """ + return _uq_constraint(constraint, autogen_context, True) + + +def _uq_constraint(constraint, autogen_context, alter): + opts = [] + + has_batch = 'batch_prefix' in autogen_context + + if constraint.deferrable: + opts.append(("deferrable", str(constraint.deferrable))) + if constraint.initially: + opts.append(("initially", str(constraint.initially))) + if not has_batch and alter and constraint.table.schema: + opts.append(("schema", _ident(constraint.table.schema))) + if not alter and constraint.name: + opts.append( + ("name", + _render_gen_name(autogen_context, constraint.name))) + + if alter: + args = [ + repr(_render_gen_name(autogen_context, constraint.name))] + if not has_batch: + args += [repr(_ident(constraint.table.name))] + args.append(repr([_ident(col.name) for col in constraint.columns])) + args.extend(["%s=%r" % (k, v) for k, v in opts]) + return "%(prefix)screate_unique_constraint(%(args)s)" % { + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'args': ", ".join(args) + } + else: + args = [repr(_ident(col.name)) for col in constraint.columns] + args.extend(["%s=%r" % (k, v) for k, v in opts]) + return "%(prefix)sUniqueConstraint(%(args)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "args": ", ".join(args) + } + + +def _add_fk_constraint(constraint, autogen_context): + source_schema, source_table, \ + source_columns, target_schema, \ + target_table, target_columns = _fk_spec(constraint) + + args = [ + repr(_render_gen_name(autogen_context, constraint.name)), + repr(_ident(source_table)), + repr(_ident(target_table)), + repr([_ident(col) for col in source_columns]), + repr([_ident(col) for col in target_columns]) + ] + if source_schema: + args.append( + "%s=%r" % ('source_schema', source_schema), + ) + if target_schema: + args.append( + "%s=%r" % ('referent_schema', target_schema) + ) + + if constraint.deferrable: + args.append("%s=%r" % ("deferrable", str(constraint.deferrable))) + if constraint.initially: + args.append("%s=%r" % ("initially", str(constraint.initially))) + return "%(prefix)screate_foreign_key(%(args)s)" % { + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'args': ", ".join(args) + } + + +def _add_pk_constraint(constraint, autogen_context): + raise NotImplementedError() + + +def _add_check_constraint(constraint, autogen_context): + raise NotImplementedError() + + +def _add_constraint(constraint, autogen_context): + """ + Dispatcher for the different types of constraints. + """ + funcs = { + "unique_constraint": _add_unique_constraint, + "foreign_key_constraint": _add_fk_constraint, + "primary_key_constraint": _add_pk_constraint, + "check_constraint": _add_check_constraint, + "column_check_constraint": _add_check_constraint, + } + return funcs[constraint.__visit_name__](constraint, autogen_context) + + +def _drop_constraint(constraint, autogen_context): + """ + Generate Alembic operations for the ALTER TABLE ... DROP CONSTRAINT + of a :class:`~sqlalchemy.schema.UniqueConstraint` instance. 
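+
+    Typically rendered as an op.drop_constraint() call, e.g. (names here are
+    illustrative):
+
+        op.drop_constraint('uq_user_email', 'user', type_='unique')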
+ """ + + types = { + "unique_constraint": "unique", + "foreign_key_constraint": "foreignkey", + "primary_key_constraint": "primary", + "check_constraint": "check", + "column_check_constraint": "check", + } + + if 'batch_prefix' in autogen_context: + template = "%(prefix)sdrop_constraint"\ + "(%(name)r, type_=%(type)r)" + else: + template = "%(prefix)sdrop_constraint"\ + "(%(name)r, '%(table_name)s'%(schema)s, type_=%(type)r)" + + constraint_table = _table_for_constraint(constraint) + text = template % { + 'prefix': _alembic_autogenerate_prefix(autogen_context), + 'name': _render_gen_name(autogen_context, constraint.name), + 'table_name': _ident(constraint_table.name), + 'type': types[constraint.__visit_name__], + 'schema': (", schema='%s'" % _ident(constraint_table.schema)) + if constraint_table.schema else '', + } + return text + + +def _add_column(schema, tname, column, autogen_context): + if 'batch_prefix' in autogen_context: + template = "%(prefix)sadd_column(%(column)s)" + else: + template = "%(prefix)sadd_column(%(tname)r, %(column)s" + if schema: + template += ", schema=%(schema)r" + template += ")" + text = template % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": tname, + "column": _render_column(column, autogen_context), + "schema": schema + } + return text + + +def _drop_column(schema, tname, column, autogen_context): + if 'batch_prefix' in autogen_context: + template = "%(prefix)sdrop_column(%(cname)r)" + else: + template = "%(prefix)sdrop_column(%(tname)r, %(cname)r" + if schema: + template += ", schema=%(schema)r" + template += ")" + + text = template % { + "prefix": _alembic_autogenerate_prefix(autogen_context), + "tname": _ident(tname), + "cname": _ident(column.name), + "schema": _ident(schema) + } + return text + + +def _modify_col(tname, cname, + autogen_context, + server_default=False, + type_=None, + nullable=None, + existing_type=None, + existing_nullable=None, + existing_server_default=False, + schema=None): + indent = " " * 11 + + if 'batch_prefix' in autogen_context: + template = "%(prefix)salter_column(%(cname)r" + else: + template = "%(prefix)salter_column(%(tname)r, %(cname)r" + + text = template % { + 'prefix': _alembic_autogenerate_prefix( + autogen_context), + 'tname': tname, + 'cname': cname} + text += ",\n%sexisting_type=%s" % ( + indent, + _repr_type(existing_type, autogen_context)) + if server_default is not False: + rendered = _render_server_default( + server_default, autogen_context) + text += ",\n%sserver_default=%s" % (indent, rendered) + + if type_ is not None: + text += ",\n%stype_=%s" % (indent, + _repr_type(type_, autogen_context)) + if nullable is not None: + text += ",\n%snullable=%r" % ( + indent, nullable,) + if existing_nullable is not None: + text += ",\n%sexisting_nullable=%r" % ( + indent, existing_nullable) + if existing_server_default: + rendered = _render_server_default( + existing_server_default, + autogen_context) + text += ",\n%sexisting_server_default=%s" % ( + indent, rendered) + if schema and "batch_prefix" not in autogen_context: + text += ",\n%sschema=%r" % (indent, schema) + text += ")" + return text + + +def _user_autogenerate_prefix(autogen_context, target): + prefix = autogen_context['opts']['user_module_prefix'] + if prefix is None: + return "%s." 
% target.__module__ + else: + return prefix + + +def _sqlalchemy_autogenerate_prefix(autogen_context): + return autogen_context['opts']['sqlalchemy_module_prefix'] or '' + + +def _alembic_autogenerate_prefix(autogen_context): + if 'batch_prefix' in autogen_context: + return autogen_context['batch_prefix'] + else: + return autogen_context['opts']['alembic_module_prefix'] or '' + + +def _user_defined_render(type_, object_, autogen_context): + if 'opts' in autogen_context and \ + 'render_item' in autogen_context['opts']: + render = autogen_context['opts']['render_item'] + if render: + rendered = render(type_, object_, autogen_context) + if rendered is not False: + return rendered + return False + + +def _render_column(column, autogen_context): + rendered = _user_defined_render("column", column, autogen_context) + if rendered is not False: + return rendered + + opts = [] + if column.server_default: + rendered = _render_server_default( + column.server_default, autogen_context + ) + if rendered: + opts.append(("server_default", rendered)) + + if not column.autoincrement: + opts.append(("autoincrement", column.autoincrement)) + + if column.nullable is not None: + opts.append(("nullable", column.nullable)) + + # TODO: for non-ascii colname, assign a "key" + return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % { + 'prefix': _sqlalchemy_autogenerate_prefix(autogen_context), + 'name': _ident(column.name), + 'type': _repr_type(column.type, autogen_context), + 'kw': ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts]) + } + + +def _render_server_default(default, autogen_context, repr_=True): + rendered = _user_defined_render("server_default", default, autogen_context) + if rendered is not False: + return rendered + + if isinstance(default, sa_schema.DefaultClause): + if isinstance(default.arg, compat.string_types): + default = default.arg + else: + return _render_potential_expr(default.arg, autogen_context) + + if isinstance(default, string_types) and repr_: + default = repr(re.sub(r"^'|'$", "", default)) + + return default + + +def _repr_type(type_, autogen_context): + rendered = _user_defined_render("type", type_, autogen_context) + if rendered is not False: + return rendered + + mod = type(type_).__module__ + imports = autogen_context.get('imports', None) + if mod.startswith("sqlalchemy.dialects"): + dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) + if imports is not None: + imports.add("from sqlalchemy.dialects import %s" % dname) + return "%s.%r" % (dname, type_) + elif mod.startswith("sqlalchemy."): + prefix = _sqlalchemy_autogenerate_prefix(autogen_context) + return "%s%r" % (prefix, type_) + else: + prefix = _user_autogenerate_prefix(autogen_context, type_) + return "%s%r" % (prefix, type_) + + +def _render_constraint(constraint, autogen_context): + renderer = _constraint_renderers.get(type(constraint), None) + if renderer: + return renderer(constraint, autogen_context) + else: + return None + + +def _render_primary_key(constraint, autogen_context): + rendered = _user_defined_render("primary_key", constraint, autogen_context) + if rendered is not False: + return rendered + + if not constraint.columns: + return None + + opts = [] + if constraint.name: + opts.append(("name", repr( + _render_gen_name(autogen_context, constraint.name)))) + return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "args": ", ".join( + [repr(c.key) for c in constraint.columns] + + ["%s=%s" % (kwname, val) for kwname, val in opts] + 
), + } + + +def _fk_colspec(fk, metadata_schema): + """Implement a 'safe' version of ForeignKey._get_colspec() that + never tries to resolve the remote table. + + """ + colspec = fk._get_colspec() + tokens = colspec.split(".") + tname, colname = tokens[-2:] + + if metadata_schema is not None and len(tokens) == 2: + table_fullname = "%s.%s" % (metadata_schema, tname) + else: + table_fullname = ".".join(tokens[0:-1]) + + if fk.parent is not None and fk.parent.table is not None: + # try to resolve the remote table and adjust for column.key + parent_metadata = fk.parent.table.metadata + if table_fullname in parent_metadata.tables: + colname = _ident(parent_metadata.tables[table_fullname].c[colname].name) + + colspec = "%s.%s" % (table_fullname, colname) + + return colspec + + +def _render_foreign_key(constraint, autogen_context): + rendered = _user_defined_render("foreign_key", constraint, autogen_context) + if rendered is not False: + return rendered + + opts = [] + if constraint.name: + opts.append(("name", repr( + _render_gen_name(autogen_context, constraint.name)))) + if constraint.onupdate: + opts.append(("onupdate", repr(constraint.onupdate))) + if constraint.ondelete: + opts.append(("ondelete", repr(constraint.ondelete))) + if constraint.initially: + opts.append(("initially", repr(constraint.initially))) + if constraint.deferrable: + opts.append(("deferrable", repr(constraint.deferrable))) + if constraint.use_alter: + opts.append(("use_alter", repr(constraint.use_alter))) + + apply_metadata_schema = constraint.parent.metadata.schema + return "%(prefix)sForeignKeyConstraint([%(cols)s], "\ + "[%(refcols)s], %(args)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "cols": ", ".join( + "%r" % _ident(f.parent.name) for f in constraint.elements), + "refcols": ", ".join(repr(_fk_colspec(f, apply_metadata_schema)) + for f in constraint.elements), + "args": ", ".join( + ["%s=%s" % (kwname, val) for kwname, val in opts] + ), + } + + +def _render_check_constraint(constraint, autogen_context): + rendered = _user_defined_render("check", constraint, autogen_context) + if rendered is not False: + return rendered + + # detect the constraint being part of + # a parent type which is probably in the Table already. + # ideally SQLAlchemy would give us more of a first class + # way to detect this. + if constraint._create_rule and \ + hasattr(constraint._create_rule, 'target') and \ + isinstance(constraint._create_rule.target, + sqltypes.TypeEngine): + return None + opts = [] + if constraint.name: + opts.append( + ( + "name", + repr( + _render_gen_name(autogen_context, constraint.name)) + ) + ) + return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % { + "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), + "opts": ", " + (", ".join("%s=%s" % (k, v) + for k, v in opts)) if opts else "", + "sqltext": _render_potential_expr( + constraint.sqltext, autogen_context, wrap_in_text=False) + } + +_constraint_renderers = { + sa_schema.PrimaryKeyConstraint: _render_primary_key, + sa_schema.ForeignKeyConstraint: _render_foreign_key, + sa_schema.UniqueConstraint: _render_unique_constraint, + sa_schema.CheckConstraint: _render_check_constraint +} + +from __future__ import unicode_literals + +import os + +import mock +import pytest +import six + +from .. 
import lint as lint_mod +from ..lint import filter_whitelist_errors, parse_whitelist, lint + +_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy") + + +def _mock_lint(name): + wrapped = getattr(lint_mod, name) + return mock.patch(lint_mod.__name__ + "." + name, wraps=wrapped) + + +def test_filter_whitelist_errors(): + filtered = filter_whitelist_errors({}, '', []) + assert filtered == [] + + +def test_parse_whitelist(): + input_buffer = six.StringIO(""" +# Comment +CR AT EOL: svg/import/* +CR AT EOL: streams/resources/test-utils.js + +INDENT TABS: .gitmodules +INDENT TABS: app-uri/* +INDENT TABS: svg/* + +TRAILING WHITESPACE: app-uri/* + +CONSOLE:streams/resources/test-utils.js: 12 + +*:*.pdf +*:resources/* +""") + + expected_data = { + '.gitmodules': { + 'INDENT TABS': {None}, + }, + 'app-uri/*': { + 'TRAILING WHITESPACE': {None}, + 'INDENT TABS': {None}, + }, + 'streams/resources/test-utils.js': { + 'CONSOLE': {12}, + 'CR AT EOL': {None}, + }, + 'svg/*': { + 'INDENT TABS': {None}, + }, + 'svg/import/*': { + 'CR AT EOL': {None}, + }, + } + expected_ignored = {"*.pdf", "resources/*"} + data, ignored = parse_whitelist(input_buffer) + assert data == expected_data + assert ignored == expected_ignored + + +def test_lint_no_files(capsys): + rv = lint(_dummy_repo, [], False) + assert rv == 0 + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + +def test_lint_ignored_file(capsys): + with _mock_lint("check_path") as mocked_check_path: + with _mock_lint("check_file_contents") as mocked_check_file_contents: + rv = lint(_dummy_repo, ["broken_ignored.html"], False) + assert rv == 0 + assert not mocked_check_path.called + assert not mocked_check_file_contents.called + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + +def test_lint_not_existing_file(capsys): + with _mock_lint("check_path") as mocked_check_path: + with _mock_lint("check_file_contents") as mocked_check_file_contents: + # really long path-linted filename + name = "a" * 256 + ".html" + rv = lint(_dummy_repo, [name], False) + assert rv == 0 + assert not mocked_check_path.called + assert not mocked_check_file_contents.called + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + +def test_lint_passing(capsys): + with _mock_lint("check_path") as mocked_check_path: + with _mock_lint("check_file_contents") as mocked_check_file_contents: + rv = lint(_dummy_repo, ["okay.html"], False) + assert rv == 0 + assert mocked_check_path.call_count == 1 + assert mocked_check_file_contents.call_count == 1 + out, err = capsys.readouterr() + assert out == "" + assert err == "" + + +def test_lint_failing(capsys): + with _mock_lint("check_path") as mocked_check_path: + with _mock_lint("check_file_contents") as mocked_check_file_contents: + rv = lint(_dummy_repo, ["broken.html"], False) + assert rv == 1 + assert mocked_check_path.call_count == 1 + assert mocked_check_file_contents.call_count == 1 + out, err = capsys.readouterr() + assert "TRAILING WHITESPACE" in out + assert "broken.html 1 " in out + assert err == "" + + +def test_lint_passing_and_failing(capsys): + with _mock_lint("check_path") as mocked_check_path: + with _mock_lint("check_file_contents") as mocked_check_file_contents: + rv = lint(_dummy_repo, ["broken.html", "okay.html"], False) + assert rv == 1 + assert mocked_check_path.call_count == 2 + assert mocked_check_file_contents.call_count == 2 + out, err = capsys.readouterr() + assert "TRAILING WHITESPACE" in out + assert "broken.html 1 " in out + assert "okay.html" not in 
out + assert err == "" + +# -*- coding: utf-8 -*- +#------------------------------------------------------------ +# pelisalacarta - XBMC Plugin +# Utilidades para detectar vídeos de los diferentes conectores +# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ +#------------------------------------------------------------ +#LvX Edited Patched +import re,sys + +from core import scrapertools +from core import config +from core import logger + +# Listas de servidores empleadas a la hora de reproducir para explicarle al usuario por qué no puede ver un vídeo + +# Lista de los servidores que se pueden ver sin cuenta premium de ningún tipo +FREE_SERVERS = [] +FREE_SERVERS.extend(['directo','allmyvideos','adnstream','bliptv','divxstage','facebook','fourshared', 'hulkshare', 'twitvid']) +FREE_SERVERS.extend(['googlevideo','gigabyteupload','mediafire','moevideos','movshare','novamov']) #,'putlocker' +FREE_SERVERS.extend(['royalvids','sockshare','stagevu','tutv','userporn','veoh','videobam']) +FREE_SERVERS.extend(['vidbux','videoweed','vimeo','vk','watchfreeinhd','youtube'])#,'videobeer','nowdownload' +FREE_SERVERS.extend(['jumbofiles','nowvideo','streamcloud', 'zinwa', 'dailymotion','justintv', 'vidbull']) +FREE_SERVERS.extend(['vureel','nosvideo','videopremium','movreel','flashx','upafile']) +FREE_SERVERS.extend(['fileflyer','playedto','tunepk','powvideo','videomega','mega','vidspot','netutv','rutube']) +FREE_SERVERS.extend(['videozed','documentary','hugefiles', 'firedrive','videott','tumitv','gamovideo']) +FREE_SERVERS.extend(['torrent','video4you','mailru','streaminto','backin','akstream', 'speedvideo', 'junkyvideo', 'rapidvideo']) + +# Lista de TODOS los servidores que funcionan con cuenta premium individual +PREMIUM_SERVERS = ['uploadedto','nowvideo'] + +# Lista de TODOS los servidores soportados por Filenium +FILENIUM_SERVERS = [] +FILENIUM_SERVERS.extend(['linkto','uploadedto','gigasize','youtube','filepost','hotfile','rapidshare','turbobit','mediafire','bitshare','depositfiles']) +FILENIUM_SERVERS.extend(['oron','allmyvideos','novamov','videoweed','movshare','letitbit','shareonline','shareflare','rapidgator']) +FILENIUM_SERVERS.extend(['filefactory','netload','nowdownload','filevelocity','freakshare','userporn','divxstage','putlocker','extabit','vidxden']) +FILENIUM_SERVERS.extend(['vimeo','dailymotion','jumbofiles','zippyshare','glumbouploads','bayfiles','twoshared', 'fourshared','crocko','fiberupload']) +FILENIUM_SERVERS.extend(['ifile','megashares','slingfile','uploading','vipfile','filenium','movreel','one80upload','flashx','nowvideo','vk','moevideos']) +FILENIUM_SERVERS.extend(['cloudzer','filecloudio','luckyshare','lumfile','playedto','ryushare','streamcloud','videozed','xenubox','filesmonster']) +#wupload,fileserve + +# Lista de TODOS los servidores soportados por Real-Debrid +REALDEBRID_SERVERS = ['one80upload','tenupload','onefichier','onehostclick','twoshared','fourfastfile','fourshared','abc','asfile','badongo','bayfiles','bitshare','cbscom','cloudzer','cramit','crocko','cwtv','dailymotion','dateito', + 'dengee','diglo','extabit','fiberupload','filedino','filefactory','fileflyer','filekeen','filemade','filemates','fileover','filepost', + 'filesend','filesmonster','filevelocity','freakshare','free','furk','fyels','gigasize','gigaup','glumbouploads','goldfile','hitfile','hipfile','hostingbulk', + 'hotfile','hulkshare','hulu','ifile','jakfile','jumbofiles','justintv','letitbit','loadto','mediafire','mega','megashare','megashares','mixturevideo','muchshare','netload', + 
'novafile','nowdownload','purevid','putbit','putlocker','redtube','rapidgator','rapidshare','rutube','ryushare','scribd','sendspace','sharebees','shareflare','shragle','slingfile','sockshare', + 'soundcloud','speedyshare','turbobit','unibytes','uploadc','uploadedto','uploading','uploadspace','uptobox', + 'userporn','veevr','vidbux','vidhog','vidxden','vimeo','vipfile','wattv','xfileshare','youporn','youtube','yunfile','zippyshare','justintv','nowvideo','ultramegabit','filesmonster','oboom'] +#wupload,fileserve + +ALLDEBRID_SERVERS = ['one80upload','onefichier','twoshared','fourfastfile','fourshared','albafile','bayfiles','bitshare','cloudzer','cramit','crocko','cyberlocker','dailymotion','dengee', + 'depfile','dlfree','extabit','extmatrix','filefactory','fileflyer','filegag','filehost','fileover','filepost','filerio','filesabc', + 'filesend','filesmonster','filestay','freakshare','gigasize','hotfile','hulkshare','jumbofiles','letitbit','loadto','mediafire','megashares','mixturevideo','netload', + 'nitrobits','oteupload','purevid','putlocker','rapidgator','rapidshare','redtube','scribd','secureupload','sharebees','shareflare','slingfile','sockshare', + 'soundcloud','speedload','speedyshare','turbobit', 'uloadto', 'uploadc','uploadedto','uploading','uptobox', + 'userporn','vimeo','vipfile','youporn','youtube','yunfile','zippyshare','lumfile','ultramegabit','filesmonster'] + +# Lista completa de todos los servidores soportados por pelisalacarta, usada para buscar patrones +ALL_SERVERS = list( set(FREE_SERVERS) | set(FILENIUM_SERVERS) | set(REALDEBRID_SERVERS) | set(ALLDEBRID_SERVERS) ) +ALL_SERVERS.sort() + +# Función genérica para encontrar vídeos en una página +def find_video_items(item=None, data=None, channel=""): + logger.info("[launcher.py] findvideos") + + # Descarga la página + if data is None: + from core import scrapertools + data = scrapertools.cache_page(item.url) + #logger.info(data) + + # Busca los enlaces a los videos + from core.item import Item + from servers import servertools + listavideos = servertools.findvideos(data) + + if item is None: + item = Item() + + itemlist = [] + for video in listavideos: + scrapedtitle = item.title.strip() + " - " + video[0].strip() + scrapedurl = video[1] + server = video[2] + + itemlist.append( Item(channel=item.channel, title=scrapedtitle , action="play" , server=server, page=item.page, url=scrapedurl, thumbnail=item.thumbnail, show=item.show , plot=item.plot , folder=False) ) + + return itemlist + +def findvideosbyserver(data, serverid): + logger.info("[servertools.py] findvideos") + encontrados = set() + devuelve = [] + try: + exec "from servers import "+serverid + exec "devuelve.extend("+serverid+".find_videos(data))" + except ImportError: + logger.info("No existe conector para "+serverid) + except: + logger.info("Error en el conector "+serverid) + import traceback,sys + from pprint import pprint + exc_type, exc_value, exc_tb = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_tb) + for line in lines: + line_splits = line.split("\n") + for line_split in line_splits: + logger.error(line_split) + + return devuelve + +def findvideos(data): + logger.info("[servertools.py] findvideos") + encontrados = set() + devuelve = [] + + # Ejecuta el findvideos en cada servidor + for serverid in ALL_SERVERS: + try: + # Sustituye el código por otro "Plex compatible" + #exec "from servers import "+serverid + #exec "devuelve.extend("+serverid+".find_videos(data))" + servers_module = __import__("servers."+serverid) + server_module 
= getattr(servers_module,serverid) + devuelve.extend( server_module.find_videos(data) ) + except ImportError: + logger.info("No existe conector para "+serverid) + except: + logger.info("Error en el conector "+serverid) + import traceback,sys + from pprint import pprint + exc_type, exc_value, exc_tb = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_tb) + for line in lines: + line_splits = line.split("\n") + for line_split in line_splits: + logger.error(line_split) + + return devuelve + +def get_video_urls(server,url): + ''' + servers_module = __import__("servers."+server) + server_module = getattr(servers_module,server) + return server_module.get_video_url( page_url=url) + ''' + + video_urls,puede,motivo = resolve_video_urls_for_playing(server,url) + return video_urls + +def get_channel_module(channel_name): + channels_module = __import__("channels."+channel_name) + channel_module = getattr(channels_module,channel_name) + return channel_module + +def get_server_from_url(url): + encontrado = findvideos(url) + if len(encontrado)>0: + devuelve = encontrado[0][2] + else: + devuelve = "directo" + + return devuelve + +def resolve_video_urls_for_playing(server,url,video_password="",muestra_dialogo=False): + logger.info("[servertools.py] resolve_video_urls_for_playing, server="+server+", url="+url) + video_urls = [] + torrent = False + + server = server.lower() + + # Si el vídeo es "directo", no hay que buscar más + if server=="directo" or server=="local": + logger.info("[servertools.py] server=directo, la url es la buena") + + try: + import urlparse + parsed_url = urlparse.urlparse(url) + logger.info("parsed_url="+str(parsed_url)) + extension = parsed_url.path[-4:] + except: + extension = url[-4:] + + video_urls = [[ "%s [%s]" % (extension,server) , url ]] + return video_urls,True,"" + + # Averigua las URL de los vídeos + else: + + #if server=="torrent": + # server="filenium" + # torrent = True + + # Carga el conector + try: + # Muestra un diálogo de progreso + if muestra_dialogo: + import xbmcgui + progreso = xbmcgui.DialogProgress() + progreso.create( "pelisalacarta" , "Conectando con "+server) + + # Sustituye el código por otro "Plex compatible" + #exec "from servers import "+server+" as server_connector" + servers_module = __import__("servers."+server) + server_connector = getattr(servers_module,server) + + logger.info("[servertools.py] servidor de "+server+" importado") + if muestra_dialogo: + progreso.update( 20 , "Conectando con "+server) + + # Si tiene una función para ver si el vídeo existe, lo comprueba ahora + if hasattr(server_connector, 'test_video_exists'): + logger.info("[servertools.py] invocando a "+server+".test_video_exists") + puedes,motivo = server_connector.test_video_exists( page_url=url ) + + # Si la funcion dice que no existe, fin + if not puedes: + logger.info("[servertools.py] test_video_exists dice que el video no existe") + if muestra_dialogo: progreso.close() + return video_urls,puedes,motivo + else: + logger.info("[servertools.py] test_video_exists dice que el video SI existe") + + # Obtiene enlaces free + if server in FREE_SERVERS: + logger.info("[servertools.py] invocando a "+server+".get_video_url") + video_urls = server_connector.get_video_url( page_url=url , video_password=video_password ) + + # Si no se encuentran vídeos en modo free, es porque el vídeo no existe + if len(video_urls)==0: + if muestra_dialogo: progreso.close() + return video_urls,False,"No se puede encontrar el vídeo en "+server + + # Obtiene enlaces premium si 
tienes cuenta en el server + if server in PREMIUM_SERVERS and config.get_setting(server+"premium")=="true": + video_urls = server_connector.get_video_url( page_url=url , premium=(config.get_setting(server+"premium")=="true") , user=config.get_setting(server+"user") , password=config.get_setting(server+"password"), video_password=video_password ) + + # Si no se encuentran vídeos en modo premium directo, es porque el vídeo no existe + if len(video_urls)==0: + if muestra_dialogo: progreso.close() + return video_urls,False,"No se puede encontrar el vídeo en "+server + + # Obtiene enlaces filenium si tienes cuenta + if server in FILENIUM_SERVERS and config.get_setting("fileniumpremium")=="true": + + # Muestra un diálogo de progreso + if muestra_dialogo: + progreso.update( 40 , "Conectando con Filenium") + + from servers import filenium as gen_conector + + video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("fileniumpremium")=="true") , user=config.get_setting("fileniumuser") , password=config.get_setting("fileniumpassword"), video_password=video_password ) + extension = gen_conector.get_file_extension(video_gen) + logger.info("[xbmctools.py] filenium url="+video_gen) + video_urls.append( [ extension+" ["+server+"][filenium]", video_gen ] ) + + # Obtiene enlaces realdebrid si tienes cuenta + if server in REALDEBRID_SERVERS and config.get_setting("realdebridpremium")=="true": + + # Muestra un diálogo de progreso + if muestra_dialogo: + progreso.update( 60 , "Conectando con Real-Debrid") + + from servers import realdebrid as gen_conector + video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("realdebridpremium")=="true") , user=config.get_setting("realdebriduser") , password=config.get_setting("realdebridpassword"), video_password=video_password ) + logger.info("[xbmctools.py] realdebrid url="+video_gen) + if not "REAL-DEBRID" in video_gen: + video_urls.append( [ "."+video_gen.rsplit('.',1)[1]+" [realdebrid]", video_gen ] ) + else: + if muestra_dialogo: progreso.close() + # Si RealDebrid da error pero tienes un enlace válido, no te dice nada + if len(video_urls)==0: + return video_urls,False,video_gen + + # Obtiene enlaces alldebrid si tienes cuenta + if server in ALLDEBRID_SERVERS and config.get_setting("alldebridpremium")=="true": + + # Muestra un diálogo de progreso + if muestra_dialogo: + progreso.update( 80 , "Conectando con All-Debrid") + + from servers import alldebrid as gen_conector + video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("alldebridpremium")=="true") , user=config.get_setting("alldebriduser") , password=config.get_setting("alldebridpassword"), video_password=video_password ) + logger.info("[xbmctools.py] alldebrid url="+video_gen) + if video_gen.startswith("http"): + video_urls.append( [ "."+video_gen.rsplit('.',1)[1]+" [alldebrid]", video_gen ] ) + else: + # Si Alldebrid da error pero tienes un enlace válido, no te dice nada + if len(video_urls)==0: + return [],False,video_gen.strip() + + + if muestra_dialogo: + progreso.update( 100 , "Proceso finalizado") + + # Cierra el diálogo de progreso + if muestra_dialogo: progreso.close() + + # Llegas hasta aquí y no tienes ningún enlace para ver, así que no vas a poder ver el vídeo + if len(video_urls)==0: + # ¿Cual es el motivo? 
+ + # 1) No existe -> Ya está controlado + # 2) No tienes alguna de las cuentas premium compatibles + + # Lista de las cuentas que soportan este servidor + listapremium = "" + if server in ALLDEBRID_SERVERS: listapremium+="All-Debrid o " + if server in FILENIUM_SERVERS: listapremium+="Filenium o " + if server in REALDEBRID_SERVERS: listapremium+="Real-Debrid o " + if server in PREMIUM_SERVERS: listapremium+=server+" o " + listapremium = listapremium[:-3] + + return video_urls,False,"Para ver un vídeo en "+server+" necesitas
una cuenta en "+listapremium + + except: + if muestra_dialogo: progreso.close() + import traceback + from pprint import pprint + exc_type, exc_value, exc_tb = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_tb) + for line in lines: + line_splits = line.split("\n") + for line_split in line_splits: + logger.error(line_split) + + return video_urls,False,"Se ha producido un error en
el conector con "+server + + return video_urls,True,"" +#!/usr/bin/python + +import sys +import struct +import random +import signal + +try: + import gevent + from gevent import socket + from gevent.server import StreamServer + from gevent.socket import create_connection, gethostbyname +except: + print >>sys.stderr, "please install gevent first!" + sys.exit(1) + +import config + +keys = [] + +if config.seed: + r = random.Random(config.seed) + keys = [r.randint(0, 255) for i in xrange(0, 1024)] + keys += keys +else: + raise Exception("config.seed not set!") + +try: + import ctypes + + try: + filename = "./xor.so" + xor = ctypes.CDLL(filename) + except: + import platform + bits, exetype = platform.architecture() + filename = "./xor_%s_%s.so" % (exetype, bits) + xor = ctypes.CDLL(filename) + + print >>sys.stderr, "loaded %s, using faster xor" % filename + + key_str = ''.join(map(chr, keys)) + + if xor.set_xor_table(key_str, len(key_str)) == 1: + raise Exception("set xor table failed") + + def encrypt(data, pos): + ret = ctypes.create_string_buffer(data) + xor.xor(ret, len(data), pos) + return ret.raw[:-1] + +except: + + print >>sys.stderr, "can't load xor.so, using python native." + def encrypt(data, pos): + return ''.join(map(lambda x, y: chr(ord(x) ^ y), data, keys[pos:pos+len(data)])) + +decrypt = encrypt + +def dumps(x): + return ' '.join(map(lambda t: '%x' % struct.unpack('B', t)[0], x)) + +class XSocket(gevent.socket.socket): + def __init__(self, socket = None, addr = None, secure = False): + if socket is not None: + gevent.socket.socket.__init__(self, _sock = socket) + elif addr is not None: + gevent.socket.socket.__init__(self) + self.connect(addr) + else: + raise Exception("XSocket.init: bad arguments") + + self.secure = secure + self.recv_idx = 0 + self.send_idx = 0 + + def unpack(self, fmt, length): + data = self.recv(length) + if len(data) < length: + raise Exception("XSocket.unpack: bad formatted stream") + return struct.unpack(fmt, data) + + def pack(self, fmt, *args): + data = struct.pack(fmt, *args) + return self.sendall(data) + + def recv(self, length, *args): + data = gevent.socket.socket.recv(self, length, *args) + if config.debug: print 'Received:', dumps(data) + if self.secure: + data = decrypt(data, self.recv_idx) + self.recv_idx = (self.recv_idx + len(data)) % 1024 + if config.debug: print 'Decrypted:', dumps(data), '--', data + return data + + def sendall(self, data, flags = 0): + if config.debug: print 'Send:', dumps(data), '--', data + if self.secure: + data = encrypt(data, self.send_idx) + self.send_idx = (self.send_idx + len(data)) % 1024 + if config.debug: print 'Encrypted:', dumps(data) + return gevent.socket.socket.sendall(self, data, flags) + + def forward(self, dest): + try: + while True: + data = self.recv(1024) + if not data: + break + dest.sendall(data) + #except IOError, e: pass + finally: + print 'connection closed' + self.close() + dest.close() + + +class SocksServer(StreamServer): + def __init__(self, listener, **kwargs): + StreamServer.__init__(self, listener, **kwargs) + + def handle(self, sock, addr): + print 'connection from %s:%s' % addr + + src = XSocket(socket = sock, secure = True) + + #socks5 negotiation step2: specify command and destination + ver, cmd, rsv, atype = src.unpack('BBBB', 4) + + if cmd != 0x01: + src.pack('BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0) + return + + if atype == 0x01: #ipv4 + host, port = src.unpack('!IH', 6) + hostip = socket.inet_ntoa(struct.pack('!I', host)) + elif atype == 0x03: #domain name + length = src.unpack('B', 
1)[0] + hostname, port = src.unpack("!%dsH" % length, length + 2) + hostip = gethostbyname(hostname) + host = struct.unpack("!I", socket.inet_aton(hostip))[0] + elif atype == 0x04: #ipv6: TODO + src.pack('!BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0) + return + else: + src.pack('!BBBBIH', 0x05, 0x07, 0x00, 0x01, 0, 0) + return + + try: + dest = XSocket(addr = (hostip, port)) + except IOError, ex: + print "%s:%d" % addr, "failed to connect to %s:%d" % (hostip, port) + src.pack('!BBBBIH', 0x05, 0x03, 0x00, 0x01, host, port) + return + + src.pack('!BBBBIH', 0x05, 0x00, 0x00, 0x01, host, port) + + gevent.spawn(src.forward, dest) + gevent.spawn(dest.forward, src) + + def close(self): + sys.exit(0) + + @staticmethod + def start_server(): + server = SocksServer(('0.0.0.0', config.server_port)) + gevent.signal(signal.SIGTERM, server.close) + gevent.signal(signal.SIGINT, server.close) + print "Server is listening on 0.0.0.0:%d" % config.server_port + server.serve_forever() + + +class PortForwarder(StreamServer): + def __init__(self, listener, dest, **kwargs): + StreamServer.__init__(self, listener, **kwargs) + self.destaddr = dest + + def handle(self, sock, addr): + + src = XSocket(socket = sock) + + #socks5 negotiation step1: choose an authentication method + ver, n_method = src.unpack('BB', 2) + + if ver != 0x05: + src.pack('BB', 0x05, 0xff) + return + + if n_method > 0: + src.recv(n_method) + + src.pack('!BB', 0x05, 0x00) #0x00 means no authentication needed + + print "Forwarder: connection from %s:%d" % addr + try: + dest = XSocket(addr = self.destaddr, secure = True) + except IOError, ex: + print "%s:%d" % addr, "failed to connect to SocksServer %s:%d" % self.destaddr + print ex + return + gevent.spawn(src.forward, dest) + gevent.spawn(dest.forward, src) + + def close(self): + sys.exit(0) + + @staticmethod + def start_server(): + forward_addr = (config.forward_host, config.forward_port) + server_addr = (config.server_host, config.server_port) + server = PortForwarder(forward_addr, server_addr) + + gevent.signal(signal.SIGTERM, server.close) + gevent.signal(signal.SIGINT, server.close) + print "Forwarder is listening on %s:%d for Server %s:%d" % \ + (config.forward_host, config.forward_port, \ + config.server_host, config.server_port) + server.serve_forever() + +if __name__ == '__main__': + import sys + if len(sys.argv) == 1: + PortForwarder.start_server() + else: + SocksServer.start_server() + +# +# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004 +# +# Written by Hye-Shik Chang +# + +import _codecs_jp, codecs +import _multibytecodec as mbc + +codec = _codecs_jp.getcodec('euc_jis_2004') + +class Codec(codecs.Codec): + encode = codec.encode + decode = codec.decode + +class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, + codecs.IncrementalEncoder): + codec = codec + +class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, + codecs.IncrementalDecoder): + codec = codec + +class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): + codec = codec + +class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): + codec = codec + +def getregentry(): + return codecs.CodecInfo( + name='euc_jis_2004', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +#!/usr/bin/env python +# +# Copyright 2013 Tristan Bereau and Christian Kramer +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#################### +# +# This script transforms a .pun file into a charmm-readable .lpun file. +# Optimized Parameters can be given + +import sys +import mtp_tools + +punfile = '' +parmfile = '' + +############## +# Read input + +for i in range(len(sys.argv)): + if sys.argv[i] == '-pun': + punfile = sys.argv[i+1] + elif sys.argv[i] == '-par': + parmfile = sys.argv[i+1] + elif sys.argv[i] == '-h': + print "Usage: python pun2charmmlpun.py -pun [file] [-par [parfile]] [-h]" + exit(0) + +if punfile == '': + print "Usage: python pun2charmmlpun.py -pun [file] [-par [parfile]] [-h]" + exit(0) + +############# +# Check that the file does not end in .lpun, otherwise quit. + +if punfile[punfile.rindex('.'):] == '.lpun': + print "Error: the script will generate a .lpun file, please rename current file." + exit(1) + +# Read prmfile if given + +prms = {} +if parmfile != '': + import numpy + f = open(parmfile,'r') + a = f.readlines() + f.close() + for line in a: + b = line.split() + prms[(b[0][2:-2],b[1][1:-2])] = numpy.array([float(b[i+3]) for i in range(len(b)-3)]) + +mol = mtp_tools.molecule() +mol.readfrompunfile(punfile) +mol.Calc_locMTP() + +if parmfile != '': + for atom in mol.atoms: + atom.chrg = prms[(atom.atype,'chrg')] + atom.dloc = prms[(atom.atype,'dloc')] + atom.Qloc = prms[(atom.atype,'Qloc')] + mol.Calc_gloMTP() + +mol.adjust_charge() +mol.write_localized_mtp_file(punfile[:punfile.rindex('.')]+'.lpun') + + + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Contains the Schema classes. + +A schema may be one of: + A record, mapping field names to field value data; + An error, equivalent to a record; + An enum, containing one of a small set of symbols; + An array of values, all of the same schema; + A map containing string/value pairs, each of a declared schema; + A union of other schemas; + A fixed sized binary object; + A unicode string; + A sequence of bytes; + A 32-bit signed int; + A 64-bit signed long; + A 32-bit floating-point float; + A 64-bit floating-point double; + A boolean; or + Null. 
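+
+  As a hypothetical illustration (this example is not part of the module
+  itself), a record schema combining several of the kinds listed above might
+  be declared in JSON as:
+
+    {"type": "record", "name": "Pair", "namespace": "example.avro",
+     "fields": [{"name": "key",   "type": "string"},
+                {"name": "value", "type": ["null", "long"]}]}
+
+  where the "value" field is a union that allows either a long or null.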
+""" +try: + import json +except ImportError: + import simplejson as json + +# +# Constants +# + +PRIMITIVE_TYPES = ( + 'null', + 'boolean', + 'string', + 'bytes', + 'int', + 'long', + 'float', + 'double', +) + +NAMED_TYPES = ( + 'fixed', + 'enum', + 'record', + 'error', +) + +VALID_TYPES = PRIMITIVE_TYPES + NAMED_TYPES + ( + 'array', + 'map', + 'union', + 'request', + 'error_union' +) + +SCHEMA_RESERVED_PROPS = ( + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +) + +FIELD_RESERVED_PROPS = ( + 'default', + 'name', + 'doc', + 'order', + 'type', +) + +VALID_FIELD_SORT_ORDERS = ( + 'ascending', + 'descending', + 'ignore', +) + +# +# Exceptions +# + +class AvroException(Exception): + pass + +class SchemaParseException(AvroException): + pass + +# +# Base Classes +# + +class Schema(object): + """Base class for all Schema classes.""" + def __init__(self, type, other_props=None): + # Ensure valid ctor args + if not isinstance(type, basestring): + fail_msg = 'Schema type must be a string.' + raise SchemaParseException(fail_msg) + elif type not in VALID_TYPES: + fail_msg = '%s is not a valid type.' % type + raise SchemaParseException(fail_msg) + + # add members + if not hasattr(self, '_props'): self._props = {} + self.set_prop('type', type) + self.type = type + self._props.update(other_props or {}) + + # Read-only properties dict. Printing schemas + # creates JSON properties directly from this dict. + props = property(lambda self: self._props) + + # Read-only property dict. Non-reserved properties + other_props = property(lambda self: get_other_props(self._props, SCHEMA_RESERVED_PROPS), + doc="dictionary of non-reserved properties") + + # utility functions to manipulate properties dict + def get_prop(self, key): + return self._props.get(key) + + def set_prop(self, key, value): + self._props[key] = value + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names): + """ + Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. + """ + raise Exception("Must be implemented by subclasses.") + +class Name(object): + """Class to describe Avro name.""" + + def __init__(self, name_attr, space_attr, default_space): + """ + Formulate full name according to the specification. + + @arg name_attr: name value read in schema or None. + @arg space_attr: namespace value read in schema or None. + @ard default_space: the current default space or None. + """ + # Ensure valid ctor args + if not (isinstance(name_attr, basestring) or (name_attr is None)): + fail_msg = 'Name must be non-empty string or None.' + raise SchemaParseException(fail_msg) + elif name_attr == "": + fail_msg = 'Name must be non-empty string or None.' + raise SchemaParseException(fail_msg) + + if not (isinstance(space_attr, basestring) or (space_attr is None)): + fail_msg = 'Space must be non-empty string or None.' + raise SchemaParseException(fail_msg) + elif name_attr == "": + fail_msg = 'Space must be non-empty string or None.' + raise SchemaParseException(fail_msg) + + if not (isinstance(default_space, basestring) or (default_space is None)): + fail_msg = 'Default space must be non-empty string or None.' + raise SchemaParseException(fail_msg) + elif name_attr == "": + fail_msg = 'Default must be non-empty string or None.' 
+ raise SchemaParseException(fail_msg) + + self._full = None; + + if name_attr is None or name_attr == "": + return; + + if (name_attr.find('.') < 0): + if (space_attr is not None) and (space_attr != ""): + self._full = "%s.%s" % (space_attr, name_attr) + else: + if (default_space is not None) and (default_space != ""): + self._full = "%s.%s" % (default_space, name_attr) + else: + self._full = name_attr + else: + self._full = name_attr + + def __eq__(self, other): + if not isinstance(other, Name): + return False + return (self.fullname == other.fullname) + + fullname = property(lambda self: self._full) + + def get_space(self): + """Back out a namespace from full name.""" + if self._full is None: + return None + + if (self._full.find('.') > 0): + return self._full.rsplit(".", 1)[0] + else: + return "" + +class Names(object): + """Track name set and default namespace during parsing.""" + def __init__(self, default_namespace=None): + self.names = {} + self.default_namespace = default_namespace + + def has_name(self, name_attr, space_attr): + test = Name(name_attr, space_attr, self.default_namespace).fullname + return self.names.has_key(test) + + def get_name(self, name_attr, space_attr): + test = Name(name_attr, space_attr, self.default_namespace).fullname + if not self.names.has_key(test): + return None + return self.names[test] + + def prune_namespace(self, properties): + """given a properties, return properties with namespace removed if + it matches the own default namespace""" + if self.default_namespace is None: + # I have no default -- no change + return properties + if 'namespace' not in properties: + # he has no namespace - no change + return properties + if properties['namespace'] != self.default_namespace: + # we're different - leave his stuff alone + return properties + # we each have a namespace and it's redundant. delete his. + prunable = properties.copy() + del(prunable['namespace']) + return prunable + + def add_name(self, name_attr, space_attr, new_schema): + """ + Add a new schema object to the name set. + + @arg name_attr: name value read in schema + @arg space_attr: namespace value read in schema. + + @return: the Name that was just added. + """ + to_add = Name(name_attr, space_attr, self.default_namespace) + + if to_add.fullname in VALID_TYPES: + fail_msg = '%s is a reserved type name.' % to_add.fullname + raise SchemaParseException(fail_msg) + elif self.names.has_key(to_add.fullname): + fail_msg = 'The name "%s" is already in use.' % to_add.fullname + raise SchemaParseException(fail_msg) +
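+    # At this point the new name has passed validation and is about to be
+    # registered. Hypothetical usage from parsing code (not part of the
+    # original module; "pair_schema" is a placeholder schema object):
+    #   names = Names(default_namespace="example.avro")
+    #   names.add_name("Pair", None, pair_schema)  # stored as "example.avro.Pair"
+    #   names.has_name("Pair", None)               # now True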