def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
    return (
        protocol == 'icmp' and
        protocol == rule['protocol'] and
        icmp_code == rule['icmpcode'] and
        icmp_type == rule['icmptype']
    )

def _type_cidr_match(self, rule, cidr):
    return cidr == rule['cidrlist']

def create_firewall_rule(self):
    firewall_rule = self.get_firewall_rule()
    if not firewall_rule:
        self.result['changed'] = True
        args = {
            'cidrlist': self.module.params.get('cidr'),
            'protocol': self.module.params.get('protocol'),
            'startport': self.module.params.get('start_port'),
            'endport': self.get_or_fallback('end_port', 'start_port'),
            'icmptype': self.module.params.get('icmp_type'),
            'icmpcode': self.module.params.get('icmp_code')
        }
        fw_type = self.module.params.get('type')
        if not self.module.check_mode:
            if fw_type == 'egress':
                args['networkid'] = self.get_network(key='id')
                res = self.cs.createEgressFirewallRule(**args)
            else:
                args['ipaddressid'] = self.get_ip_address('id')
                res = self.cs.createFirewallRule(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                firewall_rule = self.poll_job(res, 'firewallrule')
    return firewall_rule

def remove_firewall_rule(self):
    firewall_rule = self.get_firewall_rule()
    if firewall_rule:
        self.result['changed'] = True
        args = {
            'id': firewall_rule['id']
        }
        fw_type = self.module.params.get('type')
        if not self.module.check_mode:
            if fw_type == 'egress':
                res = self.cs.deleteEgressFirewallRule(**args)
            else:
                res = self.cs.deleteFirewallRule(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                self.poll_job(res, 'firewallrule')
    return firewall_rule

def get_result(self, firewall_rule):
    super(AnsibleCloudStackFirewall, self).get_result(firewall_rule)
    if firewall_rule:
        self.result['type'] = self.module.params.get('type')
        if self.result['type'] == 'egress':
            self.result['network'] = self.get_network(key='displaytext')
    return self.result

def main():
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        ip_address=dict(),
        network=dict(),
        cidr=dict(default='0.0.0.0/0'),
        protocol=dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'),
        type=dict(choices=['ingress', 'egress'], default='ingress'),
        icmp_type=dict(type='int'),
        icmp_code=dict(type='int'),
        start_port=dict(type='int', aliases=['port']),
        end_port=dict(type='int'),
        state=dict(choices=['present', 'absent'], default='present'),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
    ))

    required_together = cs_required_together()
    required_together.extend([
        ['icmp_type', 'icmp_code'],
    ])

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=required_together,
        required_one_of=(
            ['ip_address', 'network'],
        ),
        mutually_exclusive=(
            ['icmp_type', 'start_port'],
            ['icmp_type', 'end_port'],
            ['ip_address', 'network'],
        ),
        supports_check_mode=True
    )

    try:
        acs_fw = AnsibleCloudStackFirewall(module)

        state = module.params.get('state')
        if state in ['absent']:
            fw_rule = acs_fw.remove_firewall_rule()
        else:
            fw_rule = acs_fw.create_firewall_rule()

        result = acs_fw.get_result(fw_rule)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
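# A minimal standalone sketch (not part of the module) of how the _icmp_match
# helper above compares a requested ICMP rule against a rule dict as returned
# by the CloudStack API; the sample rule values below are hypothetical.
def icmp_match(rule, protocol, icmp_code, icmp_type):
    return (
        protocol == 'icmp' and
        protocol == rule['protocol'] and
        icmp_code == rule['icmpcode'] and
        icmp_type == rule['icmptype']
    )

sample_rule = {'protocol': 'icmp', 'icmpcode': 0, 'icmptype': 8}
assert icmp_match(sample_rule, 'icmp', 0, 8)      # echo-request rule matches
assert not icmp_match(sample_rule, 'tcp', 0, 8)   # protocol mismatch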
import os

import numpy as np
import pytest
from rpcm import rpc_from_geotiff

import s2p
from tests_utils import data_path


@pytest.fixture(name='matches')
def fixture_matches():
    matches = np.loadtxt(
        data_path(os.path.join('expected_output', 'units', 'unit_matches_from_rpc.txt'))
    )
    return matches


@pytest.fixture(name='images')
def fixture_images():
    res = []
    for i in [1, 2]:
        im = data_path(os.path.join('input_pair', 'img_0{}.tif'.format(i)))
        rpc = rpc_from_geotiff(im)
        res.append(im)
        res.append(rpc)
    return res


def test_rectification_homographies(matches):
    """
    Test for rectification.rectification_homographies().
    """
    x, y, w, h = 100, 100, 200, 200
    H1, H2, F = s2p.rectification.rectification_homographies(matches, x, y, w, h)
    for variable, filename in zip([H1, H2, F], ['H1.txt', 'H2.txt', 'F.txt']):
        expected = np.loadtxt(data_path(os.path.join('expected_output', 'units', filename)))
        np.testing.assert_allclose(variable, expected, rtol=0.01, atol=1e-6, verbose=True)


def test_rectify_pair_no_matches(tmp_path, images):
    """
    Test running rectification.rectify_pair() where no matches are found.
    """
    im1, rpc1, im2, rpc2 = images
    with pytest.raises(s2p.rectification.NoRectificationMatchesError):
        s2p.rectification.rectify_pair(
            im1=im1, im2=im2, rpc1=rpc1, rpc2=rpc2,
            x=100, y=100, w=200, h=200,
            out1=str(tmp_path / 'out1.tiff'),
            out2=str(tmp_path / 'out2.tiff'),
            sift_matches=None,
            method='sift',
        )


def test_rectify_pair_few_matches(tmp_path, matches, images):
    """
    Test running rectification.rectify_pair() where fewer than 4 matches are found.
    """
    im1, rpc1, im2, rpc2 = images
    with pytest.raises(s2p.rectification.NoRectificationMatchesError):
        s2p.rectification.rectify_pair(
            im1=im1, im2=im2, rpc1=rpc1, rpc2=rpc2,
            x=100, y=100, w=200, h=200,
            out1=str(tmp_path / 'out1.tiff'),
            out2=str(tmp_path / 'out2.tiff'),
            sift_matches=matches[:3],
            method='sift',
        )


def test_rectify_pair_with_matches(tmp_path, matches, images):
    """
    Test running rectification.rectify_pair() with some matches.
    """
    im1, rpc1, im2, rpc2 = images
    s2p.rectification.rectify_pair(
        im1=im1, im2=im2, rpc1=rpc1, rpc2=rpc2,
        x=100, y=100, w=200, h=200,
        out1=str(tmp_path / 'out1.tiff'),
        out2=str(tmp_path / 'out2.tiff'),
        sift_matches=matches,
        method='sift',
    )
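# A minimal sketch (not part of the test suite) of the epipolar constraint the
# fundamental matrix F above encodes: for a correspondence (p1, p2) in
# homogeneous coordinates, p2^T F p1 == 0. The F below is the fundamental
# matrix of a hypothetical pure horizontal-translation pair, for which
# matching points share the same row.
import numpy as np

F = np.array([[0, 0, 0],
              [0, 0, -1],
              [0, 1, 0]], dtype=float)
p1 = np.array([10.0, 42.0, 1.0])   # (x1, y1) in image 1
p2 = np.array([25.0, 42.0, 1.0])   # same row in image 2
assert abs(p2 @ F @ p1) < 1e-12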
# coding: utf8
{
'!langcode!': 'fr-ca',
'!langname!': 'Français (Canadien)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s rangées supprimées',
'%s %%{row} updated': '%s rangées mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': 'Administrative Interface',
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s select',
'db': 'db',
'DB Model': 'Modèle DB',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Descriptif',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'entrer un entier compris entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'faq',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group ID': 'Groupe ID',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Key': 'Key',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'layouts',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Live Chat',
'Logged in': 'Connecté',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu modèle',
'My Sites': 'My Sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'please input your password again': "S'il vous plaît entrer votre mot de passe",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profile',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Record Updated': 'Record Updated',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration key': "Clé d'enregistrement",
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot de passe',
'Reset Password key': 'Réinitialiser le mot de passe',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'état',
'Statistics': 'Statistics',
'Stylesheet': 'Feuille de style',
'submit': 'submit',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui a été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier csv",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, et ~(...) pour NOT pour construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': 'User Voice',
'value already in database or empty': 'valeur déjà dans la base ou vide',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous roulez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
"""Support for the myStrom buttons."""
import logging

from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY

_LOGGER = logging.getLogger(__name__)


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up myStrom Binary Sensor."""
    hass.http.register_view(MyStromView(async_add_entities))

    return True


class MyStromView(HomeAssistantView):
    """View to handle requests from myStrom buttons."""

    url = "/api/mystrom"
    name = "api:mystrom"
    supported_actions = ["single", "double", "long", "touch"]

    def __init__(self, add_entities):
        """Initialize the myStrom URL endpoint."""
        self.buttons = {}
        self.add_entities = add_entities

    async def get(self, request):
        """Handle the GET request received from a myStrom button."""
        res = await self._handle(request.app["hass"], request.query)
        return res

    async def _handle(self, hass, data):
        """Handle requests to the myStrom endpoint."""
        button_action = next(
            (parameter for parameter in data if parameter in self.supported_actions),
            None,
        )

        if button_action is None:
            _LOGGER.error("Received unidentified message from myStrom button: %s", data)
            return (
                "Received unidentified message: {}".format(data),
                HTTP_UNPROCESSABLE_ENTITY,
            )

        button_id = data[button_action]
        entity_id = "{}.{}_{}".format(DOMAIN, button_id, button_action)
        if entity_id not in self.buttons:
            _LOGGER.info(
                "New myStrom button/action detected: %s/%s", button_id, button_action
            )
            self.buttons[entity_id] = MyStromBinarySensor(
                "{}_{}".format(button_id, button_action)
            )
            self.add_entities([self.buttons[entity_id]])
        else:
            new_state = self.buttons[entity_id].state == "off"
            self.buttons[entity_id].async_on_update(new_state)


class MyStromBinarySensor(BinarySensorDevice):
    """Representation of a myStrom button."""

    def __init__(self, button_id):
        """Initialize the myStrom Binary sensor."""
        self._button_id = button_id
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._button_id

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self._state

    def async_on_update(self, value):
        """Receive an update."""
        self._state = value
        self.async_schedule_update_ha_state()
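# A minimal sketch (separate from the component above) of how the view maps an
# incoming button request to an entity id: the query parameter name is the
# action and its value is the button id. The MAC-style id below is
# hypothetical.
DOMAIN = "binary_sensor"

query = {"double": "5ccf7f"}  # e.g. GET /api/mystrom?double=5ccf7f
action = next(p for p in query if p in ["single", "double", "long", "touch"])
entity_id = "{}.{}_{}".format(DOMAIN, query[action], action)
assert entity_id == "binary_sensor.5ccf7f_double"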
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import threading

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test


class GPUBinaryOpsTest(test.TestCase):

  def _compareGPU(self, x, y, np_func, tf_func):
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = self.evaluate(out)

    with self.cached_session(use_gpu=False):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_cpu = self.evaluate(out)

    self.assertAllClose(tf_cpu, tf_gpu)

  def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
    self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    self._compareGPU(x, y, np.power, math_ops.pow)

  def testFloatWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)


class MathBuiltinUnaryTest(test.TestCase):

  def _compare(self, x, np_func, tf_func, use_gpu):
    np_out = np_func(x)
    with self.cached_session(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      ofunc = tf_func(inx)
      tf_out = self.evaluate(ofunc)
    self.assertAllClose(np_out, tf_out)

  def _inv(self, x):
    return 1.0 / x

  def _rsqrt(self, x):
    return self._inv(np.sqrt(x))

  def _testDtype(self, dtype, use_gpu):
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    data_gt_1 = data + 2  # for x > 1
    self._compare(data, np.abs, math_ops.abs, use_gpu)
    self._compare(data, np.arccos, math_ops.acos, use_gpu)
    self._compare(data, np.arcsin, math_ops.asin, use_gpu)
    self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
    self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
    self._compare(data, np.arctan, math_ops.atan, use_gpu)
    self._compare(data, np.ceil, math_ops.ceil, use_gpu)
    self._compare(data, np.cos, math_ops.cos, use_gpu)
    self._compare(data, np.cosh, math_ops.cosh, use_gpu)
    self._compare(data, np.exp, math_ops.exp, use_gpu)
    self._compare(data, np.floor, math_ops.floor, use_gpu)
    self._compare(data, np.log, math_ops.log, use_gpu)
    self._compare(data, np.log1p, math_ops.log1p, use_gpu)
    self._compare(data, np.negative, math_ops.negative, use_gpu)
    self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
    self._compare(data, np.sin, math_ops.sin, use_gpu)
    self._compare(data, np.sinh, math_ops.sinh, use_gpu)
    self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
    self._compare(data, np.square, math_ops.square, use_gpu)
    self._compare(data, np.tan, math_ops.tan, use_gpu)
    self._compare(data, np.tanh, math_ops.tanh, use_gpu)
    self._compare(data, np.arctanh, math_ops.atanh, use_gpu)

  def testTypes(self):
    for dtype in [np.float32]:
      self._testDtype(dtype, use_gpu=True)

  def testFloorDivide(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    np_out = np.floor_divide(x, y + 0.1)

    with self.session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y + 0.1)
      ofunc = inx / iny
      out_func2 = math_ops.floor(ofunc)
      tf_out = self.evaluate(out_func2)

    self.assertAllClose(np_out, tf_out)


class BroadcastSimpleTest(test.TestCase):

  def _GetGradientArgs(self, xs, ys):
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(broadcast_gradient_args(xs, ys))

  @test_util.run_deprecated_v1
  def testBroadcast(self):
    r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
    self.assertAllEqual(r0, [])
    self.assertAllEqual(r1, [0, 1, 2])

  _GRAD_TOL = {dtypes.float32: 1e-3}

  def _compareGradientX(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGradientY(self, x, y, np_func, tf_func,
                        numeric_gradient_type=None):
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
    tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
    self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGpu(self, x, y, np_func, tf_func):
    np_ans = np_func(x, y)
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = self.evaluate(out)
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)
    # TODO(zhifengc/ke): make gradient checker work on GPU.

  @test_util.run_deprecated_v1
  def testGradient(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])

    self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
    self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)


class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
  """Tests concurrent sessions executing on the same GPU."""

  def _run_session(self, session, results):
    n_iterations = 500
    with session as s:
      data = variables.Variable(1.0)
      with ops.device('/device:GPU:0'):
        random_seed.set_random_seed(1)
        matrix1 = variables.Variable(
            random_ops.truncated_normal([1024, 1]), name='matrix1')
        matrix2 = variables.Variable(
            random_ops.truncated_normal([1, 1024]), name='matrix2')
        x1 = math_ops.multiply(data, matrix1, name='x1')
        x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
        x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
        s.run(variables.global_variables_initializer())

        for _ in xrange(n_iterations):
          value = s.run(x4)
          results.add(value.flat[0])
          if len(results) != 1:
            break

  @test_util.run_v1_only('b/126596827 needs graph mode in multiple threads')
  def testConcurrentSessions(self):
    n_threads = 4
    threads = []
    results = []
    for _ in xrange(n_threads):
      session = self.session(graph=ops.Graph(), use_gpu=True)
      results.append(set())
      args = (session, results[-1])
      threads.append(threading.Thread(target=self._run_session, args=args))

    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    flat_results = set(itertools.chain(*results))
    self.assertEqual(1,
                     len(flat_results),
                     'Expected single value, got %r' % flat_results)


if __name__ == '__main__':
  test.main()
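# A small pure-Python sketch (an assumption, not TensorFlow code) of what
# broadcast_gradient_args computes: for two broadcast-compatible shapes, the
# axes of the broadcast output along which each input's gradient must be
# summed. It reproduces the expectation asserted in testBroadcast above.
def broadcast_reduction_axes(sx, sy):
  ndim = max(len(sx), len(sy))
  px = [1] * (ndim - len(sx)) + list(sx)  # left-pad with 1s, as broadcasting does
  py = [1] * (ndim - len(sy)) + list(sy)
  rx = [i for i in range(ndim) if px[i] == 1 and py[i] > 1]
  ry = [i for i in range(ndim) if py[i] == 1 and px[i] > 1]
  return rx, ry

assert broadcast_reduction_axes([2, 3, 5], [1]) == ([], [0, 1, 2])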
import json

from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, render_to_response
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode


class CustomTestException(Exception):
    pass


def no_template_view(request):
    "A simple view that expects a GET request, and returns a plain response without using a template"
    return HttpResponse("No template used. Sample content: twice once twice. Content ends.")


def staff_only_view(request):
    "A view that can only be visited by staff. Non-staff members get an exception"
    if request.user.is_staff:
        return HttpResponse('')
    else:
        raise CustomTestException()


def get_view(request):
    "A simple login protected view"
    return HttpResponse("Hello world")
get_view = login_required(get_view)


def request_data(request, template='base.html', data='sausage'):
    "A simple view that returns the request data in the context"
    return render_to_response(template, {
        'get-foo': request.GET.get('foo'),
        'get-bar': request.GET.get('bar'),
        'post-foo': request.POST.get('foo'),
        'post-bar': request.POST.get('bar'),
        'data': data,
    })


def view_with_argument(request, name):
    """A view that takes a string argument

    The purpose of this view is to check that if a space is provided in
    the argument, the test framework unescapes the %20 before passing
    the value to the view.
    """
    if name == 'Arthur Dent':
        return HttpResponse('Hi, Arthur')
    else:
        return HttpResponse('Howdy, %s' % name)


def nested_view(request):
    """
    A view that uses the test client to call another view.
    """
    setup_test_environment()
    c = Client()
    c.get("/no_template_view/")
    return render_to_response('base.html', {'nested': 'yes'})


def login_protected_redirect_view(request):
    "A view that redirects all requests to the GET view"
    return HttpResponseRedirect('/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)


def redirect_to_self_with_changing_query_view(request):
    query = request.GET.copy()
    query['counter'] += '0'
    return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))


def set_session_view(request):
    "A view that sets a session variable"
    request.session['session_var'] = 'YES'
    return HttpResponse('set_session')


def check_session_view(request):
    "A view that reads a session variable"
    return HttpResponse(request.session.get('session_var', 'NO'))


def request_methods_view(request):
    "A view that responds with the request method"
    return HttpResponse('request method: %s' % request.method)


def return_unicode(request):
    return render_to_response('unicode.html')


def return_undecodable_binary(request):
    return HttpResponse(
        b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
    )


def return_json_response(request):
    return JsonResponse({'key': 'value'})


def return_json_file(request):
    "A view that parses and returns a JSON string as a file."
    match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
    if match:
        charset = match.group(1)
    else:
        charset = settings.DEFAULT_CHARSET

    # This just checks that the uploaded data is JSON
    obj_dict = json.loads(request.body.decode(charset))
    obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
    response = HttpResponse(obj_json.encode(charset), status=200,
                            content_type='application/json; charset=%s' % charset)
    response['Content-Disposition'] = 'attachment; filename=testfile.json'
    return response


def check_headers(request):
    "A view that responds with the value of the X-ARG-CHECK header"
    return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))


def body(request):
    "A view that is requested with GET and accesses request.body. Refs #14753."
    return HttpResponse(request.body)


def read_all(request):
    "A view that is requested and accesses request.read()."
    return HttpResponse(request.read())


def read_buffer(request):
    "A view that is requested and accesses request.read(LARGE_BUFFER)."
    return HttpResponse(request.read(99999))


def request_context_view(request):
    # Special attribute that won't be present on a plain HttpRequest
    request.special_path = request.path
    return render(request, 'request_context.html')


def render_template_multiple_times(request):
    """A view that renders a template multiple times."""
    return HttpResponse(
        render_to_string('base.html') + render_to_string('base.html'))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from subprocess import Popen, PIPE
import re

# change those symbols to whatever you prefer
symbols = {
    'ahead of': '↑', 'behind': '↓', 'staged': '♦', 'changed': '‣',
    'untracked': '…', 'clean': '⚡', 'unmerged': '≠', 'sha1': ':'
}

output, error = Popen(
    ['git', 'status'], stdout=PIPE, stderr=PIPE,
    universal_newlines=True).communicate()

if error:
    import sys
    sys.exit(0)
lines = output.splitlines()

behead_re = re.compile(
    r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit")
diverge_re = re.compile(r"^# and have (\d+) and (\d+) different")

status = ''
staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE)
changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE)
untracked = re.compile(r'^# Untracked files:$', re.MULTILINE)
unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE)


def execute(*command):
    out, err = Popen(stdout=PIPE, stderr=PIPE, *command).communicate()
    if not err:
        nb = len(out.splitlines())
    else:
        nb = '?'
    return nb


if staged.search(output):
    nb = execute(
        ['git', 'diff', '--staged', '--name-only', '--diff-filter=ACDMRT'])
    status += '%s%s' % (symbols['staged'], nb)
if unmerged.search(output):
    nb = execute(['git', 'diff', '--staged', '--name-only', '--diff-filter=U'])
    status += '%s%s' % (symbols['unmerged'], nb)
if changed.search(output):
    nb = execute(['git', 'diff', '--name-only', '--diff-filter=ACDMRT'])
    status += '%s%s' % (symbols['changed'], nb)
if untracked.search(output):
    status += symbols['untracked']
if status == '':
    status = symbols['clean']

remote = ''

bline = lines[0]
if bline.find('Not currently on any branch') != -1:
    branch = symbols['sha1'] + Popen([
        'git', 'rev-parse', '--short', 'HEAD'],
        stdout=PIPE).communicate()[0][:-1]
else:
    branch = bline.split(' ')[-1]

bstatusline = lines[1]
match = behead_re.match(bstatusline)
if match:
    remote = symbols[match.groups()[0]]
    remote += match.groups()[2]
elif lines[2:]:
    div_match = diverge_re.match(lines[2])
    if div_match:
        remote = "{behind}{1}{ahead of}{0}".format(
            *div_match.groups(), **symbols)

print('\n'.join([branch, remote, status]))
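# Hypothetical three-line output of the prompt script above, for a branch
# 'master' that is 2 commits ahead of its remote, with 1 staged and 3 changed
# files (branch name, then remote divergence, then working-tree status):
#
#   master
#   ↑2
#   ♦1‣3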
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#*                                                                        *
#* Author: The ALICE Off-line Project.                                    *
#* Contributors are mentioned in the code where appropriate.              *
#*                                                                        *
#* Permission to use, copy, modify and distribute this software and its   *
#* documentation strictly for non-commercial purposes is hereby granted   *
#* without fee, provided that the above copyright notice appears in all   *
#* copies and that both the copyright notice and this permission notice   *
#* appear in the supporting documentation. The authors make no claims     *
#* about the suitability of this software for any purpose. It is          *
#* provided "as is" without express or implied warranty.                  *
#**************************************************************************
"""
Wrapper class around a ROOT THnSparse adding the functionality of cut and
project in one step, making the handling of THnSparses a lot more handy
for users.

@author: Markus
"""
from copy import copy, deepcopy
from numpy import array as nparray


class AxisFormat(object):
    '''
    Definition of the axis format of a THnSparse
    '''

    def __init__(self, formatname):
        '''
        Constructor
        '''
        self._axes = {}
        self.__formatname = formatname

    def GetAxes(self):
        '''
        Get the list of axes defined
        '''
        return self._axes

    def FindAxis(self, axisname):
        '''
        Find axis by axis name. Returns the dimension of the axis.
        '''
        result = -1
        if axisname in self._axes.keys():
            result = self._axes[axisname]
        return result

    def _Deepcopy(self, other, memo):
        '''
        Performing deep copy
        '''
        self._axes = deepcopy(other.GetAxes(), memo)

    def _Copy(self, other):
        '''
        Performing shallow copy
        '''
        self._axes = copy(other.GetAxes())

    def GetAxisName(self, dim):
        '''
        Get the name of the axis by dimension
        '''
        if not dim in self._axes.values():
            return ""
        result = ""
        for k, v in self._axes.iteritems():
            if v == dim:
                result = k
                break
        return result

    def Print(self):
        for axis, dimension in self._axes.iteritems():
            print "Axis %s with dimension %d" % (axis, dimension)


class THnSparseCut(object):
    '''
    Cut class used in the THnSparse wrapper
    '''

    def __init__(self, axisname, minv, maxv):
        '''
        Constructor
        '''
        self.__axisname = axisname
        self.__minimum = minv
        self.__maximum = maxv

    def GetCutname(self):
        '''
        Get axis name
        '''
        return self.__axisname

    def GetMinimum(self):
        '''
        Get the minimum of the range
        '''
        return self.__minimum

    def GetMaximum(self):
        '''
        Get the maximum of the range
        '''
        return self.__maximum

    def SetMinimum(self, minv):
        '''
        Set the minimum of the range
        '''
        self.__minimum = minv

    def SetMaximum(self, maxv):
        '''
        Set the maximum of the range
        '''
        self.__maximum = maxv


class THnSparseWrapper(object):
    '''
    Wrapper class around THnSparse applying cuts on axes
    and performing projections
    '''

    def __init__(self, rootthnsparse):
        '''
        Constructor
        '''
        self._rootthnsparse = rootthnsparse
        self._axisdefinition = None
        self._cutlist = []

    def __deepcopy__(self, memo):
        '''
        Deep copy constructor
        '''
        result = THnSparseWrapper(deepcopy(self._rootthnsparse))
        result.CopyCuts(self, True)
        return result

    def __copy__(self):
        '''
        Shallow copy constructor
        '''
        result = THnSparseWrapper(copy(self._rootthnsparse))
        result.CopyCuts(self, False)
        return result

    def CopyCuts(self, reference, isDeep):
        '''
        Copy cuts into this object from a reference object
        '''
        for cut in reference.GetListOfCuts():
            newcut = None
            if isDeep:
                newcut = deepcopy(cut)
            else:
                newcut = copy(cut)
            self._cutlist.append(newcut)

    def GetListOfCuts(self):
        '''
        Access list of cuts
        '''
        return self._cutlist

    def GetHistogram(self):
        '''
        Access to underlying root histogram
        '''
        return self._rootthnsparse

    def GetHistogramName(self):
        '''
        Get the name of the underlying histogram
        '''
        return self._rootthnsparse.GetName()

    def Add(self, otherwrapper):
        self._rootthnsparse.Add(otherwrapper.GetHistogram())

    def Scale(self, scalefactor):
        self._rootthnsparse.Scale(scalefactor)

    def GetAxisDefinition(self):
        return self._axisdefinition

    def ApplyCut(self, axisname, minv, maxv):
        '''
        Apply cut on a given variable, defined by its axis name.
        minv and maxv define the range. If either of them is None,
        the range is open on one side.
        '''
        if not self._axisdefinition or self._axisdefinition.FindAxis(axisname) < 0:
            print "No axis definition or axis name (%s) not found" % (axisname)
            if self._axisdefinition:
                print "Known axes:"
                self._axisdefinition.Print()
            return
        existing = self.__FindCut(axisname)
        if not existing:
            self._cutlist.append(THnSparseCut(axisname, minv, maxv))
        else:
            existing.SetMinimum(minv)
            existing.SetMaximum(maxv)

    def RemoveCut(self, axisname):
        '''
        Remove cut again from the list
        '''
        for entry in self._cutlist:
            if entry.GetCutname() == axisname:
                self._cutlist.remove(entry)

    def ResetAxis(self, axisname):
        '''
        Reset axis range
        '''
        if not self._axisdefinition or self._axisdefinition.FindAxis(axisname) < 0:
            return
        myaxis = self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis(axisname))
        myaxis.SetRange(0, myaxis.GetNbins() + 1)

    def Projection1D(self, histname, axisname):
        '''
        Make projection, applying cuts defined before, and releasing the
        cuts afterwards. Projects to 1D with the axisname as dimension
        '''
        if not self._axisdefinition or self._axisdefinition.FindAxis(axisname) < 0:
            print "No axis definition or axis %s not found" % (axisname)
            return None
        self._PrepareProjection()
        result = self._rootthnsparse.Projection(self._axisdefinition.FindAxis(axisname))
        result.SetName(histname)
        self._CleanupProjection()
        return result

    def Projection2D(self, histname, axisdictionary):
        '''
        Make projection, applying cuts defined before, and releasing the
        cuts afterwards. Projects to 2D with the content in the axis
        dictionary as dimensions.
        Dictionary works in the way name -> dimension, starting with 0
        '''
        if not self._axisdefinition:
            return None
        hasfound = True
        for axisname in axisdictionary.keys():
            if self._axisdefinition.FindAxis(axisname) < 0:
                hasfound = False
                break
        if not hasfound:
            return None
        self._PrepareProjection()
        xdim = None
        ydim = None
        for k, v in axisdictionary.iteritems():
            if v == 1:
                ydim = self._axisdefinition.FindAxis(k)
            else:
                xdim = self._axisdefinition.FindAxis(k)
        result = self._rootthnsparse.Projection(ydim, xdim)
        result.SetName(histname)
        self._CleanupProjection()
        return result

    def ProjectionND(self, histname, axisdictionary):
        '''
        Make projection, applying cuts defined before, and releasing the
        cuts afterwards. Projects to N dimensions with the content in the
        axis dictionary as dimensions.
        Dictionary works in the way name -> dimension, starting with 0
        '''
        if not self._axisdefinition:
            return None
        hasfound = True
        for axisname in axisdictionary.keys():
            if self._axisdefinition.FindAxis(axisname) < 0:
                hasfound = False
                break
        if not hasfound:
            return None
        self._PrepareProjection()
        axismap = {}
        for k, v in axisdictionary.iteritems():
            axismap[v] = k
        axislist = []
        for mydim in sorted(axismap.keys()):
            axislist.append(self._axisdefinition.FindAxis(axismap[mydim]))
        result = self._rootthnsparse.Projection(len(axislist), nparray(axislist))
        result.SetName(histname)
        self._CleanupProjection()
        return result

    def _PrepareProjection(self):
        '''
        Apply all requested cuts before the projection
        '''
        for entry in self._cutlist:
            myaxis = self._rootthnsparse.GetAxis(
                self._axisdefinition.FindAxis(entry.GetCutname()))
            minv = 0 if not entry.GetMinimum() else myaxis.FindBin(entry.GetMinimum())
            maxv = myaxis.GetNbins() + 1 if not entry.GetMaximum() else myaxis.FindBin(entry.GetMaximum())
            myaxis.SetRange(minv, maxv)

    def _CleanupProjection(self):
        '''
        Reset all possible axis cuts.
        Does not remove a cut again from the list, but only releases the THnSparse
        '''
        for entry in self._cutlist:
            self.ResetAxis(entry.GetCutname())

    def __FindCut(self, cutname):
        '''
        Find cut in list by the axis name
        '''
        if not len(self._cutlist):
            return None
        result = None
        for entry in self._cutlist:
            if entry.GetCutname() == cutname:
                result = entry
                break
        return result
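# A minimal usage sketch (not part of the module): a hypothetical axis format
# with two axes, exercising the lookup helpers defined above. The wrapper
# itself needs a ROOT THnSparse, so only the axis-format side is shown here.
class ExampleAxisFormat(AxisFormat):

    def __init__(self):
        AxisFormat.__init__(self, "example")
        self._axes["pt"] = 0
        self._axes["eta"] = 1

fmt = ExampleAxisFormat()
print fmt.FindAxis("eta")    # -> 1
print fmt.FindAxis("phi")    # -> -1 (unknown axis)
print fmt.GetAxisName(0)     # -> "pt"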
# Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

all_event_list = []  # insert all tracepoint events related with this script
irq_dic = {}  # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}  # key is cpu and value includes time of NET_RX softirq-entry
                 # and a list which stacks receive events
receive_hunk_list = []  # a list which includes a sequence of receive events
rx_skb_list = []  # received packet list for matching
                  # skb_copy_datagram_iovec

buffer_budget = 65536  # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0  # overflow count

tx_queue_list = []  # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0  # overflow count

tx_xmit_list = []  # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0  # overflow count

tx_free_list = []  # list of packets which are freed

# options
show_tx = 0
show_rx = 0
dev = 0  # store a name of device specified by option "dev="
debug = 0

# indices of event_info tuple
EINFO_IDX_NAME = 0
EINFO_IDX_CONTEXT = 1
EINFO_IDX_CPU = 2
EINFO_IDX_TIME = 3
EINFO_IDX_PID = 4
EINFO_IDX_COMM = 5


# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    return (dst - src) / 1000000.0


# Display a process of transmitting a packet
def print_transmit(hunk):
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))


# Format for displaying rx packet processing
PF_IRQ_ENTRY = "  irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY = "  softirq_entry(+%.3fmsec)"
PF_NAPI_POLL = "  napi_poll_exit(+%.3fmsec %s)"
PF_JOINT = "         |"
PF_WJOINT = "         |            |"
PF_NET_RECV = "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX = "         |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM = "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB = "         |      kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB = "         |      consume_skb(+%.3fmsec)"


# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be shown
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return

    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t, event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t, event['comm_t'])
            print PF_JOINT


def trace_begin():
    global show_tx
    global show_rx
    global dev
    global debug

    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1


def trace_end():
    # order all events in time
    all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME],
                                         b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)

    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])

    # display transmit hunks
    if show_tx:
        print "   dev    len      Qdisc        " \
              "       netdevice             free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])

    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)


# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)


def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)


def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)


def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc, dev_name)
    all_event_list.append(event_info)


def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)


def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr)
    all_event_list.append(event_info)


def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)


def handle_irq_handler_entry(event_info):
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic[cpu].append(irq_record)


def handle_irq_handler_exit(event_info):
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t': time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)


def handle_irq_softirq_raise(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
            or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time': time, 'event': 'sirq_raise'})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)


def handle_irq_softirq_entry(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}


def handle_irq_softirq_exit(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t': sirq_ent_t, 'sirq_ext_t': time,
                'irq_list': irq_list, 'event_list': event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)


def handle_napi_poll(event_info):
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name': 'napi_poll',
                    'dev': dev_name, 'event_t': time}
        event_list.append(rec_data)


def handle_netif_rx(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
            or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time': time, 'event': 'netif_rx',
                           'skbaddr': skbaddr, 'skblen': skblen,
                           'dev_name': dev_name})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)


def handle_netif_receive_skb(event_info):
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name': 'netif_receive_skb',
                    'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1


def handle_net_dev_queue(event_info):
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev': dev_name, 'skbaddr': skbaddr, 'len': skblen,
           'queue_t': time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1


def handle_net_dev_xmit(event_info):
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0:  # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return


def handle_kfree_skb(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return


def handle_consume_skb(event_info):
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return


def handle_skb_copy_datagram_iovec(event_info):
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
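# Hypothetical invocation sketch: this file is meant to be driven by perf's
# script machinery, which records the irq, napi, net and skb tracepoints the
# handlers above consume. The kernel tree ships record/report wrappers under
# this script's name (an assumption worth verifying on your tree):
#
#   perf script record netdev-times
#   perf script report netdev-times tx rx dev=eth0 debug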
import os
from subprocess import Popen, PIPE

from typeclassifier import TypeClassifier


class HardwareControl:
    def __init__(self):
        self.classifier = TypeClassifier("fastText/voiceai-hardware.bin",
                                         "fastText/fasttext")

        self.backlightDir = "/sys/class/backlight/acpi_video0/"
        self.maxBrightnessDir = self.backlightDir + 'max_brightness'
        self.brightnessDir = self.backlightDir + 'brightness'
        self.brightCmd = 'tee'

        f = open(self.maxBrightnessDir, 'r')
        self.max_brightness = int(f.readline())
        f.close()

        f = open(self.brightnessDir, 'r')
        self.brightness = int(f.readline())
        f.close()

        self.volumeCmd = "amixer set 'Master' "

    def textFilter(self, tagged):
        keep_words = ['xVB', 'xRP', 'xNN']
        change_tags = ['xCD']
        # change tags -> keep tags -> return array of tuple
        filtered_tags = []
        for tup in tagged:
            for k_w in keep_words:
                if tup[1] == k_w:
                    filtered_tags.append(tup)
                    break
            for c_t in change_tags:
                if tup[1] == c_t:
                    filtered_tags.append((tup[1], tup[1]))
                    break
        return filtered_tags

    def functionFilter(self, tagged, pure_entities):
        keep_words = ['xVB', 'xRP', 'xNN', 'xIN']
        change_tags = ['xCD']
        NUM = []
        # change tags -> keep tags -> return array of tuple
        filtered_tags = []
        for tup in tagged:
            for k_w in keep_words:
                if tup[1] == k_w:
                    filtered_tags.append(tup)
                    break
            if tup[1] == 'xCD':
                NUM.append(int(tup[0]))
            for c_t in change_tags:
                if tup[1] == c_t:
                    filtered_tags.append((tup[1], tup[1]))
                    break

        text = [tup[0] for tup in filtered_tags]
        f_type, prob = self.classifier.classifyText(" ".join(text))

        msg = ""
        percent = 15
        if len(NUM) > 0:
            percent = int(NUM[0])

        if f_type == 1:
            return "".join([msg, self.increaseVolume(percent)])
        if f_type == 2:
            return "".join([msg, self.increaseVolume(percent, False)])
        if f_type == 3:
            return "".join([msg, self.increaseBrightness(percent)])
        if f_type == 4:
            return "".join([msg, self.increaseBrightness(percent, False)])
        if f_type == 5:
            return "".join([msg, self.setVolume(percent)])
        if f_type == 6:
            return "".join([msg, self.setBrightness(percent)])
        return "I'm sorry, I didn't get that"

    def setVolume(self, percent):
        os.system("".join([self.volumeCmd, str(percent), '%']))
        return "Volume set"

    def increaseVolume(self, percent, positive=True):
        sign = '+'
        if positive == False:
            sign = '-'
        os.system("".join([self.volumeCmd, str(percent), '%', sign]))
        return "Volume increased/decreased"

    def setBrightness(self, percent):
        if percent > 100:
            percent = 100
        if percent < 0:
            percent = 0
        self.brightness = int(percent * self.max_brightness / 100)
        #sudoService = Popen(['sudo', '-S', 'su'], stdout=PIPE, stderr=PIPE)
        #o = sudoService.communicate(input='ironpatriot')
        os.system(" ".join(["echo", str(self.brightness), ">>", self.brightnessDir]))
        #brightnessService = Popen(["echo", " ".join(["2", ">>", self.brightnessDir])],
        #                          stdout=PIPE, stderr=PIPE)
        #out = brightnessService.communicate(input='2')
        #sudoService = Popen(['exit'])
        return "Brightness set"

    def increaseBrightness(self, percent, positive=True):
        cPercent = self.brightness * 100 / self.max_brightness
        if positive:
            cPercent = cPercent + percent
        else:
            cPercent = cPercent - percent
        return self.setBrightness(cPercent)
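# A small sketch (values hypothetical) of the percent <-> raw brightness
# conversion used by setBrightness()/increaseBrightness() above:
max_brightness = 15                       # e.g. contents of the max_brightness file
raw = int(70 * max_brightness / 100)      # 70% -> raw value 10
percent = raw * 100 // max_brightness     # back to 66%; integer rounding loses precision
assert raw == 10 and percent == 66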
keys = db1.keys() keys.sort() self.assertEqual(['a', 'a\x00', 'a\x00a', 'aaa'], keys) self.assertEqual(db1['a'], 'eh?') self.assertEqual(db1['a\x00'], 'eh zed.') self.assertEqual(db1['a\x00a'], 'eh zed eh?') self.assertEqual(db1['aaa'], 'eh eh eh!') finally: db1.close() test_support.unlink(self.filename) def test07_DB_set_flags_persists(self): try: db1 = db.DB() db1.set_flags(db.DB_DUPSORT) db1.open(self.filename, db.DB_HASH, db.DB_CREATE) db1['a'] = 'eh' db1['a'] = 'A' self.assertEqual([('a', 'A')], db1.items()) db1.put('a', 'Aa') self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items()) db1.close() db1 = db.DB() # no set_flags call, we're testing that it reads and obeys # the flags on open. db1.open(self.filename, db.DB_HASH) self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items()) # if it read the flags right this will replace all values # for key 'a' instead of adding a new one. (as a dict should) db1['a'] = 'new A' self.assertEqual([('a', 'new A')], db1.items()) finally: db1.close() test_support.unlink(self.filename) def test08_ExceptionTypes(self) : self.assertTrue(issubclass(db.DBError, Exception)) for i, j in db.__dict__.items() : if i.startswith("DB") and i.endswith("Error") : self.assertTrue(issubclass(j, db.DBError), msg=i) if i not in ("DBKeyEmptyError", "DBNotFoundError") : self.assertFalse(issubclass(j, KeyError), msg=i) # This two exceptions have two bases self.assertTrue(issubclass(db.DBKeyEmptyError, KeyError)) self.assertTrue(issubclass(db.DBNotFoundError, KeyError)) #---------------------------------------------------------------------- def test_suite(): return unittest.makeSuite(MiscTestCase) if __name__ == '__main__': unittest.main(defaultTest='test_suite') # ___________MAIN MODULE___________________ # V-1.04 # Python 2.7.6 # Port Scanner __author__ = "Sudhanshu Patel (sudhanshuptl13@gmail.com)" import sys,os try: sys.path.insert(0,'sub_program') import scport import filehand except: print 'Unable to Find subprogram folder or scport.py' sys.exit() if __name__=='__main__': # Information for User print "Enter a remote host to scane Ex:- www.nitrkl.ac.in\n"+"-"*60 print (" "*29)+"OR\n"+"_"*60 print "Enter a remote computer name in your local network Ex:- Sudhanshu\n"+"-"*60 while True: try: print """ 1 ::- Scan remote Host for most common used Port 2 ::- Brute Scan : Scan host for all port in range 0 to 1024 3 ::- Scan remote host for port you want (you have to enter port) 4 ::- Exit !! """ choice=raw_input('Enter Your Choice ::- ') if choice=='1': ob=scport.Portscan(raw_input('Enter Remote Host name :- ')) ls=ob.Scan_port() #we use ls for to store it in file st=''' We Search your Given remoteHost .. for most common port that are used by many server ''' fh=filehand.File_handling('Result Most common port.txt',st) if len(ls)>2: #if any port found fh.f_do_it(ls) else: ls.append('No Port Open Port Found or Remote host Not found') fh.f_do_it(ls) print '***See Summary of result in "Result Most common port.txt" file***' elif choice=='2': ob=scport.Portscan(raw_input('Enter Remote Host name :- ')) ls=ob.Brute_scan() st=''' We Search your Given remoteHost .. 
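# --- Editor's note: the scport.Portscan class used above lives in the unshown
# sub_program folder; this is a hypothetical stdlib-only sketch of the same
# idea: try a TCP connect to each port and collect the ones that answer.
# Names, timeout value, and error handling are this editor's assumptions.
import socket

def scan_ports(host, ports, timeout=0.5):
    open_ports = []
    for port in ports:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        # connect_ex returns 0 when the connection succeeds (port open)
        if s.connect_ex((host, port)) == 0:
            open_ports.append(port)
        s.close()
    return open_ports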
for All Open Port in range 1 to 1024 ''' fh=filehand.File_handling('Result Brute scan.txt',st) if len(ls)>2: #if any port found fh.f_do_it(ls) else: ls.append('No Port Open Port Found or Remote host Not found') fh.f_do_it(ls) print '***See Summary of result in "Result Brute port.txt" file***' elif choice=='3': print 'Enter port address seperated by space on which scan is going to perform :' ls=[int(x) for x in raw_input().split(' ')] ob=scport.Portscan(raw_input('Enter Remote Host name :- '),ls) ls= ob.Scan_port() st=''' We Search your Given remoteHost .. for All Port Given By You ''' fh=filehand.File_handling('Result your Port.txt',st) if len(ls)>2: #if any port found fh.f_do_it(ls) else: ls.append('No Port Open Port Found or Remote host Not found') fh.f_do_it(ls) print '***See Summary of result in "Result your Port.txt" file***' elif choice=='4': print 'Thank You !' sys.exit() else: print "Wrong Choice Try again" except KeyboardInterrupt: print 'You Pressed Ctrl+C' sys.exit() ############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013, John McNamara, jmcnamara@cpan.org # import unittest from ...compatibility import StringIO from ...worksheet import Worksheet class TestExtractFilterTokens(unittest.TestCase): """ Test the Worksheet _extract_filter_tokens() method. """ def setUp(self): self.fh = StringIO() self.worksheet = Worksheet() self.worksheet._set_filehandle(self.fh) def test_extract_filter_tokens(self): """Test the _extract_filter_tokens() method""" testcases = [ [ None, [], ], [ '', [], ], [ '0 < 2001', ['0', '<', '2001'], ], [ 'x < 2000', ['x', '<', '2000'], ], [ 'x > 2000', ['x', '>', '2000'], ], [ 'x == 2000', ['x', '==', '2000'], ], [ 'x > 2000 and x < 5000', ['x', '>', '2000', 'and', 'x', '<', '5000'], ], [ 'x = "goo"', ['x', '=', 'goo'], ], [ 'x = moo', ['x', '=', 'moo'], ], [ 'x = "foo baz"', ['x', '=', 'foo baz'], ], [ 'x = "moo "" bar"', ['x', '=', 'moo " bar'], ], [ 'x = "foo bar" or x = "bar foo"', ['x', '=', 'foo bar', 'or', 'x', '=', 'bar foo'], ], [ 'x = "foo "" bar" or x = "bar "" foo"', ['x', '=', 'foo " bar', 'or', 'x', '=', 'bar " foo'], ], [ 'x = """"""""', ['x', '=', '"""'], ], [ 'x = Blanks', ['x', '=', 'Blanks'], ], [ 'x = NonBlanks', ['x', '=', 'NonBlanks'], ], [ 'top 10 %', ['top', '10', '%'], ], [ 'top 10 items', ['top', '10', 'items'], ], ] for testcase in testcases: expression = testcase[0] exp = testcase[1] got = self.worksheet._extract_filter_tokens(expression) self.assertEqual(got, exp) if __name__ == '__main__': unittest.main() #!/usr/bin/python # The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD # # Copyright (c) 2010 Rising Tide Systems # Copyright (c) 2010 Linux-iSCSI.org # # Author: nab@kernel.org # import os, sys import subprocess as sub import string import re import optparse tcm_dir = "" fabric_ops = [] fabric_mod_dir = "" fabric_mod_port = "" fabric_mod_init_port = "" def tcm_mod_err(msg): print msg sys.exit(1) def tcm_mod_create_module_subdir(fabric_mod_dir_var): if os.path.isdir(fabric_mod_dir_var) == True: return 1 print "Creating fabric_mod_dir: " + fabric_mod_dir_var ret = os.mkdir(fabric_mod_dir_var) if ret: tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var) return def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open 
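# --- Editor's note: a rough sketch (this editor's assumption, not XlsxWriter's
# actual implementation) of a tokenizer that satisfies the
# _extract_filter_tokens() test cases above: split on whitespace and on
# double-quoted strings, then collapse the Excel-style doubled '""' escapes.
import re

def extract_filter_tokens(expression):
    if not expression:
        return []
    tokens = re.findall(r'"(?:[^"]|"")*"|\S+', expression)
    # strip the surrounding quotes and un-double embedded '""' escapes
    return [t[1:-1].replace('""', '"') if t.startswith('"') else t
            for t in tokens]

# e.g. extract_filter_tokens('x = "moo "" bar"') -> ['x', '=', 'moo " bar']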
file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n" buf += " u64 nport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n" buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* FC lport target portal group tag for TCM */\n" buf += " u16 lport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n" buf += " struct " + fabric_mod_name + "_lport *lport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_lport {\n" buf += " /* SCSI protocol the lport is providing */\n" buf += " u8 lport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" buf += " u64 lport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Target Lport */\n" buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n" buf += " struct se_wwn lport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "lport" fabric_mod_init_port = "nport" return def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n" buf += " u64 iport_wwpn;\n" buf += " /* ASCII formatted WWPN for Sas Initiator port */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* SAS port target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" buf += " u64 tport_wwpn;\n" buf += " /* ASCII formatted WWPN for SAS Target port */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port 
buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* ASCII formatted InitiatorName */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* iSCSI target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* ASCII formatted TargetName for IQN */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name): if proto_ident == "FC": tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "SAS": tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "iSCSI": tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name) else: print "Unsupported proto_ident: " + proto_ident sys.exit(1) return def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n\n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "/* Local pointer to allocated TCM configfs fabric module */\n" buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n" buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn = 0;\n" buf += " u32 nexus_depth;\n\n" buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n" buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n" buf += " if 
(!(se_nacl_new))\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n" buf += " nexus_depth = 1;\n" buf += " /*\n" buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n" buf += " * when converting a NodeACL from demo mode -> explict\n" buf += " */\n" buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n" buf += " name, nexus_depth);\n" buf += " if (IS_ERR(se_nacl)) {\n" buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n" buf += " return se_nacl;\n" buf += " }\n" buf += " /*\n" buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n" buf += " */\n" buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" buf += " return se_nacl;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n" buf += " kfree(nacl);\n" buf += "}\n\n" buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" buf += " struct se_wwn *wwn,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n" buf += " struct " + fabric_mod_name + "_tpg *tpg;\n" buf += " unsigned long tpgt;\n" buf += " int ret;\n\n" buf += " if (strstr(name, \"tpgt_\") != name)\n" buf += " return ERR_PTR(-EINVAL);\n" buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n" buf += " return ERR_PTR(-EINVAL);\n\n" buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n" buf += " if (!(tpg)) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += " }\n" buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n" buf += " &tpg->se_tpg, (void *)tpg,\n" buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" buf += " if (ret < 0) {\n" buf += " kfree(tpg);\n" buf += " return NULL;\n" buf += " }\n" buf += " return &tpg->se_tpg;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n" buf += " core_tpg_deregister(se_tpg);\n" buf += " kfree(tpg);\n" buf += "}\n\n" buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn 
= 0;\n\n" buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n\n" buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n" buf += " if (!(" + fabric_mod_port + ")) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += " }\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n" buf += " kfree(" + fabric_mod_port + ");\n" buf += "}\n\n" buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " char *page)\n" buf += "{\n" buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" buf += " utsname()->machine);\n" buf += "}\n\n" buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n" buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n" buf += " &" + fabric_mod_name + "_wwn_version.attr,\n" buf += " NULL,\n" buf += "};\n\n" buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n" buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n" buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n" buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n" buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n" buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n" buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n" buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n" buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" buf += " .close_session = " + fabric_mod_name + "_close_session,\n" buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n" buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n" buf += " 
.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n" buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" buf += " .sess_get_initiator_sid = NULL,\n" buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n" buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n" buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n" buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n" buf += " /*\n" buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" buf += " */\n" buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n" buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" buf += " .fabric_post_link = NULL,\n" buf += " .fabric_pre_unlink = NULL,\n" buf += " .fabric_make_np = NULL,\n" buf += " .fabric_drop_np = NULL,\n" buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" buf += "};\n\n" buf += "static int " + fabric_mod_name + "_register_configfs(void)\n" buf += "{\n" buf += " struct target_fabric_configfs *fabric;\n" buf += " int ret;\n\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" buf += " utsname()->machine);\n" buf += " /*\n" buf += " * Register the top level struct config_item_type with TCM core\n" buf += " */\n" buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" buf += " if (!(fabric)) {\n" buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" buf += " return -ENOMEM;\n" buf += " }\n" buf += " /*\n" buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" buf += " */\n" buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n" buf += " /*\n" buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" buf += " */\n" buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" buf += " /*\n" buf += " * Register the fabric for use 
within TCM\n" buf += " */\n" buf += " ret = target_fabric_configfs_register(fabric);\n" buf += " if (ret < 0) {\n" buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n" buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n" buf += " return ret;\n" buf += " }\n" buf += " /*\n" buf += " * Setup our local pointer to *fabric\n" buf += " */\n" buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" buf += " return 0;\n" buf += "};\n\n" buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n" buf += "{\n" buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n" buf += " return;\n\n" buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n" buf += "};\n\n" buf += "static int __init " + fabric_mod_name + "_init(void)\n" buf += "{\n" buf += " int ret;\n\n" buf += " ret = " + fabric_mod_name + "_register_configfs();\n" buf += " if (ret < 0)\n" buf += " return ret;\n\n" buf += " return 0;\n" buf += "};\n\n" buf += "static void " + fabric_mod_name + "_exit(void)\n" buf += "{\n" buf += " " + fabric_mod_name + "_deregister_configfs();\n" buf += "};\n\n" buf += "#ifdef MODULE\n" buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" buf += "MODULE_LICENSE(\"GPL\");\n" buf += "module_init(" + fabric_mod_name + "_init);\n" buf += "module_exit(" + fabric_mod_name + "_exit);\n" buf += "#endif\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_scan_fabric_ops(tcm_dir): fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h" print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api process_fo = 0; p = open(fabric_ops_api, 'r') line = p.readline() while line: if process_fo == 0 and re.search('struct target_core_fabric_ops {', line): line = p.readline() continue if process_fo == 0: process_fo = 1; line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) continue line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) p.close() return def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" bufi = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h" print "Writing file: " + fi pi = open(fi, 'w') if not pi: tcm_mod_err("Unable to open file: " + fi) buf = "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n\n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n" buf += "#include \n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += 
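# --- Editor's note: tcm_mod_scan_fabric_ops() above keeps only the lines of
# struct target_core_fabric_ops that contain '(*', i.e. its function-pointer
# members. A compact sketch of the same scan (this editor's rewrite, same
# behaviour in spirit, stopping at the closing brace):
import re

def scan_fabric_ops(header_path):
    ops = []
    in_struct = False
    with open(header_path) as fh:
        for line in fh:
            if 'struct target_core_fabric_ops {' in line:
                in_struct = True
                continue
            if in_struct:
                if line.strip().startswith('}'):
                    break
                if re.search(r'\(\*\w+\)\(', line):  # function-pointer member
                    ops.append(line.rstrip())
    return ops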
"int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n" buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n" total_fabric_ops = len(fabric_ops) i = 0 while i < total_fabric_ops: fo = fabric_ops[i] i += 1 # print "fabric_ops: " + fo if re.search('get_fabric_name', fo): buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" buf += "{\n" buf += " return \"" + fabric_mod_name[4:] + "\";\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" continue if re.search('get_fabric_proto_ident', fo): buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " u8 proto_id;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" buf += " }\n\n" buf += " return proto_id;\n" buf += "}\n\n" bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n" if re.search('get_wwn', fo): buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n" if re.search('get_tag', fo): buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " return tpg->" + fabric_mod_port + "_tpgt;\n" buf += "}\n\n" bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" if re.search('get_default_depth', fo): buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n" if re.search('get_pr_transport_id\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code,\n" buf += " unsigned char *buf)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + 
fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *, unsigned char *);\n" if re.search('get_pr_transport_id_len\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *);\n" if re.search('parse_pr_out_transport_id\)\(', fo): buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " const char *buf,\n" buf += " u32 *out_tid_len,\n" buf += " char **port_nexus_ptr)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " char *tid = NULL;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "iSCSI": buf += " case 
SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" buf += " }\n\n" buf += " return tid;\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" bufi += " const char *, u32 *, char **);\n" if re.search('alloc_fabric_acl\)\(', fo): buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" buf += " if (!(nacl)) {\n" buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n" buf += " return NULL;\n" buf += " }\n\n" buf += " return &nacl->se_node_acl;\n" buf += "}\n\n" bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" if re.search('release_fabric_acl\)\(', fo): buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " kfree(nacl);\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" bufi += " struct se_node_acl *);\n" if re.search('tpg_get_inst_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" if re.search('release_cmd_to_pool', fo): buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" if re.search('shutdown_session\)\(', fo): buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n" if re.search('close_session\)\(', fo): buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" if re.search('stop_session\)\(', fo): buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" if re.search('fall_back_to_erl0\)\(', fo): buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" if re.search('sess_logged_in\)\(', fo): buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" if re.search('sess_get_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n" if re.search('write_pending\)\(', fo): buf += "int " + 
fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" if re.search('write_pending_status\)\(', fo): buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" if re.search('set_default_node_attributes\)\(', fo): buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" if re.search('get_task_tag\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" if re.search('get_cmd_state\)\(', fo): buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" if re.search('new_cmd_failure\)\(', fo): buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n" if re.search('queue_data_in\)\(', fo): buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n" if re.search('queue_status\)\(', fo): buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" if re.search('queue_tm_rsp\)\(', fo): buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" if re.search('get_fabric_sense_len\)\(', fo): buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n" if re.search('set_fabric_sense_len\)\(', fo): buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n" if re.search('is_state_remove\)\(', fo): buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" if re.search('pack_lun\)\(', fo): buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n" buf += "{\n" buf += " WARN_ON(lun >= 256);\n" buf += " /* Caller wants this byte-swapped */\n" buf += " return cpu_to_le64((lun & 0xff) << 8);\n" buf += "}\n\n" bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() ret = pi.write(bufi) if ret: tcm_mod_err("Unable to write fi: " + fi) pi.close() return def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Makefile" print "Writing file: " + f p = open(f, 'w') if not p: 
tcm_mod_err("Unable to open file: " + f) buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n" buf += " " + fabric_mod_name + "_configfs.o\n" buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Kconfig" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) buf = "config " + fabric_mod_name.upper() + "\n" buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n" buf += " depends on TARGET_CORE && CONFIGFS_FS\n" buf += " default n\n" buf += " ---help---\n" buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name): buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n" kbuild = tcm_dir + "/drivers/target/Makefile" f = open(kbuild, 'a') f.write(buf) f.close() return def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name): buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n" kconfig = tcm_dir + "/drivers/target/Kconfig" f = open(kconfig, 'a') f.write(buf) f.close() return def main(modname, proto_ident): # proto_ident = "FC" # proto_ident = "SAS" # proto_ident = "iSCSI" tcm_dir = os.getcwd(); tcm_dir += "/../../" print "tcm_dir: " + tcm_dir fabric_mod_name = modname fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name print "Set fabric_mod_name: " + fabric_mod_name print "Set fabric_mod_dir: " + fabric_mod_dir print "Using proto_ident: " + proto_ident if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI": print "Unsupported proto_ident: " + proto_ident sys.exit(1) ret = tcm_mod_create_module_subdir(fabric_mod_dir) if ret: print "tcm_mod_create_module_subdir() failed because module already exists!" sys.exit(1) tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_scan_fabric_ops(tcm_dir) tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? 
[yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) return parser = optparse.OptionParser() parser.add_option('-m', '--modulename', help='Module name', dest='modname', action='store', nargs=1, type='string') parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident', action='store', nargs=1, type='string') (opts, args) = parser.parse_args() mandatories = ['modname', 'protoident'] for m in mandatories: if not opts.__dict__[m]: print "mandatory option is missing\n" parser.print_help() exit(-1) if __name__ == "__main__": main(str(opts.modname), opts.protoident) # -*- coding: utf-8 -*- # Copyright (C) 2006-2007 Søren Roug, European Environment Agency # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # from namespaces import SCRIPTNS from element import Element # ODF 1.0 section 12.4.1 # The element binds an event to a macro. # Autogenerated def EventListener(**args): return Element(qname = (SCRIPTNS,'event-listener'), **args) __author__ = 'Iurii Sergiichuk ' gal1 = tuple(range(256)) gal2 = ( 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05, 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25, 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45, 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65, 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85, 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5, 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5, 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5) gal3 = ( 0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11, 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21, 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 
0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71, 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41, 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1, 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1, 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1, 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81, 0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a, 0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba, 0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea, 0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda, 0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a, 0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a, 0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a, 0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a) gal9 = ( 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77, 0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7, 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c, 0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc, 0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01, 0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91, 0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a, 0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa, 0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b, 0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b, 0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30, 0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed, 0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d, 0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6, 0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46) gal11 = ( 0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69, 0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9, 0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12, 0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2, 0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f, 0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f, 0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4, 0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 
0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54, 0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e, 0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e, 0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5, 0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55, 0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68, 0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8, 0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13, 0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3) gal13 = ( 0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b, 0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b, 0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0, 0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20, 0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26, 0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6, 0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d, 0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d, 0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91, 0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41, 0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a, 0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa, 0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc, 0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c, 0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47, 0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97) gal14 = ( 0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a, 0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba, 0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81, 0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61, 0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7, 0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17, 0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c, 0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 0xde, 0xd0, 0xc2, 0xcc, 0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b, 0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb, 0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0, 0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20, 0xec, 0xe2, 0xf0, 
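# --- Editor's note: the gal* lookup tables above are multiplication tables in
# GF(2^8) with the AES reduction polynomial x^8+x^4+x^3+x+1 (0x11b), so they
# need not be hardcoded. A sketch that recomputes two of them, usable as a
# self-check against the literals:
def xtime(a):
    # multiply by 2 in GF(2^8), reducing modulo 0x11b
    a <<= 1
    if a & 0x100:
        a ^= 0x11b
    return a & 0xff

gal2_check = tuple(xtime(i) for i in range(256))       # should equal gal2
gal3_check = tuple(xtime(i) ^ i for i in range(256))   # 3*x = 2*x ^ x, equals gal3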
0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6, 0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56, 0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d, 0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d) galI = gal14, gal11, gal13, gal9 galNI = gal2, gal3, gal1, gal1 # TODO NEED TO CHANGE SBOX 2-4 value to unique sbox_1 = ( 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16) sbox_2 = ( 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 
0x54, 0xbb, 0x16) sbox_3 = ( 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16) sbox_4 = ( 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16) # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib try: import cPickle as pickle except ImportError: import pickle import errno import socket import time from eventlet import queue from eventlet import timeout from oslo_config import cfg from oslo_log import log as logging from oslo_utils import versionutils from six.moves import range try: import xmlrpclib except ImportError: import six.moves.xmlrpc_client as xmlrpclib from nova import context from nova import exception from nova.i18n import _, _LE, _LW from nova import objects from nova import utils from nova import version from nova.virt.xenapi.client import objects as cli_objects from nova.virt.xenapi import pool from nova.virt.xenapi import pool_states LOG = logging.getLogger(__name__) xenapi_session_opts = [ cfg.IntOpt('login_timeout', default=10, help='Timeout in seconds for XenAPI login.'), cfg.IntOpt('connection_concurrent', default=5, help='Maximum number of concurrent XenAPI connections. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), ] CONF = cfg.CONF CONF.register_opts(xenapi_session_opts, 'xenserver') CONF.import_opt('host', 'nova.netconf') def apply_session_helpers(session): session.VM = cli_objects.VM(session) session.SR = cli_objects.SR(session) session.VDI = cli_objects.VDI(session) session.VBD = cli_objects.VBD(session) session.PBD = cli_objects.PBD(session) session.PIF = cli_objects.PIF(session) session.VLAN = cli_objects.VLAN(session) session.host = cli_objects.Host(session) session.network = cli_objects.Network(session) session.pool = cli_objects.Pool(session) class XenAPISession(object): """The session to invoke XenAPI SDK calls.""" # This is not a config option as it should only ever be # changed in development environments. 
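# NOTE: _verify_plugin_version() below delegates the comparison to oslo's
# versionutils.is_compatible(); with its defaults (assumed here) that means
# the plugin's major version must equal the one in PLUGIN_REQUIRED_VERSION
# and its minor version must be the same or newer.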
# MAJOR VERSION: Incompatible changes with the plugins # MINOR VERSION: Compatible changes, new plugins, etc PLUGIN_REQUIRED_VERSION = '1.2' def __init__(self, url, user, pw): version_string = version.version_string_with_package() self.nova_version = _('%(vendor)s %(product)s %(version)s') % \ {'vendor': version.vendor_string(), 'product': version.product_string(), 'version': version_string} import XenAPI self.XenAPI = XenAPI self._sessions = queue.Queue() self.is_slave = False exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) url = self._create_first_session(url, user, pw, exception) self._populate_session_pool(url, user, pw, exception) self.host_uuid = self._get_host_uuid() self.host_ref = self._get_host_ref() self.product_version, self.product_brand = \ self._get_product_version_and_brand() self._verify_plugin_version() apply_session_helpers(self) def _verify_plugin_version(self): requested_version = self.PLUGIN_REQUIRED_VERSION current_version = self.call_plugin_serialized( 'nova_plugin_version', 'get_version') if not versionutils.is_compatible(requested_version, current_version): raise self.XenAPI.Failure( _("Plugin version mismatch (Expected %(exp)s, got %(got)s)") % {'exp': requested_version, 'got': current_version}) def _create_first_session(self, url, user, pw, exception): try: session = self._create_session(url) with timeout.Timeout(CONF.xenserver.login_timeout, exception): session.login_with_password(user, pw, self.nova_version, 'OpenStack') except self.XenAPI.Failure as e: # if user and pw of the master are different, we're doomed! if e.details[0] == 'HOST_IS_SLAVE': master = e.details[1] url = pool.swap_xapi_host(url, master) session = self.XenAPI.Session(url) session.login_with_password(user, pw, self.nova_version, 'OpenStack') self.is_slave = True else: raise self._sessions.put(session) return url def _populate_session_pool(self, url, user, pw, exception): for i in range(CONF.xenserver.connection_concurrent - 1): session = self._create_session(url) with timeout.Timeout(CONF.xenserver.login_timeout, exception): session.login_with_password(user, pw, self.nova_version, 'OpenStack') self._sessions.put(session) def _get_host_uuid(self): if self.is_slave: aggr = objects.AggregateList.get_by_host( context.get_admin_context(), CONF.host, key=pool_states.POOL_FLAG)[0] if not aggr: LOG.error(_LE('Host is member of a pool, but DB ' 'says otherwise')) raise exception.AggregateHostNotFound() return aggr.metadetails[CONF.host] else: with self._get_session() as session: host_ref = session.xenapi.session.get_this_host(session.handle) return session.xenapi.host.get_uuid(host_ref) def _get_product_version_and_brand(self): """Return a tuple of (major, minor, rev) for the host version and a string of the product brand. """ software_version = self._get_software_version() product_version_str = software_version.get('product_version') # Product version is only set in some cases (e.g. XCP, XenServer) and # not in others (e.g. xenserver-core, XAPI-XCP). # In these cases, the platform version is the best number to use. if product_version_str is None: product_version_str = software_version.get('platform_version', '0.0.0') product_brand = software_version.get('product_brand') product_version = utils.convert_version_to_tuple(product_version_str) return product_version, product_brand def _get_software_version(self): return self.call_xenapi('host.get_software_version', self.host_ref) def get_session_id(self): """Return a string session_id.
Used for vnc consoles.""" with self._get_session() as session: return str(session._session) @contextlib.contextmanager def _get_session(self): """Return exclusive session for scope of with statement.""" session = self._sessions.get() try: yield session finally: self._sessions.put(session) def _get_host_ref(self): """Return the xenapi host on which nova-compute runs on.""" with self._get_session() as session: return session.xenapi.host.get_by_uuid(self.host_uuid) def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread.""" with self._get_session() as session: return session.xenapi_request(method, args) def call_plugin(self, plugin, fn, args): """Call host.call_plugin on a background thread.""" # NOTE(armando): pass the host uuid along with the args so that # the plugin gets executed on the right host when using XS pools args['host_uuid'] = self.host_uuid with self._get_session() as session: return self._unwrap_plugin_exceptions( session.xenapi.host.call_plugin, self.host_ref, plugin, fn, args) def call_plugin_serialized(self, plugin, fn, *args, **kwargs): params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))} rv = self.call_plugin(plugin, fn, params) return pickle.loads(rv) def call_plugin_serialized_with_retry(self, plugin, fn, num_retries, callback, retry_cb=None, *args, **kwargs): """Allows a plugin to raise RetryableError so we can try again.""" attempts = num_retries + 1 sleep_time = 0.5 for attempt in range(1, attempts + 1): try: if attempt > 1: time.sleep(sleep_time) sleep_time = min(2 * sleep_time, 15) callback_result = None if callback: callback_result = callback(kwargs) msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, ' 'callback_result: %(callback_result)s') LOG.debug(msg, {'plugin': plugin, 'fn': fn, 'attempt': attempt, 'attempts': attempts, 'callback_result': callback_result}) return self.call_plugin_serialized(plugin, fn, *args, **kwargs) except self.XenAPI.Failure as exc: if self._is_retryable_exception(exc, fn): LOG.warning(_LW('%(plugin)s.%(fn)s failed. ' 'Retrying call.'), {'plugin': plugin, 'fn': fn}) if retry_cb: retry_cb(exc=exc) else: raise except socket.error as exc: if exc.errno == errno.ECONNRESET: LOG.warning(_LW('Lost connection to XenAPI during call to ' '%(plugin)s.%(fn)s. Retrying call.'), {'plugin': plugin, 'fn': fn}) if retry_cb: retry_cb(exc=exc) else: raise raise exception.PluginRetriesExceeded(num_retries=num_retries) def _is_retryable_exception(self, exc, fn): _type, method, error = exc.details[:3] if error == 'RetryableError': LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn}, exc_info=True) return True elif "signal" in method: LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn}, exc_info=True) return True else: return False def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" self.is_local_connection = url == "unix://local" if self.is_local_connection: return self.XenAPI.xapi_local() return self.XenAPI.Session(url) def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details.""" try: return func(*args, **kwargs) except self.XenAPI.Failure as exc: LOG.debug("Got exception: %s", exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): params = None try: # FIXME(comstud): eval is evil. 
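# A safer sketch, assuming the payload in details[3] is always a repr() of
# plain Python literals (which this code does not guarantee):
#   import ast
#   params = ast.literal_eval(exc.details[3])
# Unlike eval(), ast.literal_eval() parses the literal without executing
# arbitrary code.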
params = eval(exc.details[3]) except Exception: raise exc raise self.XenAPI.Failure(params) else: raise except xmlrpclib.ProtocolError as exc: LOG.debug("Got exception: %s", exc) raise def get_rec(self, record_type, ref): try: return self.call_xenapi('%s.get_record' % record_type, ref) except self.XenAPI.Failure as e: if e.details[0] != 'HANDLE_INVALID': raise return None def get_all_refs_and_recs(self, record_type): """Retrieve all refs and recs for a Xen record type. Handles race-conditions where the record may be deleted between the `get_all` call and the `get_record` call. """ return self.call_xenapi('%s.get_all_records' % record_type).items()
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re import unittest from memory_inspector.classification import results from memory_inspector.classification import rules class ResultsTest(unittest.TestCase): def runTest(self): rules_dict = [ { 'name': 'a*', 'regex': '^a.*', 'children': [ { 'name': 'az*', 'regex': '^az.*' } ] }, { 'name': 'b*', 'regex': '^b.*', }, ] rule = rules.Load(str(rules_dict), MockRegexMatchingRule) result = results.AggreatedResults(rule, keys=['X', 'Y']) self.assertEqual(result.total.name, 'Total') self.assertEqual(len(result.total.children), 3) self.assertEqual(result.total.children[0].name, 'a*') self.assertEqual(result.total.children[1].name, 'b*') self.assertEqual(result.total.children[2].name, 'Total-other') self.assertEqual(result.total.children[0].children[0].name, 'az*') self.assertEqual(result.total.children[0].children[1].name, 'a*-other') result.AddToMatchingNodes('aa1', [1, 2]) # -> a* result.AddToMatchingNodes('aa2', [3, 4]) # -> a* result.AddToMatchingNodes('az', [5, 6]) # -> a*/az* result.AddToMatchingNodes('z1', [7, 8]) # -> T-other result.AddToMatchingNodes('b1', [9, 10]) # -> b* result.AddToMatchingNodes('b2', [11, 12]) # -> b* result.AddToMatchingNodes('z2', [13, 14]) # -> T-other self.assertEqual(result.total.values, [49, 56]) self.assertEqual(result.total.children[0].values, [9, 12]) self.assertEqual(result.total.children[1].values, [20, 22]) self.assertEqual(result.total.children[0].children[0].values, [5, 6]) self.assertEqual(result.total.children[0].children[1].values, [4, 6]) self.assertEqual(result.total.children[2].values, [20, 22]) class MockRegexMatchingRule(rules.Rule): def __init__(self, name, filters): super(MockRegexMatchingRule, self).__init__(name) self._regex = filters['regex'] def Match(self, s): return bool(re.match(self._regex, s))
#-*- coding: utf-8 -*- #Database (NDB) data manager file #Google NDB from google.appengine.ext import ndb from random import shuffle import datetime #Entity that stores the chat IDs class Chats(ndb.Model): chats = ndb.StringProperty(repeated = True) #Checks whether the chat exists; if it does not, creates all the required entities def checkChat(chat_id): c = ndb.Key(Chats, 'chats').get() if not c: c = Chats(id = 'chats') c.put() checkChat(chat_id) if not (chat_id in c.chats): e = Enabled(id = chat_id) s = Settings(id = chat_id) r = Rank(id = chat_id) d = Dados(id = chat_id) c.chats.append(chat_id) r.put() d.put() e.put() s.put() c.put() return False return True #Removes the chat and all of its dependencies def delChat(chat_id): c = ndb.Key(Chats, 'chats').get() e = ndb.Key(Enabled, chat_id).get() s = ndb.Key(Settings, chat_id).get() g = ndb.Key(Game, chat_id).get() d = ndb.Key(Dados, chat_id).get() r = ndb.Key(Rank, chat_id).get() status = False if chat_id in c.chats: c.chats.remove(chat_id) c.put() status = True if d: d.key.delete() status = True if e: e.key.delete() status = True if s: s.key.delete() status = True if g: g.key.delete() status = True if r: r.key.delete() status = True return status #Returns the list of all currently active chats def getChats(): c = ndb.Key(Chats, 'chats').get() return c.chats def checkChatBd(chat_id): c = ndb.Key(Chats, 'chats').get() e = ndb.Key(Enabled, chat_id).get() s = ndb.Key(Settings, chat_id).get() d = ndb.Key(Dados, chat_id).get() r = ndb.Key(Rank, chat_id).get() status = True if not e: e = Enabled(id = chat_id) e.put() status = False if not s: s = Settings(id = chat_id) s.put() status = False if not d: d = Dados(id = chat_id) d.put() status = False if not r: r = Rank(id = chat_id) r.put() status = False return status #Stores the "on" and "off" state of each chat class Enabled(ndb.Model): enabled = ndb.BooleanProperty(indexed = False, default = False) #Saves the value received as a parameter; all the 'set' functions work like this def setEnabled(chat_id, status): e = ndb.Key(Enabled, chat_id).get() e.enabled = status e.put() return #Returns the current state of the chat def getEnabled(chat_id): e = ndb.Key(Enabled, chat_id).get() return e.enabled class Shout(ndb.Model): shout = ndb.StringProperty(indexed = False, default = '') pos = ndb.IntegerProperty(indexed = False, default = 0) enable = ndb.BooleanProperty(indexed = False, default = False) def getShout(): s = ndb.Key(Shout, 'Shout').get() if not s: s = Shout(id = 'Shout') s.put() s = ndb.Key(Shout, 'Shout').get() if s.enable: chats = getChats() s.pos = s.pos + 1 if (s.pos + 1) < len(chats) else 0 if s.pos == 0: s.enable = False s.put() return [chats[s.pos], s.shout, (len(chats) - (s.pos + 1))] return None def setShout(shout): s = ndb.Key(Shout, 'Shout').get() s.shout = shout s.enable = True s.put() return def delShout(): s = ndb.Key(Shout, 'Shout').get() s.enable = False s.pos = 0 s.shout = '' s.put() def lessPos(): s = ndb.Key(Shout, 'Shout').get() s.pos = s.pos - 1 s.put() #Stores the settings of each chat class Settings(ndb.Model): language = ndb.StringProperty(indexed = False, default = 'enUS') waiting = ndb.BooleanProperty(indexed = False, default = True) first = ndb.BooleanProperty(indexed = False, default = True) welcome = ndb.BooleanProperty(indexed = False, default = True) categorias = ndb.BooleanProperty(indexed = False, default = False) cats = ndb.IntegerProperty(repeated = True) def setCats(chat_id,cats): s = ndb.Key(Settings, chat_id).get() s.cats = cats s.categorias = False s.put() return def getCats(chat_id): s = ndb.Key(Settings, chat_id).get() return s.cats def getFirstWelcome(chat_id): s = ndb.Key(Settings, chat_id).get() return [s.first,s.welcome] def setWelcome(chat_id): s = ndb.Key(Settings, chat_id).get() s.welcome = False s.put() def setFirst(chat_id): s = ndb.Key(Settings, chat_id).get() s.first = False s.put() #Returns the chat's Settings entity (if it does not exist, creates a new entity) def getSettings(chat_id): s = ndb.Key(Settings, chat_id).get() if s: return s if checkChat(chat_id): checkChatBd(chat_id) getSettings(chat_id) def setLanguage(chat_id, language): s = ndb.Key(Settings, chat_id).get() s.language = language s.put() return def setWaiting(chat_id, waiting): s = ndb.Key(Settings, chat_id).get() s.waiting = waiting s.put() return def setCategorias(chat_id, state): s = ndb.Key(Settings, chat_id).get() s.categorias = state s.put() return class User(ndb.Model): u_id = ndb.StringProperty() u_name = ndb.StringProperty() u_score = ndb.IntegerProperty() class Rank(ndb.Model): players = ndb.StructuredProperty(User, repeated = True) def addPlayerRank(chat_id, u_id, u_name): r = ndb.Key(Rank, chat_id).get() d = ndb.Key(Dados, chat_id).get() for i in range(len(r.players)): if u_id == r.players[i].u_id: return False user = User(u_id = u_id, u_name = u_name, u_score = 0) d.players.append(user) r.players.append(user) r.put() d.put() return True def getRank(chat_id): r = ndb.Key(Rank, chat_id).get() d = ndb.Key(Dados, chat_id).get() if len(r.players) != 0: rank = sorted(r.players, key = lambda players: players.u_score, reverse = True) nomes = [] scores = [] for i in range(len(rank)): nomes.append(rank[i].u_name.encode('utf-8')) scores.append(rank[i].u_score) r.players = rank d.topPlayer = rank[0] r.put() d.put() return [nomes, scores] return [] def addScore(chat_id, u_id, score): r = ndb.Key(Rank, chat_id).get() for i in range(len(r.players)): if r.players[i].u_id == u_id: r.players[i].u_score += score r.put() return class Dados(ndb.Model): games = ndb.IntegerProperty(indexed = False, default = 0) topPlayer = ndb.StructuredProperty(User, default = User(u_id = 'noID', u_name ='noPlayer', u_score = 0)) players = ndb.StructuredProperty(User, repeated = True) last_att = ndb.IntegerProperty(indexed = False, default = 1) jogos_dia = ndb.IntegerProperty(indexed = False, default = 0) def getDadosChat(chat_id): d = ndb.Key(Dados, chat_id).get() if d: return d d = Dados(id = chat_id) d.put() return False def getDadosGlobais(date): c = ndb.Key(Chats, 'chats').get() n_games = 0 n_players = 0 jogos_dia = 0 u_ids = [] for i in range (len(c.chats)): d = ndb.Key(Dados, c.chats[i]).get() if not d: getDadosChat(c.chats[i]) d = ndb.Key(Dados, c.chats[i]).get() jogos_dia = jogos_dia + getJogosDia(c.chats[i], date) n_games += d.games for j in range(len(d.players)): if not (d.players[j].u_id in u_ids): u_ids.append(d.players[j].u_id) n_players = len(u_ids) return [len(c.chats), n_players, jogos_dia, n_games] def setJogosDia(chat_id, date): d = ndb.Key(Dados, chat_id).get() date = int(datetime.datetime.fromtimestamp(date).strftime('%d')) if date != d.last_att: d.last_att = date d.jogos_dia = 0 d.jogos_dia += 1 d.put() def getJogosDia(chat_id, date): d = ndb.Key(Dados, chat_id).get() date = int(datetime.datetime.fromtimestamp(date).strftime('%d')) if d: if date != d.last_att: return 0 return d.jogos_dia delChat(chat_id) return 0 #Contains all the data of each game class Game(ndb.Model): pre_game = ndb.BooleanProperty(indexed = False, default = False) in_game = ndb.BooleanProperty(indexed = False, default = False) u_ids = ndb.StringProperty(repeated = True) u_names = ndb.StringProperty(repeated = True) message_ids = ndb.StringProperty(repeated = True) adm = ndb.StringProperty(default = 'noAdm') adm_name = ndb.StringProperty(default = 'noAdm') adm_message = ndb.StringProperty(default = 'noAdm') rnd = ndb.IntegerProperty(default = 0) palavra = ndb.StringProperty(default = 'noPalavra') categoria = ndb.StringProperty(default = 'noCategoria') mascara = ndb.StringProperty(default = 'noMascara') letras = ndb.StringProperty(repeated = True) vidas = ndb.IntegerProperty(default = 6) vidas_init = ndb.IntegerProperty(default = 6) arriscarBlock = ndb.BooleanProperty(indexed = False, default = False) def getPreGame(chat_id): g = ndb.Key(Game, chat_id).get() if g: return g.pre_game return False def getRound(chat_id): g = ndb.Key(Game, chat_id).get() return g.rnd def roundPlus(chat_id): g = ndb.Key(Game, chat_id).get() g.rnd = g.rnd+1 if g.rnd+1 < len(g.u_ids) else 0 g.put() return def checkRound(chat_id, u_id): g = ndb.Key(Game, chat_id).get() if g.u_ids[g.rnd] == u_id: return True return False def setPreGame(chat_id, status, u_id = None, u_name = None, message_id = None): g = ndb.Key(Game, chat_id).get() if g: g.pre_game = status g.put() return g = Game(id = chat_id) g.put() g = ndb.Key(Game, chat_id).get() g.pre_game = status g.put() addPlayer(chat_id, u_id, u_name, message_id) setAdm(chat_id, u_id, u_name, message_id) return def setInGame(chat_id, status): g = ndb.Key(Game, chat_id).get() g.in_game = status g.put() return def getInGame(chat_id): g = ndb.Key(Game, chat_id).get() if g: return g.in_game return False def addPlayer(chat_id, u_id, u_name, message_id): g = ndb.Key(Game, chat_id).get() if not (u_id in g.u_ids): addPlayerRank(chat_id, u_id, u_name) g.u_ids.append(u_id) g.u_names.append(u_name) g.message_ids.append(message_id) g.put() return True return False def rmPlayer(chat_id, u_id, message_id): g = ndb.Key(Game, chat_id).get() if not g.pre_game: g.rnd = g.rnd-1 if g.rnd != 0 else len(g.u_ids)-1 if u_id in g.u_ids: ind = g.u_ids.index(u_id) g.u_ids.pop(ind) g.u_names.pop(ind) g.message_ids.pop(ind) g.put() if len(g.u_ids) == 0: delGame(chat_id) return False if checkAdm(chat_id, u_id): setAdm(chat_id, g.u_ids[0], g.u_names[0], g.message_ids[0]) return 'setAdm' return True return 'semPlayer' def getPlayers(chat_id): g = ndb.Key(Game, chat_id).get() u_ids = [] u_names = [] message_ids = [] for i in range(len(g.u_ids)): u_ids.append(g.u_ids[i].encode('utf-8')) u_names.append(g.u_names[i].encode('utf-8')) message_ids.append(g.message_ids[i].encode('utf-8')) return [u_ids, u_names, message_ids] #Shuffles the list of participants def shufflePlayers(chat_id, date): g = ndb.Key(Game, chat_id).get() setJogosDia(chat_id, date) u_names_shuf = [] u_ids_shuf = [] message_ids_shuf = [] index_shuf = range(len(g.u_ids)) shuffle(index_shuf) for i in index_shuf: u_ids_shuf.append(g.u_ids[i]) u_names_shuf.append(g.u_names[i]) message_ids_shuf.append(g.message_ids[i]) g.u_ids = u_ids_shuf g.u_names = u_names_shuf g.message_ids = message_ids_shuf g.put() def setAdm(chat_id, u_id, u_name, message_id): g = ndb.Key(Game, chat_id).get() g.adm = u_id g.adm_name = u_name g.adm_message = message_id g.put() return def checkAdm(chat_id, u_id): g = ndb.Key(Game, chat_id).get() if g: if g.adm == u_id: return True return False def getAdm(chat_id): g = ndb.Key(Game, chat_id).get() if g: return [g.adm.encode('utf-8'), g.adm_name.encode('utf-8') ,g.adm_message.encode('utf-8')] return False def setCP(chat_id, categoria, palavra): g = ndb.Key(Game, chat_id).get() d = ndb.Key(Dados, chat_id).get() d.games += 1 g.categoria = categoria g.palavra = palavra#palavra.decode('utf-8') mascara = '' for i in range(len(g.palavra)): if palavra[i] == ' ': mascara = mascara+' ' else: mascara = mascara+'*' g.mascara = mascara g.put() d.put() return mascara def getCategoria(chat_id): g = ndb.Key(Game, chat_id).get() return g.categoria.encode('utf-8') def checkPalavra(chat_id, u_id, text): g = ndb.Key(Game, chat_id).get() if g: if text == g.palavra.encode('utf-8').lower(): addScore(chat_id, u_id, (len(text)*2)) g.key.delete() return True return False def getPalavra(chat_id): g = ndb.Key(Game, chat_id).get() return g.palavra def getMascara(chat_id): g = ndb.Key(Game, chat_id).get() return g.mascara.encode('utf-8') def checkUid(chat_id, u_id): g = ndb.Key(Game, chat_id).get() if u_id in g.u_ids: if checkRound(chat_id, u_id): return True return 'rnd' return 'out' def setArriscarBlock(chat_id, opt): g = ndb.Key(Game, chat_id).get() if g: g.arriscarBlock = opt g.put() return return def getArriscarBlock(chat_id): g = ndb.Key(Game, chat_id).get() if g: return g.arriscarBlock return False def setVidas(chat_id): g = ndb.Key(Game, chat_id).get() modVida = len(g.palavra)/5 if len(g.palavra) > 5 else 0 modVida += len(g.u_ids)-3 if len(g.u_ids) > 4 else 0 modVida = 9 if modVida > 9 else modVida vidas = g.vidas + modVida g.vidas = vidas g.vidas_init = g.vidas_init + modVida g.put() return vidas def menosVida(chat_id): g = ndb.Key(Game, chat_id).get() g.vidas -= 1 if g.vidas <= 0: delGame(chat_id) return True if g.vidas == 1: g.put() return 2 g.put() return False def getVidas(chat_id): g = ndb.Key(Game, chat_id).get() return g.vidas def getVidasInit(chat_id): g = ndb.Key(Game, chat_id).get() return g.vidas_init def getLetras(chat_id): g = ndb.Key(Game, chat_id).get() letras = [['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P'], ['A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L'], ['Z', 'X', 'C', 'V', 'B', 'N', 'M']] for i in range(len(g.letras)): for j in range(len(letras)): if g.letras[i].upper() in letras[j]: letras[j].remove(g.letras[i].upper()) return letras def checkLetra(chat_id, u_id, letra): #You can clearly see this is a pure kludge! g = ndb.Key(Game, chat_id).get() chs = ['á','ã','â','é','ê','í','ó','õ','ô','ú','ç'] #List of special characters supported at the moment, DO NOT ADD WORDS WITH UNSUPPORTED CHARACTERS! ch = ['a','a','a','e','e','i','o','o','o','u','c'] #Must stay in sync with chs to work. A kludge. for i in range(len(chs)): chs[i] = chs[i].decode('utf-8') ch[i] = ch[i].decode('utf-8') letra.decode('utf-8') palavra = g.palavra.lower() nPalavra = g.palavra.lower() aux = [None] * len(nPalavra) for i in range(len(nPalavra)): if nPalavra[i] in chs: idc = chs.index(palavra[i]) idx = palavra.index(chs[idc]) aux[i] = chs[idc] nPalavra = nPalavra[:idx] + ch[idc] + nPalavra[idx+1:] #Rebuilds the word without special characters if not (letra in g.letras): if letra.lower() in nPalavra.lower(): nMascara = '' score = 0 for i in range(len(nPalavra)): if nPalavra[i].lower() == letra: letraAnt = letra if aux[i]: #If there are special characters letra = aux[i] nMascara = nMascara+letra #substitutes the letter at this position letra = letraAnt score += 1 else: nMascara = nMascara+g.mascara[i] g.mascara = nMascara addScore(chat_id, u_id, (score*2)) g.letras.append(letra) g.put() if g.palavra.lower() == nMascara: return nMascara.encode('utf-8') return True g.letras.append(letra) g.put() return False g.put() return 2 def delGame(chat_id): g = ndb.Key(Game, chat_id).get() g.key.delete()
#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
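# The tests below monkey-patch api.get_api with a mocked Sentry client
# (MockHiveApi) and exercise the _massage_uri and _get_splitted_path
# helpers directly.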
import json from django.core.urlresolvers import reverse from nose.plugins.skip import SkipTest from nose.tools import assert_true, assert_equal, assert_false from hadoop import cluster from hadoop.conf import HDFS_CLUSTERS from desktop.lib.django_test_util import make_logged_in_client from desktop.lib.test_utils import grant_access, add_to_group from libsentry import api from security.api.hive import _massage_uri, _get_splitted_path def mocked_get_api(user): return MockHiveApi(user) class MockHiveApi(object): def __init__(self, user): self.user = user def list_sentry_roles_by_group(self, groupName): # return GroupName only return [{'name': groupName}] class TestMockedApi(object): def setUp(self): if not hasattr(api, 'OriginalSentryApi'): api.OriginalSentryApi = api.get_api api.get_api = mocked_get_api self.client = make_logged_in_client(username='sentry_test', groupname='test', is_superuser=False) self.client_admin = make_logged_in_client(username='sentry_hue', groupname='hue', is_superuser=False) grant_access("sentry_test", "test", "security") grant_access("sentry_hue", "hue", "security") add_to_group("sentry_test") add_to_group("sentry_hue") raise SkipTest def tearDown(self): api.get_api = api.OriginalSentryApi def test_list_sentry_roles_by_group(self): response = self.client.post(reverse("security:list_sentry_roles_by_group"), {'groupName': ''}) assert_equal('*', json.loads(response.content).get('roles', [{'name': ''}])[0]['name'], response.content) response = self.client.post(reverse("security:list_sentry_roles_by_group"), {'groupName': 'test'}) assert_equal('test', json.loads(response.content).get('roles', [{'name': ''}])[0]['name'], response.content) response = self.client_admin.post(reverse("security:list_sentry_roles_by_group"), {'groupName': ''}) assert_equal(None, json.loads(response.content).get('roles', [{'name': ''}])[0]['name'], response.content) response = self.client_admin.post(reverse("security:list_sentry_roles_by_group"), {'groupName': 'test'}) assert_equal('test', json.loads(response.content).get('roles', [{'name': ''}])[0]['name'], response.content) class TestUtils(object): def test_massage_uri(self): finish = HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode') cluster.clear_caches() try: assert_equal('', _massage_uri('')) assert_equal('namenode/data', _massage_uri('hdfs:///data')) assert_equal('hdfs://nn:11/data', _massage_uri('hdfs://nn:11/data')) assert_equal('hdfs://logical/data', _massage_uri('hdfs://logical/data')) assert_equal('namenode/data', _massage_uri('/data')) assert_equal('file:///data', _massage_uri('file:///data')) finally: finish() finish = HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing('hdfs://fs_defaultfs:8021') cluster.clear_caches() try: assert_equal('', _massage_uri('')) assert_equal('hdfs://fs_defaultfs:8021/data', _massage_uri('hdfs:///data')) assert_equal('hdfs://nn:11/data', _massage_uri('hdfs://nn:11/data')) assert_equal('hdfs://logical/data', _massage_uri('hdfs://logical/data')) assert_equal('hdfs://fs_defaultfs:8021/data', _massage_uri('/data')) assert_equal('file:///data', _massage_uri('file:///data')) finally: finish() def test_get_splitted_path(self): assert_equal(('', '', ''), _get_splitted_path('')) assert_equal(('db', '', ''), _get_splitted_path('db')) assert_equal(('db', 'table', ''), _get_splitted_path('db.table')) assert_equal(('db', 'table', 'column'), _get_splitted_path('db.table.column')) assert_equal(('db', 'table', 'column'), _get_splitted_path('db.table.column.blah')) # -*- coding: utf-8 -*- 
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## from openerp.osv import fields, osv class account_general_journal(osv.osv_memory): _inherit = "account.common.journal.report" _name = 'account.general.journal' _description = 'Account General Journal' _columns = { 'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True), } def _print_report(self, cr, uid, ids, data, context=None): data = self.pre_print_report(cr, uid, ids, data, context=context) return self.pool['report'].get_action(cr, uid, [], 'account.report_generaljournal', data=data, context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: """ ================================================== Sparse linear algebra (:mod:`scipy.sparse.linalg`) ================================================== .. currentmodule:: scipy.sparse.linalg Abstract linear operators ------------------------- .. autosummary:: :toctree: generated/ LinearOperator -- abstract representation of a linear operator aslinearoperator -- convert an object to an abstract linear operator Matrix Operations ----------------- .. autosummary:: :toctree: generated/ inv -- compute the sparse matrix inverse expm -- compute the sparse matrix exponential expm_multiply -- compute the product of a matrix exponential and a matrix Matrix norms ------------ .. autosummary:: :toctree: generated/ norm -- Norm of a sparse matrix onenormest -- Estimate the 1-norm of a sparse matrix Solving linear problems ----------------------- Direct methods for linear equation systems: .. autosummary:: :toctree: generated/ spsolve -- Solve the sparse linear system Ax=b factorized -- Pre-factorize matrix to a function solving a linear system MatrixRankWarning -- Warning on exactly singular matrices use_solver -- Select direct solver to use Iterative methods for linear equation systems: .. autosummary:: :toctree: generated/ bicg -- Use BIConjugate Gradient iteration to solve A x = b bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b cg -- Use Conjugate Gradient iteration to solve A x = b cgs -- Use Conjugate Gradient Squared iteration to solve A x = b gmres -- Use Generalized Minimal RESidual iteration to solve A x = b lgmres -- Solve a matrix equation using the LGMRES algorithm minres -- Use MINimum RESidual iteration to solve Ax = b qmr -- Use Quasi-Minimal Residual iteration to solve A x = b Iterative methods for least-squares problems: .. 
autosummary:: :toctree: generated/ lsqr -- Find the least-squares solution to a sparse linear equation system lsmr -- Find the least-squares solution to a sparse linear equation system Matrix factorizations --------------------- Eigenvalue problems: .. autosummary:: :toctree: generated/ eigs -- Find k eigenvalues and eigenvectors of the square matrix A eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning Singular value problems: .. autosummary:: :toctree: generated/ svds -- Compute k singular values/vectors for a sparse matrix Complete or incomplete LU factorizations .. autosummary:: :toctree: generated/ splu -- Compute an LU decomposition for a sparse matrix spilu -- Compute an incomplete LU decomposition for a sparse matrix SuperLU -- Object representing an LU factorization Exceptions ---------- .. autosummary:: :toctree: generated/ ArpackNoConvergence ArpackError """ from __future__ import division, print_function, absolute_import from .isolve import * from .dsolve import * from .interface import * from .eigen import * from .matfuncs import * from ._onenormest import * from ._norm import * from ._expm_multiply import * __all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench
#!/usr/bin/env python # Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure the safeseh setting is extracted properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['ninja']) CHDIR = 'linker-flags' test.run_gyp('safeseh.gyp', chdir=CHDIR) test.build('safeseh.gyp', test.ALL, chdir=CHDIR) def HasSafeExceptionHandlers(exe): full_path = test.built_file_path(exe, chdir=CHDIR) output = test.run_dumpbin('/LOADCONFIG', full_path) return ' Safe Exception Handler Table' in output # From MSDN: http://msdn.microsoft.com/en-us/library/9a89h429.aspx # If /SAFESEH is not specified, the linker will produce an image with a # table of safe exception handlers if all modules are compatible with # the safe exception handling feature. If any modules were not # compatible with the safe exception handling feature, the resulting image # will not contain a table of safe exception handlers. if HasSafeExceptionHandlers('test_safeseh_default.exe'): test.fail_test() if HasSafeExceptionHandlers('test_safeseh_no.exe'): test.fail_test() if not HasSafeExceptionHandlers('test_safeseh_yes.exe'): test.fail_test() test.pass_test()
""" Module iterchoices is intended as a helper to prevent cyclic dependencies between db models and to prevent database queries before database tables are created by syncdb. This code should be left alone, or combined only with another module that depends neither on any database model nor on an already existing db connection, in order to preserve the normal order of loading modules. """ import logging import re import pprint import sys import traceback log = logging.getLogger('iterchoices') repeated_error = False def iterchoices(func): """Iterator for lazy evaluation of choices for database models. Example Usage: class SomeNewModel(models.Model): abc = XyzChoiceCharField('SomeModelName',... choices=iterchoices(get_choices)) The function that evaluates the items of choices (e.g. get_choices) is not evaluated when a model class (e.g. SomeNewModel) is initialized. It is called by the database model when it is first needed, only one time, not until all other models have been initialized. The first possible call usually comes from Django management commands with the internal attribute "requires_model_validation = True", e.g. syncdb, validate, and typically commands that change the structure of the database. """ for item in func(): yield item def iterchoices_db(func): """ Iterator for lazy evaluation of choices for database models, modified for functions which need database access to get results. It is similar to "iterchoices" with the following difference: When the db model is thoroughly validated by database management commands (e.g. syncdb), all calls to "func" are skipped here to prevent possibly failing database queries before database tables are created in the same transaction, which can easily be caused by livesettings. (The state of the database connection could be broken without this workaround for some db backends.) """ # This test determines the conditions under which the call to the enumerating # function "func" is to be skipped. # # Typically it should be skipped for syncdb and all commands which can # usually be called before the first syncdb. It should be called for commands # like runserver, runcgi, shell. # For many other commands, either calling or skipping is fine. # # This function should be tested with Django 1.2 and 1.3 # (Django 1.2 has worse support for error handling inside transactions) # and for different db backends: postgres, sqlite3 and the tests environment. # # It would be better to fix this in Livesettings rather than here, # but that looks impossible without a change in django.db. command = introspect_management_command() if command in ('syncdb', 'test', 'satchmo_store.shop.management.commands.satchmo_copy_static') \ or command.startswith('south.'): log.info('Skipped model choices initialization function <%s> because' ' of syncdb or other database management command' % str(func).split()[1]) else: log.debug('Called model choices initialization function <%s>' % str(func).split()[1]) for item in func(): yield item def introspect_management_command(): """ Introspect which Django management command is actually running. Possible return values are: a) name of a Django internal management command, e.g. 'runserver', 'runfcgi', 'syncdb' b) full name of an external management command, e.g. 'south.management.commands.syncdb' c) the string 'handler' in the case of running under handlers like uwsgi or mod_python. """ # The output of this function should be (and has been) tested with respect # to different commands and production server configurations including # all above-mentioned, with different installation methods # including compressed .egg, # on different platforms: Linux, Windows + Mingw, Windows + Cygwin # and also with clonesatchmo. global repeated_error try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back nidentify = 0 # how many identification steps have been done command = '' try: while f is not None: co = f.f_code filename = co.co_filename.replace('\\', '/') if filename.find('/core/') >= 0 and nidentify < 3: # django.core is usually in '/django/core/' but it can be e.g.
in /Django-x.y.z/core/ for some installation if filename.find('/core/management/base.',) >= 0: name = co.co_name if name in (('validate', 'handle'), ('execute',), ('run_from_argv',))[nidentify]: nidentify += 1 # analyze first argument of execute(self, *args, **options) if name == 'execute' and co.co_argcount == 1 and (co.co_flags & 0xC == 0xC): command = f.f_locals[co.co_varnames[0]].__module__.replace('django.core.management.commands.', '') elif filename.find('/core/management/commands/') >= 0: command = re.sub('.*/core/management/commands/(.*)\..*', '\\1', filename) nidentify = 3 elif filename.find('/core/handlers/') >= 0: command = 'handler' nidentify = 3 f = f.f_back except (AttributeError, IndexError, TypeError): pass # TODO: Remove the following line in January 2012 if nothing similar is reported log.debug('Management command: %s' % command) if (not re.match('[a-z_0-9.]+$', command) or nidentify < 3) and not repeated_error: log.error('Internal error in introspect_management_command. Report it to the author: hynekcer\n' + pprint.pformat(traceback.extract_stack())) repeated_error = True return command import os # toolchains options ARCH='arm' CPU='cortex-m4' CROSS_TOOL='keil' # get setting from environment. if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') # cross_tool provides the cross compiler # EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = r'C:/Program Files/CodeSourcery/arm-none-eabi/bin' elif CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = r'E:/Keil' elif CROSS_TOOL == 'iar': PLATFORM = 'iar' IAR_PATH = r'C:/Program Files/IAR Systems/Embedded Workbench 6.0' if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') # BUILD = 'debug' if PLATFORM == 'gcc': # toolchains PREFIX = 'arm-none-eabi-' CC = PREFIX + 'gcc' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections' CFLAGS = DEVICE AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb ' LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-lpc40x.map,-cref,-u,Reset_Handler -T lpc40xx_rom.ld' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -gdwarf-2' AFLAGS += ' -gdwarf-2' else: CFLAGS += ' -O2' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' elif PLATFORM == 'armcc': # toolchains CC = 'armcc' AS = 'armasm' AR = 'armar' LINK = 'armlink' TARGET_EXT = 'axf' DEVICE = ' --cpu Cortex-M4.fp' CFLAGS = DEVICE + ' --apcs=interwork' AFLAGS = DEVICE LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-lpc40xx.map --scatter lpc40xx_rom.sct' CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC' LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB' EXEC_PATH += '/arm/bin40/' if BUILD == 'debug': CFLAGS += ' -g -O0' AFLAGS += ' -g' else: CFLAGS += ' -O2' POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET' elif PLATFORM == 'iar': # toolchains CC = 'iccarm' AS = 'iasmarm' AR = 'iarchive' LINK = 'ilinkarm' TARGET_EXT = 'out' DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD' CFLAGS = DEVICE CFLAGS += ' --diag_suppress Pa050' CFLAGS += ' --no_cse' CFLAGS += ' --no_unroll' CFLAGS += ' --no_inline' CFLAGS += ' --no_code_motion' CFLAGS += ' --no_tbaa' CFLAGS += ' --no_clustering' CFLAGS += ' 
--no_scheduling' CFLAGS += ' --debug' CFLAGS += ' --endian=little' CFLAGS += ' --cpu=Cortex-M4' CFLAGS += ' -e' CFLAGS += ' --fpu=None' CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"' CFLAGS += ' -Ol' CFLAGS += ' --use_c++_inline' AFLAGS = '' AFLAGS += ' -s+' AFLAGS += ' -w+' AFLAGS += ' -r' AFLAGS += ' --cpu Cortex-M4' AFLAGS += ' --fpu None' LFLAGS = ' --config lpc40xx_flash.icf' LFLAGS += ' --redirect _Printf=_PrintfTiny' LFLAGS += ' --redirect _Scanf=_ScanfSmall' LFLAGS += ' --entry __iar_program_start' EXEC_PATH = IAR_PATH + '/arm/bin/' POST_ACTION = '' ############################################################################ ## ## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary ## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary ## ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License along ## with this program; if not, write to the Free Software Foundation, Inc., ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ## ############################################################################ """ The Keybridge module implements generic X.509 key bridging. Keybridging is a method to let the client see a copy of the server's certificate (or vice versa), allowing it to inspect it and decide about its trustworthiness. Because of proxying the SSL/TLS connection, the client is not able to inspect the certificate of the server directly, therefore Zorp generates a certificate based on the server's certificate on-the-fly. This generated certificate is presented to the client. For details on configuring keybridging, see . """ from Zorp import * from Certificate_ import ZorpCertificate from FileLock import FileLock import os import OpenSSL import hashlib # # Key selector is a hash containing one or more ways to # identify a key or keypair. The meaning of various keys in the hash and how they are interpreted # is as follows: # # 'zms-key' Contains the unique name of a keypair in ZMS # 'bridge-trusted-key' Contains a certificate blob for which a new key can be generated, # the key must be signed by the 'trusted' CA # 'bridge-untrusted-key' Contains a certificate blob for which a new key can be generated, # the key must be signed by the 'untrusted' CA. # class X509KeyManager(object): """ """ def __init__(self): pass def getKeypair(self, selector): pass class X509KeyBridge(X509KeyManager): """ Class to perform SSL keybridging. This class is able to generate certificates mimicking another certificate, primarily used to transfer the information of a server's certificate to the client in keybridging. For details on configuring keybridging, see . key_file "" Name of the private key to be used for the newly generated certificates. key_passphrase "" Passphrase required to access the private key stored in key_file. cache_directory "" The directory where all automatically generated certificates are cached. trusted_ca_files None A tuple of cert_file, key_file, passphrase) for the CA used for keybridging trusted certificates. 
untrusted_ca_files None A tuple of cert_file, key_file, passphrase) for the CA used for keybridging untrusted certificates. """ def __new__(cls, *args, **kwargs): """ """ obj = super(X509KeyBridge, cls).__new__(cls) base = cls if base.__name__ != "X509KeyBridge": for base in cls.__bases__: if base.__name__ == "X509KeyBridge": break; if kwargs.has_key("key_pem"): base.__init__ = base._new_init else: base.__init__ = base._old_init return obj default_extension_whitelist = ('keyUsage', 'subjectAltName', 'extendedKeyUsage') def _old_init(self, key_file, cache_directory=None, trusted_ca_files=None, untrusted_ca_files=None, key_passphrase = "", extension_whitelist=None): """ key_file Name of the private key to be used for the newly generated certificates. key_passphrase "" Passphrase required to access the private key stored in key_file. cache_directory "/var/lib/zorp/keybridge-cache" The directory where all automatically generated certificates are cached. trusted_ca_files A tuple of cert_file, key_file, passphrase) for the CA used for keybridging trusted certificates. untrusted_ca_files None A tuple of cert_file, key_file, passphrase) for the CA used for keybridging untrusted certificates. extension_whitelist None Zorp transfers the following certificate extensions to the client side: Key Usage, Subject Alternative Name, Extended Key Usage. Other extensions will be automatically deleted during keybridging. This is needed because some certificate extensions contain references to the Issuer CA, which references become invalid for keybridged certificates. To transfer other extensions, list them in the extension_whitelist parameter. Note that modifying this parameter replaces the default values, so to extend the list of transferred extensions, include the 'keyUsage', 'subjectAltName', 'extendedKeyUsage' list as well. For example: self.extension_whitelist = ('keyUsage', 'subjectAltName', 'extendedKeyUsage', 'customExtension') """ """Constructor to initialize an X509KeyBridge instance This constructor initializes an X509KeyBridge instance by loading the necessary keys and certificates from files. Make sure that it is initialized once, instead of in every proxy instance as that may degrade performance. This may be achieved by putting the initialization into the class body or into global context. 
Arguments key_file -- name of the private key to be used for all newly generated certificates key_passphrase -- passphrase to use with private key key_file cache_directory -- name of a directory where all automatically generated certificates are cached trusted_ca_files -- a tuple of (cert_file, key_file, passphrase) for a CA to be used for signing certificates untrusted_ca_files -- a tuple of (cert_file, key_file, passphrase) for a CA to be used for signing untrusted certificates """ key_pem = self.readPEM(key_file) if trusted_ca_files: (trusted_cert_file, trusted_key_file, trusted_passphrase) = trusted_ca_files try: passphrase = trusted_passphrase except IndexError: passphrase = "" trusted_ca_pems = (self.readPEM(trusted_cert_file), self.readPEM(trusted_key_file), passphrase) if untrusted_ca_files: (untrusted_cert_file, untrusted_key_file, untrusted_passphrase) = untrusted_ca_files try: passphrase = untrusted_passphrase except IndexError: passphrase = "" untrusted_ca_pems = (self.readPEM(untrusted_cert_file), self.readPEM(untrusted_key_file), passphrase) self._new_init(key_pem, cache_directory, trusted_ca_pems, untrusted_ca_pems, key_passphrase, extension_whitelist) def _new_init(self, key_pem, cache_directory=None, trusted_ca_files=None, untrusted_ca_files=None, key_passphrase = "", extension_whitelist=None): """ """ if cache_directory: self.cache_directory = cache_directory else: self.cache_directory = "/var/lib/zorp/keybridge-cache" if not extension_whitelist: extension_whitelist = self.default_extension_whitelist self.extension_whitelist = extension_whitelist self.initialized = 0 try: self._load_privatekey(key_pem, trusted_ca_files, untrusted_ca_files, key_passphrase) self.initialized = 1 except IOError, e: log(None, CORE_ERROR, 3, "Error opening key or certificate file for keybridge; file='%s', error='%s'", (e.filename, e.strerror)) def _load_privatekey(self, key_pem, trusted_ca_files, untrusted_ca_files, key_passphrase): """ """ if not trusted_ca_files: trusted_ca_files = (None, None, None) (trusted_cert_file, trusted_key_file, trusted_passphrase) = trusted_ca_files self.key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_pem, key_passphrase) self.key_pem = key_pem try: passphrase = trusted_passphrase except IndexError: passphrase = "" self.trusted_ca = (OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, trusted_cert_file), OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, trusted_key_file, passphrase)) self.trusted_ca_pem = trusted_cert_file self.untrusted_ca_pem = "" if untrusted_ca_files: (untrusted_cert_file, untrusted_key_file, untrusted_passphrase) = untrusted_ca_files try: passphrase = untrusted_passphrase except IndexError: passphrase = "" self.untrusted_ca = (OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, untrusted_cert_file), OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, untrusted_key_file, passphrase)) self.untrusted_ca_pem = untrusted_cert_file def readPEM(self, filename): """ """ log(None, CORE_DEBUG, 6, "Reading PEM file; filename='%s'" % filename) f = open(filename, 'r') res = f.read() f.close() return res def getCachedKey(self, session_id, cert_file, cert_server): """ """ def is_md5(cert): return cert.get_signature_algorithm().lower().find("md5") != -1 log(session_id, CORE_DEBUG, 5, "Loading cached certificate; file='%s'", cert_file) try: orig_cert = open(cert_file + '.orig', 'r').read() except IOError, e: log(session_id, CORE_DEBUG, 5, "Original keybridged certificate cannot be read, regenerating; 
file='%s', error='%s'", (cert_file, e.strerror)) raise KeyError('not in cache') try: cached_cert = open(cert_file, 'r').read() cached_cert_x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cached_cert) except IOError, e: log(session_id, CORE_DEBUG, 5, "Cached certificate cannot be read, regenerating; file='%s', error='%s'", (cert_file, e.strerror)) raise KeyError('not in cache') except OpenSSL.crypto.Error: log(session_id, CORE_DEBUG, 5, "Cached certificate is not valid, regenerating; file='%s'", cert_file) raise KeyError('not in cache') cert_server_x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_server) # Originally we signed every cert using md5, regardless of the server cert's algo. # We regenerate every cert if the cached version uses md5 while the server cert uses a different algo. if orig_cert == cert_server: if is_md5(cached_cert_x509) and not is_md5(cert_server_x509): log(session_id, CORE_DEBUG, 5, "Cached certificate is MD5 signed while server's certificate is not, regenerating; file='%s', cached_algo='%s', server_algo='%s'", (cert_file, cached_cert_x509.get_signature_algorithm(), cert_server_x509.get_signature_algorithm())) else: log(session_id, CORE_DEBUG, 5, "Cached certificate ok, reusing; file='%s'", cert_file) return (cached_cert, OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key)) else: log(session_id, CORE_DEBUG, 5, "Cached certificate changed, regenerating; file='%s'", cert_file) raise KeyError, 'certificate changed' def storeCachedKey(self, session_id, cert_file, new_blob, orig_blob): """ """ try: try: os.unlink(cert_file) except OSError: pass try: os.unlink(cert_file + '.orig') except OSError: pass log(session_id, CORE_DEBUG, 5, "Storing cached certificate; file='%s'", cert_file) f = open(cert_file, 'w') f.write(new_blob) f.close() f = open(cert_file + '.orig', 'w') f.write(orig_blob) f.close() except IOError, e: log(session_id, CORE_ERROR, 2, "Error storing generated X.509 certificate in the cache; file='%s', error='%s'", (cert_file, e.strerror)) def getLastSerial(self): """ """ serial = 1 for file in os.listdir(self.cache_directory): if file[-4:] != '.crt': continue f = open("%s/%s" % (self.cache_directory, file), 'r') data = f.read() f.close() cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, data) cser = cert.get_serial_number() if cser > serial: serial = cser return serial def genCert(self, key, orig_cert, ca_cert, ca_key, serial): """ """ filetype = OpenSSL.crypto.FILETYPE_PEM certificate = OpenSSL.crypto.dump_certificate(filetype, orig_cert) if self.extension_whitelist: # delete extensions not on whitelist zorp_certificate = ZorpCertificate(certificate) certificate = zorp_certificate.handle_extensions(self.extension_whitelist) new_cert = OpenSSL.crypto.load_certificate(filetype, certificate) new_cert.set_serial_number(serial) new_cert.set_issuer(ca_cert.get_subject()) new_cert.set_pubkey(key) hash_alg = orig_cert.get_signature_algorithm() try: new_cert.sign(ca_key, hash_alg) except ValueError, e: log(None, CORE_INFO, 3, "Could not sign cert with hash algorithm, falling back to SHA256; hash_alg='%s'", hash_alg) new_cert.sign(ca_key, 'sha256') return new_cert def _save_new_cert(self, session_id, orig_blob, ca_pair, cert_file, serial): """ """ orig_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, orig_blob) new_cert = self.genCert(self.key, orig_cert, ca_pair[0], ca_pair[1], serial) new_blob = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, new_cert) 
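# The bridged certificate is cached alongside the original server blob
# (cert_file + '.orig') so that getCachedKey() can detect a changed
# server certificate later and force regeneration.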
self.storeCachedKey(session_id, cert_file, new_blob, orig_blob) return new_blob def _dump_privatekey(self): """ """ return OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key) def _get_serial_filename(self): """ """ return '%s/serial.txt' % self.cache_directory def getKeypair(self, session_id, selector): """ """ if not self.initialized: log(session_id, CORE_ERROR, 3, "Keybridge not completely initialized, error generating keypair;") return (None, None) try: trusted = 1 orig_blob = selector['bridge-trusted-key'] hash_key = orig_blob + self.trusted_ca_pem + self.key_pem except KeyError: trusted = 0 orig_blob = selector['bridge-untrusted-key'] hash_key = orig_blob + self.untrusted_ca_pem + self.key_pem hash = hashlib.sha256(hash_key).hexdigest() if trusted: cert_file = '%s/trusted-%s.crt' % (self.cache_directory, hash) ca_pair = self.trusted_ca else: cert_file = '%s/untrusted-%s.crt' % (self.cache_directory, hash) ca_pair = self.untrusted_ca with FileLock("%s/.lock" % self.cache_directory): try: return self.getCachedKey(session_id, cert_file, orig_blob) except KeyError: log(session_id, CORE_DEBUG, 5, "Certificate not found in the cache, regenerating;") serial_file = self._get_serial_filename() serial_pos = "" try: serial_pos = "file open" serial_file_fd = open(serial_file, 'r') serial_pos = "file read" serial_file_data = serial_file_fd.read().strip() serial_pos = "turn to integer" serial = int(serial_file_data) serial_pos = None except (ValueError, IOError): serial = self.getLastSerial() log(session_id, CORE_ERROR, 3, "On-line CA serial file not found, reinitializing; file='%s', serial='%d', pos='%s'", (serial_file, serial, serial_pos)) serial = serial + 1 try: with open(serial_file, 'w') as f: f.write(str(serial)) except IOError, e: log(session_id, CORE_ERROR, 2, "Cannot write serial number of on-line CA; file='%s', error='%s'", (serial_file, e.strerror)) new_blob = self._save_new_cert(session_id, orig_blob, ca_pair, cert_file, serial) return (new_blob, self._dump_privatekey()) import os from setuptools import find_packages from setuptools import setup import django_xmlrpc setup(name='django-xmlrpc', version=django_xmlrpc.__version__, description='XML-RPC Server App for the Django framework.', long_description=open(os.path.join('README.rst')).read(), keywords='django, service, xmlrpc', author='Graham Binns', author_email='graham.binns@gmail.com', maintainer='Fantomas42', maintainer_email='fantomas42@gmail.com', url='https://github.com/Fantomas42/django-xmlrpc', packages=find_packages(), classifiers=[ 'Framework :: Django', 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules'], license='New BSD License', include_package_data=True, zip_safe=False ) import unittest from test import test_support import subprocess import sys import signal import os import errno import tempfile import time import re import sysconfig try: import resource except ImportError: resource = None mswindows = (sys.platform == "win32") # # Depends on the following external programs: Python # if mswindows: SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' 'os.O_BINARY);') else: SETBINARY = '' try: mkstemp = tempfile.mkstemp except AttributeError: # tempfile.mkstemp is not available def mkstemp(): """Replacement for mkstemp, calling mktemp.""" fname = 
tempfile.mktemp() return os.open(fname, os.O_RDWR|os.O_CREAT), fname class BaseTestCase(unittest.TestCase): def setUp(self): # Try to minimize the number of children we have so this test # doesn't crash on some buildbots (Alphas in particular). test_support.reap_children() def tearDown(self): for inst in subprocess._active: inst.wait() subprocess._cleanup() self.assertFalse(subprocess._active, "subprocess._active not empty") def assertStderrEqual(self, stderr, expected, msg=None): # In a debug build, stuff like "[6580 refs]" is printed to stderr at # shutdown time. That frustrates tests trying to check stderr produced # from a spawned Python process. actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr) self.assertEqual(actual, expected, msg) class PopenTestException(Exception): pass class PopenExecuteChildRaises(subprocess.Popen): """Popen subclass for testing cleanup of subprocess.PIPE filehandles when _execute_child fails. """ def _execute_child(self, *args, **kwargs): raise PopenTestException("Forced Exception for Test") class ProcessTestCase(BaseTestCase): def test_call_seq(self): # call() function with sequence argument rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(rc, 47) def test_check_call_zero(self): # check_call() function with zero return code rc = subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(0)"]) self.assertEqual(rc, 0) def test_check_call_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(c.exception.returncode, 47) def test_check_output(self): # check_output() function with zero return code output = subprocess.check_output( [sys.executable, "-c", "print 'BDFL'"]) self.assertIn('BDFL', output) def test_check_output_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_output( [sys.executable, "-c", "import sys; sys.exit(5)"]) self.assertEqual(c.exception.returncode, 5) def test_check_output_stderr(self): # check_output() function stderr redirected to stdout output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"], stderr=subprocess.STDOUT) self.assertIn('BDFL', output) def test_check_output_stdout_arg(self): # check_output() function stderr redirected to stdout with self.assertRaises(ValueError) as c: output = subprocess.check_output( [sys.executable, "-c", "print 'will not be run'"], stdout=sys.stdout) self.fail("Expected ValueError when stdout arg supplied.") self.assertIn('stdout', c.exception.args[0]) def test_call_kwargs(self): # call() function with keyword args newenv = os.environ.copy() newenv["FRUIT"] = "banana" rc = subprocess.call([sys.executable, "-c", 'import sys, os;' 'sys.exit(os.getenv("FRUIT")=="banana")'], env=newenv) self.assertEqual(rc, 1) def test_invalid_args(self): # Popen() called with invalid arguments should raise TypeError # but Popen.__del__ should not complain (issue #12085) with test_support.captured_stderr() as s: self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1) argcount = subprocess.Popen.__init__.__code__.co_argcount too_many_args = [0] * (argcount + 1) self.assertRaises(TypeError, subprocess.Popen, *too_many_args) self.assertEqual(s.getvalue(), '') def test_stdin_none(self): # .stdin is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print "banana"'], 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) p.wait() self.assertEqual(p.stdin, None) def test_stdout_none(self): # .stdout is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print " this bit of output is from a ' 'test of stdout in a different ' 'process ..."'], stdin=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdin.close) self.addCleanup(p.stderr.close) p.wait() self.assertEqual(p.stdout, None) def test_stderr_none(self): # .stderr is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print "banana"'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stdin.close) p.wait() self.assertEqual(p.stderr, None) def test_executable_with_cwd(self): python_dir = os.path.dirname(os.path.realpath(sys.executable)) p = subprocess.Popen(["somethingyoudonthave", "-c", "import sys; sys.exit(47)"], executable=sys.executable, cwd=python_dir) p.wait() self.assertEqual(p.returncode, 47) @unittest.skipIf(sysconfig.is_python_build(), "need an installed Python. See #7774") def test_executable_without_cwd(self): # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. p = subprocess.Popen(["somethingyoudonthave", "-c", "import sys; sys.exit(47)"], executable=sys.executable) p.wait() self.assertEqual(p.returncode, 47) def test_stdin_pipe(self): # stdin redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.stdin.write("pear") p.stdin.close() p.wait() self.assertEqual(p.returncode, 1) def test_stdin_filedes(self): # stdin is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() os.write(d, "pear") os.lseek(d, 0, 0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=d) p.wait() self.assertEqual(p.returncode, 1) def test_stdin_fileobj(self): # stdin is set to open file object tf = tempfile.TemporaryFile() tf.write("pear") tf.seek(0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=tf) p.wait() self.assertEqual(p.returncode, 1) def test_stdout_pipe(self): # stdout redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "orange") def test_stdout_filedes(self): # stdout is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=d) p.wait() os.lseek(d, 0, 0) self.assertEqual(os.read(d, 1024), "orange") def test_stdout_fileobj(self): # stdout is set to open file object tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=tf) p.wait() tf.seek(0) self.assertEqual(tf.read(), "orange") def test_stderr_pipe(self): # stderr redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=subprocess.PIPE) self.addCleanup(p.stderr.close) self.assertStderrEqual(p.stderr.read(), "strawberry") def test_stderr_filedes(self): # stderr is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=d) p.wait() os.lseek(d, 0, 0) 
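# Rewind the descriptor so the parent can read back what the child wrote.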
self.assertStderrEqual(os.read(d, 1024), "strawberry") def test_stderr_fileobj(self): # stderr is set to open file object tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), "strawberry") def test_stdout_stderr_pipe(self): # capture stdout and stderr to the same pipe p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.addCleanup(p.stdout.close) self.assertStderrEqual(p.stdout.read(), "appleorange") def test_stdout_stderr_file(self): # capture stdout and stderr to the same open file tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=tf, stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), "appleorange") def test_stdout_filedes_of_stdout(self): # stdout is set to 1 (#1531862). cmd = r"import sys, os; sys.exit(os.write(sys.stdout.fileno(), '.\n'))" rc = subprocess.call([sys.executable, "-c", cmd], stdout=1) self.assertEqual(rc, 2) def test_cwd(self): tmpdir = tempfile.gettempdir() # We cannot use os.path.realpath to canonicalize the path, # since it doesn't expand Tru64 {memb} strings. See bug 1063571. cwd = os.getcwd() os.chdir(tmpdir) tmpdir = os.getcwd() os.chdir(cwd) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getcwd())'], stdout=subprocess.PIPE, cwd=tmpdir) self.addCleanup(p.stdout.close) normcase = os.path.normcase self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir)) def test_env(self): newenv = os.environ.copy() newenv["FRUIT"] = "orange" p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "orange") def test_communicate_stdin(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.communicate("pear") self.assertEqual(p.returncode, 1) def test_communicate_stdout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("pineapple")'], stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, "pineapple") self.assertEqual(stderr, None) def test_communicate_stderr(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("pineapple")'], stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertStderrEqual(stderr, "pineapple") def test_communicate(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stderr.write("pineapple");' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) (stdout, stderr) = p.communicate("banana") self.assertEqual(stdout, "banana") self.assertStderrEqual(stderr, "pineapple") # This test is Linux specific for simplicity to at least have # some coverage. It is not a platform specific bug. @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") # Test for the fd leak reported in http://bugs.python.org/issue2791. 
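# The test counts the entries in /proc/<pid>/fd before Popen and after
# communicate() to confirm the pipe descriptors are closed again.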
def test_communicate_pipe_fd_leak(self): fd_directory = '/proc/%d/fd' % os.getpid() num_fds_before_popen = len(os.listdir(fd_directory)) p = subprocess.Popen([sys.executable, "-c", "print()"], stdout=subprocess.PIPE) p.communicate() num_fds_after_communicate = len(os.listdir(fd_directory)) del p num_fds_after_destruction = len(os.listdir(fd_directory)) self.assertEqual(num_fds_before_popen, num_fds_after_destruction) self.assertEqual(num_fds_before_popen, num_fds_after_communicate) def test_communicate_returns(self): # communicate() should return None if no redirection is active p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(47)"]) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertEqual(stderr, None) def test_communicate_pipe_buf(self): # communicate() with writes larger than pipe_buf # This test will probably deadlock rather than fail, if # communicate() does not work properly. x, y = os.pipe() if mswindows: pipe_buf = 512 else: pipe_buf = os.fpathconf(x, "PC_PIPE_BUF") os.close(x) os.close(y) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read(47));' 'sys.stderr.write("xyz"*%d);' 'sys.stdout.write(sys.stdin.read())' % pipe_buf], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) string_to_write = "abc"*pipe_buf (stdout, stderr) = p.communicate(string_to_write) self.assertEqual(stdout, string_to_write) def test_writes_before_communicate(self): # stdin.write before communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.stdin.write("banana") (stdout, stderr) = p.communicate("split") self.assertEqual(stdout, "bananasplit") self.assertStderrEqual(stderr, "") def test_universal_newlines(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' 'sys.stdout.flush();' 'sys.stdout.write("line3\\r\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line4\\r");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline5");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline6");'], stdout=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) stdout = p.stdout.read() if hasattr(file, 'newlines'): # Interpreter with universal newline support self.assertEqual(stdout, "line1\nline2\nline3\nline4\nline5\nline6") else: # Interpreter without universal newline support self.assertEqual(stdout, "line1\nline2\rline3\r\nline4\r\nline5\nline6") def test_universal_newlines_communicate(self): # universal newlines through communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' 'sys.stdout.flush();' 'sys.stdout.write("line3\\r\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line4\\r");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline5");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline6");'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate() if hasattr(file, 'newlines'): # Interpreter with universal newline support self.assertEqual(stdout, 
"line1\nline2\nline3\nline4\nline5\nline6") else: # Interpreter without universal newline support self.assertEqual(stdout, "line1\nline2\rline3\r\nline4\r\nline5\nline6") def test_no_leaking(self): # Make sure we leak no resources if not mswindows: max_handles = 1026 # too much for most UNIX systems else: max_handles = 2050 # too much for (at least some) Windows setups handles = [] try: for i in range(max_handles): try: handles.append(os.open(test_support.TESTFN, os.O_WRONLY | os.O_CREAT)) except OSError as e: if e.errno != errno.EMFILE: raise break else: self.skipTest("failed to reach the file descriptor limit " "(tried %d)" % max_handles) # Close a couple of them (should be enough for a subprocess) for i in range(10): os.close(handles.pop()) # Loop creating some subprocesses. If one of them leaks some fds, # the next loop iteration will fail by reaching the max fd limit. for i in range(15): p = subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.read())"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) data = p.communicate(b"lime")[0] self.assertEqual(data, b"lime") finally: for h in handles: os.close(h) test_support.unlink(test_support.TESTFN) def test_list2cmdline(self): self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']), '"a b c" d e') self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']), 'ab\\"c \\ d') self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']), 'ab\\"c " \\\\" d') self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']), 'a\\\\\\b "de fg" h') self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']), 'a\\\\\\"b c d') self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']), '"a\\\\b c" d e') self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']), '"a\\\\b\\ c" d e') self.assertEqual(subprocess.list2cmdline(['ab', '']), 'ab ""') def test_poll(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(1)"]) count = 0 while p.poll() is None: time.sleep(0.1) count += 1 # We expect that the poll loop probably went around about 10 times, # but, based on system scheduling we can't control, it's possible # poll() never returned None. It "should be" very rare that it # didn't go around at least twice. self.assertGreaterEqual(count, 2) # Subsequent invocations should just return the returncode self.assertEqual(p.poll(), 0) def test_wait(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(2)"]) self.assertEqual(p.wait(), 0) # Subsequent invocations should just return the returncode self.assertEqual(p.wait(), 0) def test_invalid_bufsize(self): # an invalid type of the bufsize argument should raise # TypeError. with self.assertRaises(TypeError): subprocess.Popen([sys.executable, "-c", "pass"], "orange") def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust # the maximum number of open fds. 1024 seems a very common # value for that limit, but Windows has 2048, so we loop # 1024 times (each call leaked two fds). for i in range(1024): # Windows raises IOError. Others raise OSError. 
with self.assertRaises(EnvironmentError) as c: subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # ignore errors that indicate the command was not found if c.exception.errno not in (errno.ENOENT, errno.EACCES): raise c.exception def test_handles_closed_on_exception(self): # If CreateProcess exits with an error, ensure the # duplicate output handles are released ifhandle, ifname = mkstemp() ofhandle, ofname = mkstemp() efhandle, efname = mkstemp() try: subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle, stderr=efhandle) except OSError: os.close(ifhandle) os.remove(ifname) os.close(ofhandle) os.remove(ofname) os.close(efhandle) os.remove(efname) self.assertFalse(os.path.exists(ifname)) self.assertFalse(os.path.exists(ofname)) self.assertFalse(os.path.exists(efname)) def test_communicate_epipe(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.communicate("x" * 2**20) def test_communicate_epipe_only_stdin(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) time.sleep(2) p.communicate("x" * 2**20) # This test is Linux-ish specific for simplicity to at least have # some coverage. It is not a platform specific bug. @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") def test_failed_child_execute_fd_leak(self): """Test for the fork() failure fd leak reported in issue16327.""" fd_directory = '/proc/%d/fd' % os.getpid() fds_before_popen = os.listdir(fd_directory) with self.assertRaises(PopenTestException): PopenExecuteChildRaises( [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOTE: This test doesn't verify that the real _execute_child # does not close the file descriptors itself on the way out # during an exception. Code inspection has confirmed that. fds_after_exception = os.listdir(fd_directory) self.assertEqual(fds_before_popen, fds_after_exception) # context manager class _SuppressCoreFiles(object): """Try to prevent core files from being created.""" old_limit = None def __enter__(self): """Try to save previous ulimit, then set it to (0, 0).""" if resource is not None: try: self.old_limit = resource.getrlimit(resource.RLIMIT_CORE) resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) except (ValueError, resource.error): pass if sys.platform == 'darwin': # Check if the 'Crash Reporter' on OSX was configured # in 'Developer' mode and warn that it will get triggered # when it is. # # This assumes that this context manager is used in tests # that might trigger the next manager. 
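# 'defaults read com.apple.CrashReporter DialogType' prints 'developer'
# when the Crash Reporter has been switched to Developer mode.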
value = subprocess.Popen(['/usr/bin/defaults', 'read', 'com.apple.CrashReporter', 'DialogType'], stdout=subprocess.PIPE).communicate()[0] if value.strip() == b'developer': print "this tests triggers the Crash Reporter, that is intentional" sys.stdout.flush() def __exit__(self, *args): """Return core file behavior to default.""" if self.old_limit is None: return if resource is not None: try: resource.setrlimit(resource.RLIMIT_CORE, self.old_limit) except (ValueError, resource.error): pass @unittest.skipUnless(hasattr(signal, 'SIGALRM'), "Requires signal.SIGALRM") def test_communicate_eintr(self): # Issue #12493: communicate() should handle EINTR def handler(signum, frame): pass old_handler = signal.signal(signal.SIGALRM, handler) self.addCleanup(signal.signal, signal.SIGALRM, old_handler) # the process is running for 2 seconds args = [sys.executable, "-c", 'import time; time.sleep(2)'] for stream in ('stdout', 'stderr'): kw = {stream: subprocess.PIPE} with subprocess.Popen(args, **kw) as process: signal.alarm(1) # communicate() will be interrupted by SIGALRM process.communicate() @unittest.skipIf(mswindows, "POSIX specific tests") class POSIXProcessTestCase(BaseTestCase): def test_exceptions(self): # caught & re-raised exceptions with self.assertRaises(OSError) as c: p = subprocess.Popen([sys.executable, "-c", ""], cwd="/this/path/does/not/exist") # The attribute child_traceback should contain "os.chdir" somewhere. self.assertIn("os.chdir", c.exception.child_traceback) def test_run_abort(self): # returncode handles signal termination with _SuppressCoreFiles(): p = subprocess.Popen([sys.executable, "-c", "import os; os.abort()"]) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) def test_preexec(self): # preexec function p = subprocess.Popen([sys.executable, "-c", "import sys, os;" "sys.stdout.write(os.getenv('FRUIT'))"], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "apple") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child( self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): try: subprocess.Popen._execute_child( self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. 
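# If _execute_child had closed the parent's pipe ends prematurely, one of
# these newly opened descriptors would be assigned the same fd number.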
devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (p2cwrite, c2pread, errread)) finally: map(os.close, devzero_fds) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise RuntimeError("force the _execute_child() errpipe_data path.") with self.assertRaises(RuntimeError): self._TestExecuteChildPopen( self, [sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_args_string(self): # args is a string f, fname = mkstemp() os.write(f, "#!/bin/sh\n") os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.close(f) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), "apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), "apple") def test_call_string(self): # call() function with string argument on UNIX f, fname = mkstemp() os.write(f, "#!/bin/sh\n") os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.close(f) os.chmod(fname, 0700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), sh) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. 
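# close_fds=True keeps the child from inheriting any descriptor beyond
# stdin/stdout/stderr.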
p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn('KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertStderrEqual(stderr, '') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertStderrEqual(stderr, '') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 newfds = [] for a in fds: b = os.dup(a) newfds.append(b) if a == 0: stdin = b try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() err = test_support.strip_python_stderr(err) self.assertEqual((out, err), (b'apple', b'orange')) finally: for b, a in zip(newfds, fds): os.dup2(b, a) for b in newfds: os.close(b) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. 
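# The helper dup()s each standard fd out of the way, closes the original
# slot, runs the child, and restores the saved copies afterwards.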
self.check_close_std_fds([0, 1, 2]) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = [os.dup(fd) for fd in range(3)] try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = test_support.strip_python_stderr(os.read(stderr_no, 1024)) finally: for std, saved in enumerate(saved_fds): os.dup2(saved, std) os.close(saved) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. sigchild_ignore = test_support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr) def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. # spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. 
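# Constructing another Popen below runs subprocess._cleanup(), which
# should finally wait() on the killed child and drop it from _active.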
# spawn a Popen, delete its reference and kill it p = subprocess.Popen([sys.executable, "-c", 'import time;' 'time.sleep(3)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p os.kill(pid, signal.SIGKILL) # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) # let some time for the process to exit, and create a new Popen: this # should trigger the wait() of p time.sleep(0.2) with self.assertRaises(EnvironmentError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass # p should have been wait()ed on, and removed from the _active list self.assertRaises(OSError, os.waitpid, pid, 0) self.assertNotIn(ident, [id(o) for o in subprocess._active]) def test_pipe_cloexec(self): # Issue 12786: check that the communication pipes' FDs are set CLOEXEC, # and are not inherited by another child process. p1 = subprocess.Popen([sys.executable, "-c", 'import os;' 'os.read(0, 1)' ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p2 = subprocess.Popen([sys.executable, "-c", """if True: import os, errno, sys for fd in %r: try: os.close(fd) except OSError as e: if e.errno != errno.EBADF: raise else: sys.exit(1) sys.exit(0) """ % [f.fileno() for f in (p1.stdin, p1.stdout, p1.stderr)] ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) p1.communicate('foo') _, stderr = p2.communicate() self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr)) @unittest.skipUnless(mswindows, "Windows specific tests") class Win32ProcessTestCase(BaseTestCase): def test_startupinfo(self): # startupinfo argument # We uses hardcoded constants, because we do not want to # depend on win32all. 
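# These literals mirror the Win32 STARTF_USESHOWWINDOW flag (0x1) and the
# SW_MAXIMIZE show-window command (3).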
STARTF_USESHOWWINDOW = 1 SW_MAXIMIZE = 3 startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_MAXIMIZE # Since Python is a console process, it won't be affected # by wShowWindow, but the argument should be silently # ignored subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"], startupinfo=startupinfo) def test_creationflags(self): # creationflags argument CREATE_NEW_CONSOLE = 16 sys.stderr.write(" a DOS box should flash briefly ...\n") subprocess.call(sys.executable + ' -c "import time; time.sleep(0.25)"', creationflags=CREATE_NEW_CONSOLE) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], preexec_fn=lambda: 1) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], stdout=subprocess.PIPE, close_fds=True) def test_close_fds(self): # close file descriptors rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"], close_fds=True) self.assertEqual(rc, 47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen(["set"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertIn("physalis", p.stdout.read()) def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertIn("physalis", p.stdout.read()) def test_call_string(self): # call() function with string argument on Windows rc = subprocess.call(sys.executable + ' -c "import sys; sys.exit(47)"') self.assertEqual(rc, 47) def _kill_process(self, method, *args): # Some win32 buildbot raises EOFError if stdin is inherited p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, '') returncode = p.wait() self.assertNotEqual(returncode, 0) def _kill_dead_process(self, method, *args): p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() sys.exit(42) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. 
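# The child prints a single byte once the interpreter is up; reading it
# here doubles as a readiness handshake.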
p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') rc = p.wait() self.assertEqual(rc, 42) def test_send_signal(self): self._kill_process('send_signal', signal.SIGTERM) def test_kill(self): self._kill_process('kill') def test_terminate(self): self._kill_process('terminate') def test_send_signal_dead(self): self._kill_dead_process('send_signal', signal.SIGTERM) def test_kill_dead(self): self._kill_dead_process('kill') def test_terminate_dead(self): self._kill_dead_process('terminate') @unittest.skipUnless(getattr(subprocess, '_has_poll', False), "poll system call not supported") class ProcessTestCaseNoPoll(ProcessTestCase): def setUp(self): subprocess._has_poll = False ProcessTestCase.setUp(self) def tearDown(self): subprocess._has_poll = True ProcessTestCase.tearDown(self) class HelperFunctionTests(unittest.TestCase): @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows") def test_eintr_retry_call(self): record_calls = [] def fake_os_func(*args): record_calls.append(args) if len(record_calls) == 2: raise OSError(errno.EINTR, "fake interrupted system call") return tuple(reversed(args)) self.assertEqual((999, 256), subprocess._eintr_retry_call(fake_os_func, 256, 999)) self.assertEqual([(256, 999)], record_calls) # This time there will be an EINTR so it will loop once. self.assertEqual((666,), subprocess._eintr_retry_call(fake_os_func, 666)) self.assertEqual([(256, 999), (666,), (666,)], record_calls) @unittest.skipUnless(mswindows, "mswindows only") class CommandsWithSpaces (BaseTestCase): def setUp(self): super(CommandsWithSpaces, self).setUp() f, fname = mkstemp(".py", "te st") self.fname = fname.lower () os.write(f, b"import sys;" b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))" ) os.close(f) def tearDown(self): os.remove(self.fname) super(CommandsWithSpaces, self).tearDown() def with_spaces(self, *args, **kwargs): kwargs['stdout'] = subprocess.PIPE p = subprocess.Popen(*args, **kwargs) self.addCleanup(p.stdout.close) self.assertEqual( p.stdout.read ().decode("mbcs"), "2 [%r, 'ab cd']" % self.fname ) def test_shell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd"), shell=1) def test_shell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1) def test_noshell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd")) def test_noshell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"]) def test_main(): unit_tests = (ProcessTestCase, POSIXProcessTestCase, Win32ProcessTestCase, ProcessTestCaseNoPoll, HelperFunctionTests, CommandsWithSpaces) test_support.run_unittest(*unit_tests) test_support.reap_children() if __name__ == "__main__": test_main() """ Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py. Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen) Original source: LaserJet IIP Printer User's Manual HP part no 33471-90901, Hewlet-Packard, June 1989. 
(Used with permission) """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='hp-roman8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> '\x81' # 0x81 -> '\x82' # 0x82 -> '\x83' # 0x83 -> '\x84' # 0x84 -> '\x85' # 0x85 -> '\x86' # 0x86 -> '\x87' # 0x87 -> '\x88' # 0x88 -> '\x89' # 0x89 -> '\x8a' # 0x8A -> '\x8b' # 0x8B -> '\x8c' # 0x8C -> '\x8d' # 0x8D -> '\x8e' # 0x8E -> '\x8f' # 0x8F -> '\x90' # 0x90 -> '\x91' # 0x91 -> '\x92' # 0x92 -> '\x93' # 0x93 -> '\x94' # 0x94 -> '\x95' # 0x95 -> '\x96' # 0x96 -> '\x97' # 0x97 -> '\x98' # 0x98 -> '\x99' # 0x99 -> '\x9a' # 0x9A -> '\x9b' # 0x9B -> '\x9c' # 0x9C -> '\x9d' # 0x9D -> '\x9e' # 0x9E -> '\x9f' # 0x9F -> '\xa0' # 0xA0 -> NO-BREAK SPACE '\xc0' # 0xA1 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc2' # 0xA2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc8' # 0xA3 -> LATIN CAPITAL LETTER E WITH GRAVE '\xca' # 0xA4 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0xA5 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xce' # 0xA6 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xA7 -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xb4' # 0xA8 -> ACUTE ACCENT '\u02cb' # 0xA9 -> MODIFIER LETTER GRAVE ACCENT (MANDARIN CHINESE FOURTH TONE) '\u02c6' # 0xAA -> MODIFIER LETTER CIRCUMFLEX ACCENT '\xa8' # 0xAB -> DIAERESIS '\u02dc' # 0xAC -> SMALL TILDE '\xd9' # 0xAD -> LATIN CAPITAL LETTER U WITH GRAVE '\xdb' # 0xAE -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\u20a4' # 0xAF -> LIRA 
SIGN '\xaf' # 0xB0 -> MACRON '\xdd' # 0xB1 -> LATIN CAPITAL LETTER Y WITH ACUTE '\xfd' # 0xB2 -> LATIN SMALL LETTER Y WITH ACUTE '\xb0' # 0xB3 -> DEGREE SIGN '\xc7' # 0xB4 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xe7' # 0xB5 -> LATIN SMALL LETTER C WITH CEDILLA '\xd1' # 0xB6 -> LATIN CAPITAL LETTER N WITH TILDE '\xf1' # 0xB7 -> LATIN SMALL LETTER N WITH TILDE '\xa1' # 0xB8 -> INVERTED EXCLAMATION MARK '\xbf' # 0xB9 -> INVERTED QUESTION MARK '\xa4' # 0xBA -> CURRENCY SIGN '\xa3' # 0xBB -> POUND SIGN '\xa5' # 0xBC -> YEN SIGN '\xa7' # 0xBD -> SECTION SIGN '\u0192' # 0xBE -> LATIN SMALL LETTER F WITH HOOK '\xa2' # 0xBF -> CENT SIGN '\xe2' # 0xC0 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xea' # 0xC1 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xf4' # 0xC2 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xfb' # 0xC3 -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xe1' # 0xC4 -> LATIN SMALL LETTER A WITH ACUTE '\xe9' # 0xC5 -> LATIN SMALL LETTER E WITH ACUTE '\xf3' # 0xC6 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0xC7 -> LATIN SMALL LETTER U WITH ACUTE '\xe0' # 0xC8 -> LATIN SMALL LETTER A WITH GRAVE '\xe8' # 0xC9 -> LATIN SMALL LETTER E WITH GRAVE '\xf2' # 0xCA -> LATIN SMALL LETTER O WITH GRAVE '\xf9' # 0xCB -> LATIN SMALL LETTER U WITH GRAVE '\xe4' # 0xCC -> LATIN SMALL LETTER A WITH DIAERESIS '\xeb' # 0xCD -> LATIN SMALL LETTER E WITH DIAERESIS '\xf6' # 0xCE -> LATIN SMALL LETTER O WITH DIAERESIS '\xfc' # 0xCF -> LATIN SMALL LETTER U WITH DIAERESIS '\xc5' # 0xD0 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xee' # 0xD1 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xd8' # 0xD2 -> LATIN CAPITAL LETTER O WITH STROKE '\xc6' # 0xD3 -> LATIN CAPITAL LETTER AE '\xe5' # 0xD4 -> LATIN SMALL LETTER A WITH RING ABOVE '\xed' # 0xD5 -> LATIN SMALL LETTER I WITH ACUTE '\xf8' # 0xD6 -> LATIN SMALL LETTER O WITH STROKE '\xe6' # 0xD7 -> LATIN SMALL LETTER AE '\xc4' # 0xD8 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xec' # 0xD9 -> LATIN SMALL LETTER I WITH GRAVE '\xd6' # 0xDA -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0xDB -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xc9' # 0xDC -> LATIN CAPITAL LETTER E WITH ACUTE '\xef' # 0xDD -> LATIN SMALL LETTER I WITH DIAERESIS '\xdf' # 0xDE -> LATIN SMALL LETTER SHARP S (GERMAN) '\xd4' # 0xDF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xc1' # 0xE0 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc3' # 0xE1 -> LATIN CAPITAL LETTER A WITH TILDE '\xe3' # 0xE2 -> LATIN SMALL LETTER A WITH TILDE '\xd0' # 0xE3 -> LATIN CAPITAL LETTER ETH (ICELANDIC) '\xf0' # 0xE4 -> LATIN SMALL LETTER ETH (ICELANDIC) '\xcd' # 0xE5 -> LATIN CAPITAL LETTER I WITH ACUTE '\xcc' # 0xE6 -> LATIN CAPITAL LETTER I WITH GRAVE '\xd3' # 0xE7 -> LATIN CAPITAL LETTER O WITH ACUTE '\xd2' # 0xE8 -> LATIN CAPITAL LETTER O WITH GRAVE '\xd5' # 0xE9 -> LATIN CAPITAL LETTER O WITH TILDE '\xf5' # 0xEA -> LATIN SMALL LETTER O WITH TILDE '\u0160' # 0xEB -> LATIN CAPITAL LETTER S WITH CARON '\u0161' # 0xEC -> LATIN SMALL LETTER S WITH CARON '\xda' # 0xED -> LATIN CAPITAL LETTER U WITH ACUTE '\u0178' # 0xEE -> LATIN CAPITAL LETTER Y WITH DIAERESIS '\xff' # 0xEF -> LATIN SMALL LETTER Y WITH DIAERESIS '\xde' # 0xF0 -> LATIN CAPITAL LETTER THORN (ICELANDIC) '\xfe' # 0xF1 -> LATIN SMALL LETTER THORN (ICELANDIC) '\xb7' # 0xF2 -> MIDDLE DOT '\xb5' # 0xF3 -> MICRO SIGN '\xb6' # 0xF4 -> PILCROW SIGN '\xbe' # 0xF5 -> VULGAR FRACTION THREE QUARTERS '\u2014' # 0xF6 -> EM DASH '\xbc' # 0xF7 -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xF8 -> VULGAR FRACTION ONE HALF '\xaa' # 0xF9 -> FEMININE ORDINAL INDICATOR '\xba' # 0xFA -> MASCULINE ORDINAL INDICATOR 
'\xab' # 0xFB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\u25a0' # 0xFC -> BLACK SQUARE '\xbb' # 0xFD -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xb1' # 0xFE -> PLUS-MINUS SIGN '\ufffe' ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table) import cherrypy # 這是 MAN 類別的定義 ''' # 在 application 中導入子模組 import programs.cdag7.man as cdag7_man # 加入 cdag7 模組下的 man.py 且以子模組 man 對應其 MAN() 類別 root.cdag7.man = cdag7_man.MAN() # 完成設定後, 可以利用 /cdag7/man/assembly # 呼叫 man.py 中 MAN 類別的 assembly 方法 ''' class MAN(object): # 各組利用 index 引導隨後的程式執行 @cherrypy.expose def index(self, *args, **kwargs): outstring = ''' 這是 2014CDA 協同專案下的 cdag7 模組下的 MAN 類別.

Execute the assembly method in the MAN class

Please make sure the following parts are in the V:/home/lego/man directory, and open a blank Creo assembly file.
lego_man.7z (right-click and save as a .7z file)
''' return outstring @cherrypy.expose def assembly(self, *args, **kwargs): outstring = ''' ''' return outstring import collections import hashlib from django.contrib.auth.hashers import BasePasswordHasher, mask_hash from django.utils.crypto import constant_time_compare from django.utils.translation import ugettext class Sha256Hasher(BasePasswordHasher): """ SHA-256 password hasher. """ algorithm = 'sha256' digest = hashlib.sha256 def encode(self, password, salt): assert password assert salt and '$' not in salt hash = self.digest(salt + password).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return collections.OrderedDict([ (ugettext('algorithm'), algorithm), (ugettext('salt'), mask_hash(salt, show=2)), (ugettext('hash'), mask_hash(hash)), ]) #!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code related to line charts.""" import copy import warnings from graphy import common class LineStyle(object): """Represents the style for a line on a line chart. Also provides some convenient presets. Object attributes (Passed directly to the Google Chart API. Check there for details): width: Width of the line on: Length of a line segment (for dashed/dotted lines) off: Length of a break (for dashed/dotted lines) color: Color of the line. A hex string, like 'ff0000' for red. Optional, AutoColor will fill this in for you automatically if empty. Some common styles, such as LineStyle.dashed, are available: LineStyle.solid() LineStyle.dashed() LineStyle.dotted() LineStyle.thick_solid() LineStyle.thick_dashed() LineStyle.thick_dotted() """ # Widths THIN = 1 THICK = 2 # Patterns # ((on, off) tuples, as passed to LineChart.AddLine) SOLID = (1, 0) DASHED = (8, 4) DOTTED = (2, 4) def __init__(self, width, on, off, color=None): """Construct a LineStyle. See class docstring for details on args.""" self.width = width self.on = on self.off = off self.color = color @classmethod def solid(cls): return LineStyle(1, 1, 0) @classmethod def dashed(cls): return LineStyle(1, 8, 4) @classmethod def dotted(cls): return LineStyle(1, 2, 4) @classmethod def thick_solid(cls): return LineStyle(2, 1, 0) @classmethod def thick_dashed(cls): return LineStyle(2, 8, 4) @classmethod def thick_dotted(cls): return LineStyle(2, 2, 4) class LineChart(common.BaseChart): """Represents a line chart.""" def __init__(self, points=None): super(LineChart, self).__init__() if points is not None: self.AddLine(points) def AddLine(self, points, label=None, color=None, pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None): """Add a new line to the chart. This is a convenience method which constructs the DataSeries and appends it for you. It returns the new series. 
points: List of equally-spaced y-values for the line label: Name of the line (used for the legend) color: Hex string, like 'ff0000' for red pattern: Tuple for (length of segment, length of gap). i.e. LineStyle.DASHED width: Width of the line (i.e. LineStyle.THIN) markers: List of Marker objects to attach to this line (see DataSeries for more info) """ if color is not None and isinstance(color[0], common.Marker): warnings.warn('Your code may be broken! ' 'You passed a list of Markers instead of a color. The ' 'old argument order (markers before color) is deprecated.', DeprecationWarning, stacklevel=2) style = LineStyle(width, pattern[0], pattern[1], color=color) series = common.DataSeries(points, label=label, style=style, markers=markers) self.data.append(series) return series def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None, label=None): """DEPRECATED""" warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ', DeprecationWarning, stacklevel=2) return self.AddLine(points, color=color, width=style.width, pattern=(style.on, style.off), markers=markers, label=label) class Sparkline(LineChart): """Represent a sparkline. These behave like LineCharts, mostly, but come without axes. """ # urllib3/poolmanager.py # Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import logging from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import connection_from_url, port_by_scheme from .request import RequestMethods from .util import parse_url __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] pool_classes_by_scheme = { 'http': HTTPConnectionPool, 'https': HTTPSConnectionPool, } log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', 'ssl_version') class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: :: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) def _new_pool(self, scheme, host, port): """ Create a new :class:`ConnectionPool` based on host, port and scheme. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = pool_classes_by_scheme[scheme] kwargs = self.connection_pool_kw if scheme == 'http': kwargs = self.connection_pool_kw.copy() for kw in SSL_KEYWORDS: kwargs.pop(kw, None) return pool_cls(host, port, **kwargs) def clear(self): """ Empty our store of pools and direct them all to close. 
This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme='http'): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. """ scheme = scheme or 'http' port = port or port_by_scheme.get(scheme, 80) pool_key = (scheme, host, port) # If the scheme, host, or port doesn't match existing open connections, # open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type pool = self._new_pool(scheme, host, port) self.pools[pool_key] = pool return pool def connection_from_url(self, url): """ Similar to :func:`urllib3.connectionpool.connection_from_url` but doesn't pass any additional parameters to the :class:`urllib3.connectionpool.ConnectionPool` constructor. Additional parameters are taken from the :class:`.PoolManager` constructor. """ u = parse_url(url) return self.connection_from_host(u.host, port=u.port, scheme=u.scheme) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw['assert_same_host'] = False kw['redirect'] = False if 'headers' not in kw: kw['headers'] = self.headers response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response if response.status == 303: method = 'GET' log.info("Redirecting %s -> %s" % (url, redirect_location)) kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown kw['redirect'] = redirect return self.urlopen(method, redirect_location, **kw) class ProxyManager(RequestMethods): """ Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method will make requests to any url through the defined proxy. The ProxyManager class will automatically set the 'Host' header if it is not provided. """ def __init__(self, proxy_pool): self.proxy_pool = proxy_pool def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. """ headers_ = {'Accept': '*/*'} host = parse_url(url).host if host: headers_['Host'] = host if headers: headers_.update(headers) return headers_ def urlopen(self, method, url, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." kw['assert_same_host'] = False kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers')) return self.proxy_pool.urlopen(method, url, **kw) def proxy_from_url(url, **pool_kw): proxy_pool = connection_from_url(url, **pool_kw) return ProxyManager(proxy_pool) from ..Qt import QtGui, QtCore from .. import functions as fn import numpy as np __all__ = ['ArrowItem'] class ArrowItem(QtGui.QGraphicsPathItem): """ For displaying scale-invariant arrows. For arrows pointing to a location on a curve, see CurveArrow """ def __init__(self, **opts): """ Arrows can be initialized with any keyword arguments accepted by the setStyle() method. 
""" self.opts = {} QtGui.QGraphicsPathItem.__init__(self, opts.get('parent', None)) if 'size' in opts: opts['headLen'] = opts['size'] if 'width' in opts: opts['headWidth'] = opts['width'] pos = opts.pop('pos', (0, 0)) defaultOpts = { 'pxMode': True, 'angle': -150, ## If the angle is 0, the arrow points left 'headLen': 20, 'headWidth': None, 'tipAngle': 25, 'baseAngle': 0, 'tailLen': None, 'tailWidth': 3, 'pen': (200,200,200), 'brush': (50,50,200), } defaultOpts.update(opts) self.setStyle(**defaultOpts) # for backward compatibility self.setPos(*pos) def setStyle(self, **opts): """ Changes the appearance of the arrow. All arguments are optional: ====================== ================================================= **Keyword Arguments:** angle Orientation of the arrow in degrees. Default is 0; arrow pointing to the left. headLen Length of the arrow head, from tip to base. default=20 headWidth Width of the arrow head at its base. If headWidth is specified, it overrides tipAngle. tipAngle Angle of the tip of the arrow in degrees. Smaller values make a 'sharper' arrow. default=25 baseAngle Angle of the base of the arrow head. Default is 0, which means that the base of the arrow head is perpendicular to the arrow tail. tailLen Length of the arrow tail, measured from the base of the arrow head to the end of the tail. If this value is None, no tail will be drawn. default=None tailWidth Width of the tail. default=3 pen The pen used to draw the outline of the arrow. brush The brush used to fill the arrow. pxMode If True, then the arrow is drawn as a fixed size regardless of the scale of its parents (including the ViewBox zoom level). ====================== ================================================= """ arrowOpts = ['headLen', 'tipAngle', 'baseAngle', 'tailLen', 'tailWidth', 'headWidth'] allowedOpts = ['angle', 'pen', 'brush', 'pxMode'] + arrowOpts needUpdate = False for k,v in opts.items(): if k not in allowedOpts: raise KeyError('Invalid arrow style option "%s"' % k) if self.opts.get(k) != v: needUpdate = True self.opts[k] = v if not needUpdate: return opt = dict([(k,self.opts[k]) for k in arrowOpts if k in self.opts]) tr = QtGui.QTransform() tr.rotate(self.opts['angle']) self.path = tr.map(fn.makeArrowPath(**opt)) self.setPath(self.path) self.setPen(fn.mkPen(self.opts['pen'])) self.setBrush(fn.mkBrush(self.opts['brush'])) if self.opts['pxMode']: self.setFlags(self.flags() | self.ItemIgnoresTransformations) else: self.setFlags(self.flags() & ~self.ItemIgnoresTransformations) def paint(self, p, *args): p.setRenderHint(QtGui.QPainter.Antialiasing) QtGui.QGraphicsPathItem.paint(self, p, *args) #p.setPen(fn.mkPen('r')) #p.setBrush(fn.mkBrush(None)) #p.drawRect(self.boundingRect()) def shape(self): #if not self.opts['pxMode']: #return QtGui.QGraphicsPathItem.shape(self) return self.path ## dataBounds and pixelPadding methods are provided to ensure ViewBox can ## properly auto-range def dataBounds(self, ax, frac, orthoRange=None): pw = 0 pen = self.pen() if not pen.isCosmetic(): pw = pen.width() * 0.7072 if self.opts['pxMode']: return [0,0] else: br = self.boundingRect() if ax == 0: return [br.left()-pw, br.right()+pw] else: return [br.top()-pw, br.bottom()+pw] def pixelPadding(self): pad = 0 if self.opts['pxMode']: br = self.boundingRect() pad += (br.width()**2 + br.height()**2) ** 0.5 pen = self.pen() if pen.isCosmetic(): pad += max(1, pen.width()) * 0.7072 return pad import json from .oauth import OAuth2Test class BehanceOAuth2Test(OAuth2Test): backend_path = 
'social_core.backends.behance.BehanceOAuth2' access_token_body = json.dumps({ 'access_token': 'foobar', 'valid': 1, 'user': { 'username': 'foobar', 'city': 'Foo City', 'first_name': 'Foo', 'last_name': 'Bar', 'display_name': 'Foo Bar', 'url': 'http://www.behance.net/foobar', 'country': 'Fooland', 'company': '', 'created_on': 1355152329, 'state': '', 'fields': [ 'Programming', 'Web Design', 'Web Development' ], 'images': { '32': 'https://www.behance.net/assets/img/profile/' 'no-image-32.jpg', '50': 'https://www.behance.net/assets/img/profile/' 'no-image-50.jpg', '115': 'https://www.behance.net/assets/img/profile/' 'no-image-138.jpg', '129': 'https://www.behance.net/assets/img/profile/' 'no-image-138.jpg', '138': 'https://www.behance.net/assets/img/profile/' 'no-image-138.jpg', '78': 'https://www.behance.net/assets/img/profile/' 'no-image-78.jpg' }, 'id': 1010101, 'occupation': 'Software Developer' } }) expected_username = 'foobar' def test_login(self): self.do_login() def test_partial_pipeline(self): self.do_partial_pipeline() from tests import LimitedTestCase from eventlet import greenthread from eventlet.support import greenlets as greenlet _g_results = [] def passthru(*args, **kw): _g_results.append((args, kw)) return args, kw def waiter(a): greenthread.sleep(0.1) return a class Asserts(object): def assert_dead(self, gt): if hasattr(gt, 'wait'): self.assertRaises(greenlet.GreenletExit, gt.wait) assert gt.dead assert not gt class Spawn(LimitedTestCase, Asserts): def tearDown(self): global _g_results super(Spawn, self).tearDown() _g_results = [] def test_simple(self): gt = greenthread.spawn(passthru, 1, b=2) self.assertEqual(gt.wait(), ((1,), {'b': 2})) self.assertEqual(_g_results, [((1,), {'b': 2})]) def test_n(self): gt = greenthread.spawn_n(passthru, 2, b=3) assert not gt.dead greenthread.sleep(0) assert gt.dead self.assertEqual(_g_results, [((2,), {'b': 3})]) def test_kill(self): gt = greenthread.spawn(passthru, 6) greenthread.kill(gt) self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) greenthread.kill(gt) self.assert_dead(gt) def test_kill_meth(self): gt = greenthread.spawn(passthru, 6) gt.kill() self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) gt.kill() self.assert_dead(gt) def test_kill_n(self): gt = greenthread.spawn_n(passthru, 7) greenthread.kill(gt) self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) greenthread.kill(gt) self.assert_dead(gt) def test_link(self): results = [] def link_func(g, *a, **kw): results.append(g) results.append(a) results.append(kw) gt = greenthread.spawn(passthru, 5) gt.link(link_func, 4, b=5) self.assertEqual(gt.wait(), ((5,), {})) self.assertEqual(results, [gt, (4,), {'b': 5}]) def test_link_after_exited(self): results = [] def link_func(g, *a, **kw): results.append(g) results.append(a) results.append(kw) gt = greenthread.spawn(passthru, 5) self.assertEqual(gt.wait(), ((5,), {})) gt.link(link_func, 4, b=5) self.assertEqual(results, [gt, (4,), {'b': 5}]) def test_link_relinks(self): # test that linking in a linked func doesn't cause infinite recursion. 
called = [] def link_func(g): g.link(link_func_pass) def link_func_pass(g): called.append(True) gt = greenthread.spawn(passthru) gt.link(link_func) gt.wait() self.assertEqual(called, [True]) class SpawnAfter(Spawn): def test_basic(self): gt = greenthread.spawn_after(0.1, passthru, 20) self.assertEqual(gt.wait(), ((20,), {})) def test_cancel(self): gt = greenthread.spawn_after(0.1, passthru, 21) gt.cancel() self.assert_dead(gt) def test_cancel_already_started(self): gt = greenthread.spawn_after(0, waiter, 22) greenthread.sleep(0) gt.cancel() self.assertEqual(gt.wait(), 22) def test_kill_already_started(self): gt = greenthread.spawn_after(0, waiter, 22) greenthread.sleep(0) gt.kill() self.assert_dead(gt) class SpawnAfterLocal(LimitedTestCase, Asserts): def setUp(self): super(SpawnAfterLocal, self).setUp() self.lst = [1] def test_timer_fired(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.sleep(0.2) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.3) assert self.lst == [], self.lst def test_timer_cancelled_upon_greenlet_exit(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.2) assert self.lst == [1], self.lst def test_spawn_is_not_cancelled(self): def func(): greenthread.spawn(self.lst.pop) # exiting immediatelly, but self.lst.pop must be called greenthread.spawn(func) greenthread.sleep(0.1) assert self.lst == [], self.lst import os import unittest from queue import Queue from bears.c_languages.codeclone_detection.ClangCloneDetectionBear import ( ClangCloneDetectionBear) from bears.c_languages.codeclone_detection.ClangFunctionDifferenceBear import ( ClangFunctionDifferenceBear) from tests.BearTestHelper import generate_skip_decorator from coalib.settings.Section import Section from coalib.settings.Setting import Setting @generate_skip_decorator(ClangCloneDetectionBear) class ClangCloneDetectionBearTest(unittest.TestCase): def setUp(self): self.base_test_path = os.path.abspath(os.path.join( os.path.dirname(__file__), "clone_detection_samples")) self.section = Section("default") self.section.append(Setting("files", "", origin=self.base_test_path)) self.section.append(Setting("max_clone_difference", "0.308")) self.clone_files = [os.listdir(os.path.join(self.base_test_path, "clones"))] def test_dependencies(self): self.assertIn(ClangFunctionDifferenceBear, ClangCloneDetectionBear.BEAR_DEPS) def test_configuration(self): self.section.append(Setting("average_calculation", "true")) self.section.append(Setting("poly_postprocessing", "false")) self.section.append(Setting("exp_postprocessing", "true")) self.clone_files = [ os.path.join(self.base_test_path, "clones", "s4c.c")] # Ignore the results, it may be possible that it still passes :) self.check_clone_detection_bear(self.clone_files, lambda results, msg: True) def test_non_clones(self): self.non_clone_files = [ os.path.join(self.base_test_path, "non_clones", elem) for elem in os.listdir(os.path.join(self.base_test_path, "non_clones"))] self.check_clone_detection_bear(self.non_clone_files, lambda results, msg: self.assertEqual(results, [], msg)) def test_clones(self): self.clone_files = [ os.path.join(self.base_test_path, "clones", elem) for elem in os.listdir(os.path.join(self.base_test_path, "clones"))] self.check_clone_detection_bear(self.clone_files, lambda results, msg: self.assertNotEqual(results, [], msg)) def check_clone_detection_bear(self, files, result_check_function): """ Checks the 
results of the CloneDetectionBear with the given function. :param files: The files to check. Each will be checked on its own. :param result_check_function: A function yielding an exception if the results are invalid. """ for file in files: difference_results = ClangFunctionDifferenceBear( {file: ""}, self.section, Queue()).run_bear_from_section([], {}) uut = ClangCloneDetectionBear( {file: ""}, self.section, Queue()) arg_dict = {"dependency_results": {ClangFunctionDifferenceBear.__name__: list(difference_results)}} result_check_function( list(uut.run_bear_from_section([], arg_dict)), "while analyzing "+file) """miscellaneous file manipulation utilities """ import numpy as np import pysal as ps import pandas as pd def check_dups(li): """checks duplicates in list of ID values ID values must be read in as a list __author__ = "Luc Anselin " Arguments --------- li : list of ID values Returns ------- a list with the duplicate IDs """ return list(set([x for x in li if li.count(x) > 1])) def dbfdups(dbfpath,idvar): """checks duplicates in a dBase file ID variable must be specified correctly __author__ = "Luc Anselin " Arguments --------- dbfpath : file path to dBase file idvar : ID variable in dBase file Returns ------- a list with the duplicate IDs """ db = ps.open(dbfpath,'r') li = db.by_col(idvar) return list(set([x for x in li if li.count(x) > 1])) def df2dbf(df, dbf_path, my_specs=None): ''' Convert a pandas.DataFrame into a dbf. __author__ = "Dani Arribas-Bel , Luc Anselin " ... Arguments --------- df : DataFrame Pandas dataframe object to be entirely written out to a dbf dbf_path : str Path to the output dbf. It is also returned by the function my_specs : list List with the field_specs to use for each column. Defaults to None and applies the following scheme: * int: ('N', 14, 0) - for all ints * float: ('N', 14, 14) - for all floats * str: ('C', 14, 0) - for string, object and category with all variants for different type sizes Note: use of dtypes.name may not be fully robust, but preferred apprach of using isinstance seems too clumsy ''' if my_specs: specs = my_specs else: """ type2spec = {int: ('N', 20, 0), np.int64: ('N', 20, 0), np.int32: ('N', 20, 0), np.int16: ('N', 20, 0), np.int8: ('N', 20, 0), float: ('N', 36, 15), np.float64: ('N', 36, 15), np.float32: ('N', 36, 15), str: ('C', 14, 0) } types = [type(df[i].iloc[0]) for i in df.columns] """ # new approach using dtypes.name to avoid numpy name issue in type type2spec = {'int': ('N', 20, 0), 'int8': ('N', 20, 0), 'int16': ('N', 20, 0), 'int32': ('N', 20, 0), 'int64': ('N', 20, 0), 'float': ('N', 36, 15), 'float32': ('N', 36, 15), 'float64': ('N', 36, 15), 'str': ('C', 14, 0), 'object': ('C', 14, 0), 'category': ('C', 14, 0) } types = [df[i].dtypes.name for i in df.columns] specs = [type2spec[t] for t in types] db = ps.open(dbf_path, 'w') db.header = list(df.columns) db.field_spec = specs for i, row in df.T.iteritems(): db.write(row) db.close() return dbf_path def dbf2df(dbf_path, index=None, cols=False, incl_index=False): ''' Read a dbf file as a pandas.DataFrame, optionally selecting the index variable and which columns are to be loaded. __author__ = "Dani Arribas-Bel " ... Arguments --------- dbf_path : str Path to the DBF file to be read index : str Name of the column to be used as the index of the DataFrame cols : list List with the names of the columns to be read into the DataFrame. Defaults to False, which reads the whole dbf incl_index : Boolean If True index is included in the DataFrame as a column too. 
Defaults to False Returns ------- df : DataFrame pandas.DataFrame object created ''' db = ps.open(dbf_path) if cols: if incl_index: cols.append(index) vars_to_read = cols else: vars_to_read = db.header data = dict([(var, db.by_col(var)) for var in vars_to_read]) if index: index = db.by_col(index) db.close() return pd.DataFrame(data, index=index, columns=vars_to_read) else: db.close() return pd.DataFrame(data,columns=vars_to_read) def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2): ''' Wrapper function to merge two dbf files into a new dbf file. __author__ = "Luc Anselin " Uses dbf2df and df2dbf to read and write the dbf files into a pandas DataFrame. Uses all default settings for dbf2df and df2dbf (see docs for specifics). ... Arguments --------- dbf1_path : str Path to the first (left) dbf file dbf2_path : str Path to the second (right) dbf file out_path : str Path to the output dbf file (returned by the function) joinkey1 : str Variable name for the key in the first dbf. Must be specified. Key must take unique values. joinkey2 : str Variable name for the key in the second dbf. Must be specified. Key must take unique values. Returns ------- dbfpath : path to output file ''' df1 = dbf2df(dbf1_path,index=joinkey1) df2 = dbf2df(dbf2_path,index=joinkey2) dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False) dp = df2dbf(dfbig,out_path) return dp def dta2dbf(dta_path,dbf_path): """ Wrapper function to convert a stata dta file into a dbf file. __author__ = "Luc Anselin " Uses df2dbf to write the dbf files from a pandas DataFrame. Uses all default settings for df2dbf (see docs for specifics). ... Arguments --------- dta_path : str Path to the Stata dta file dbf_path : str Path to the output dbf file Returns ------- dbf_path : path to output file """ db = pd.read_stata(dta_path) dp = df2dbf(db,dbf_path) return dp #-*- coding:utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 OpenERP SA (). All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
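# --- Added illustration, not part of the original sources: a minimal sketch of
# --- how the dbf helpers above fit together. The module name `fileutils`, the
# --- .dbf paths and the field names are all hypothetical; legacy pysal is assumed.
def _example_dbf_roundtrip():
    from fileutils import dbf2df, df2dbf  # hypothetical home of the helpers above
    # Read two columns, indexing rows by the (hypothetical) POLYID field.
    df = dbf2df('tracts.dbf', index='POLYID', cols=['POP', 'AREA'])
    # Derived float column; df2dbf will map it to the ('N', 36, 15) field spec.
    df['DENSITY'] = df['POP'] / df['AREA']
    # df2dbf returns the path it wrote to.
    return df2dbf(df, 'tracts_density.dbf')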
# ############################################################################## from openerp.report import report_sxw from openerp.osv import osv from openerp.addons.hr_payroll import report class payslip_details_report_in(report.report_payslip_details.payslip_details_report): def __init__(self, cr, uid, name, context): super(payslip_details_report_in, self).__init__(cr, uid, name, context) self.localcontext.update({ 'get_details_by_rule_category': self.get_details_by_rule_category, }) class wrapped_report_payslipdetailsin(osv.AbstractModel): _name = 'report.l10n_in_hr_payroll.report_payslipdetails' _inherit = 'report.abstract_report' _template = 'l10n_in_hr_payroll.report_payslipdetails' _wrapped_report_class = payslip_details_report_in # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: ################################# LICENSE ################################## # Copyright (c) 2009, South African Astronomical Observatory (SAAO) # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer # # in the documentation and/or other materials provided with the # # distribution. # # * Neither the name of the South African Astronomical Observatory # # (SAAO) nor the names of its contributors may be used to endorse # # or promote products derived from this software without specific # # prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR # # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # # DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY # # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # ############################################################################ #!/usr/bin/env python """ SALTGAIN corrects the multiplies images by a constant factor appropriate for gain correction. Each CCD in SALT's SALTICAM and RSS instruments has two readout nodes, SALTICAM has a two-CCD mosaic and RSS has a three-CCD mosaic. Each amplifier has a specific gain factor which varies slowly over time but which is constant across the amplifiers. Gain values depend on the readout speed and gain setting of the CCD. For all possible permutations, gains are stored in an ascii table which is updated periodically. Saltgain extracts gains from the ascii table and applies them to raw data. New gain and readout noise values will be written to the header keywords of each HDU. Keyword writing can also occur without peforming the gain correction itself. If the gain correction is performed, a keyword, GAINMULT is added to the image extension with the value 1.0. 
If the gain correction is not performed, GAINMULT will contain the gain factor recorded in the ascii table. The purpose of the GAINMULT keyword is to report what multiplicative factor is required to gain correct an image. Based on data in image keywords, e.g. gain setting, readout speed and amplifier number, saltgain will extract the correct gain and readout noise values from the ascii table and update keywords and optionally perform the gain correction. Optionally, saltgain will extract the values from the data headers and perform the gain correction using those values. Author Version Date ----------------------------------------------- Martin Still (SAAO) 1.0 31 Aug 2006 S. M. Crawford (SAAO) 2.0 20 May 2011 Updates ----------------------------------------------- 20110520 --the new error handling --able to read in values from the headers --able to write out to an outimages or outpref --ability to do non-linear gain correction TODO ----------------------------------------------- -Keywords and formats for non-linear gain correction need to be finalized -ability for config file to handle non-linear gains """ from __future__ import with_statement from pyraf import iraf import saltsafekey as saltkey import saltsafeio as saltio from saltsafelog import logging, history from salterror import SaltError debug=True # ----------------------------------------------------------- # core routine def saltgain(images,outimages, outpref, gaindb=None,usedb=False, mult=True, clobber=True, logfile='salt.log',verbose=True): #start logging with logging(logfile,debug) as log: # Check the input images infiles = saltio.argunpack('Input',images) # create list of output files outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'') #verify that the input and output lists are the same length saltio.comparelists(infiles,outfiles,'Input','output') # read in the database file if usedb is true if usedb: gaindb = gaindb.strip() dblist= saltio.readgaindb(gaindb) else: dblist=[] for img, oimg in zip(infiles, outfiles): #open the fits file struct=saltio.openfits(img) # identify instrument instrume,keyprep,keygain,keybias,keyxtalk,keyslot = saltkey.instrumid(struct) # has file been prepared already? if saltkey.found(keygain, struct[0]): message='SALTGAIN: %s has already been gain-corrected' % img raise SaltError(message) # gain correct the data struct = gain(struct,mult=mult, usedb=usedb, dblist=dblist, log=log, verbose=verbose) # housekeeping keywords fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref']) saltkey.housekeeping(struct[0],keygain, 'Images have been gain corrected', hist) # write FITS file saltio.writefits(struct,oimg, clobber=clobber) saltio.closefits(struct) # ----------------------------------------------------------- # gain correct data def gain(struct,mult=True,usedb=False, dblist=None, ampccd=2, log=None, verbose=True): """gain processes an image hduList and gain corrects each amplifier. It can either use gain settings in the header or those supplied in a config file, which would be supplied in the dblist (see helpfile for structure of the config file). If variance frames exist, it will update those for changes in the header value as well.
In the end, it will update the gain with a value of one, signifying that the data have been transformed from ADU into e-. The program will look for the non-linear gain settings which are given by: e = GAIN*(1 + GAIN1*1E-6*ADU)*ADU mult--if true, multiply the gains usedb--use the values in the dblist, if false use the header values dblist--values for the gain and readnoise from the gain database ampccd--number of amplifiers per ccd dblist should have the following lists: speed, rate, gain, noise, bias, amp """ #get the infile name infile=saltkey.getimagename(struct[0]) #how many science extensions nsciext = saltkey.get('NSCIEXT',struct[0]) #how many data extensions nextend = saltkey.get('NSCIEXT',struct[0]) # how many amplifiers? amplifiers = ampccd*saltkey.get('NCCDS',struct[0]) #read the gain and rospeed for the image gainset = saltkey.get('GAINSET',struct[0]) rospeed = saltkey.get('ROSPEED',struct[0]) #loop through each amplifier and gain correct it if log: message = '%28s %6s %5s %3s %5s %5s' \ % ('HDU','GAIN','SPEED','AMP','GAIN','NOISE') log.message('\n ---------------------------------------------------', \ with_header=False, with_stdout=verbose) log.message(message, with_header=False, with_stdout=verbose) log.message(' ---------------------------------------------------', \ with_header=False, with_stdout=verbose) for i in range(nsciext): hdu = i + 1 amp = i%amplifiers+1 #get the gain and rdnoise values for the array if usedb: gain, rdnoise=get_values(dblist, gainset, rospeed, amp) gain1=0 else: gain = saltkey.get('GAIN',struct[hdu]) rdnoise = saltkey.get('RDNOISE',struct[hdu]) try: gain1=saltkey.get('GAIN1',struct[hdu]) except: gain1=0 if mult: #correct the gain gainmult=1 try: data=struct[hdu].data struct[hdu].data=gain*data+gain1*data**2 except Exception, e: msg='Cannot gain correct %s[%i] because %s' % (infile, hdu, e) raise SaltError(msg) #correct the variance frame if saltkey.found('VAREXT', struct[hdu]): vhdu=saltkey.get('VAREXT', struct[hdu]) try: vdata=struct[vhdu].data struct[vhdu].data=vdata*gain*(1+2*gain1*1e-6*data) except Exception, e: msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e) raise SaltError(msg) else: gainmult=gain #update the headers if usedb: saltkey.put('GAIN',gain,struct[hdu]) saltkey.put('RDNOISE',rdnoise,struct[hdu]) #add a keyword indicating what action was taken saltkey.new('GAINMULT',gainmult,'Gain multiplication', struct[hdu]) #if logging is true, then print out the following information if log: message = '%25s[%1d] %6s %5s %2s %6.2f %5.2f' \ % (infile,hdu,gainset,rospeed,amp, gain, rdnoise) log.message(message, with_header=False, with_stdout=verbose) #just to make it look pretty if log: log.message('', with_header=False, with_stdout=verbose) return struct def get_values(dblist, gainset, rospeed, amp): """Get values for gain and rdnoise from the dblist.
The input of the dblist should be: dblist should have the following lists: speed, rate, gain, noise, bias, amp """ #default values in case none are found gain=1 rdnoise=5 found=False dbspeed, dbrate, dbgain, dbnoise, dbbias, dbamp=dblist #check for the gain if gainset not in dbrate: raise SaltError('GAINSET=%s does not match any value in gaindb file' % gainset) if rospeed not in dbspeed: raise SaltError('ROSPEED=%s does not match any value in gaindb file' % rospeed) #loop through the values and find the corresponding gain/rdnoise for i in range(len(dbspeed)): if dbspeed[i].strip().upper()==rospeed.strip().upper(): if dbrate[i].strip().upper()==gainset.strip().upper(): if int(dbamp[i])==int(amp): gain=float(dbgain[i]) rdnoise=float(dbnoise[i]) found=True return gain, rdnoise if not found: msg='Could not find a corresponding setting in the gaindb file for gainset=%s, rospeed=%s, amp=%i' \ % (gainset, rospeed, amp) raise SaltError(msg) # ----------------------------------------------------------- # main code if not iraf.deftask('saltgain'): parfile = iraf.osfn("saltred$saltgain.par") t = iraf.IrafTaskFactory(taskname="saltgain",value=parfile,function=saltgain, pkgname='saltred') #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_config short_description: Manage BIG-IP configuration sections description: - Manages a BIG-IP configuration by allowing TMSH commands that modify the running configuration, or merge SCF formatted files into the running configuration. Additionally, this module is of significant importance because it allows you to save your running configuration to disk. Since the F5 modules only manipulate the running configuration, it is important that you utilize this module to save that running config. version_added: 2.4 options: save: description: - The C(save) argument instructs the module to save the running-config to startup-config. - This operation is performed after any changes are made to the current running config. If no changes are made, the configuration is still saved to the startup config. - This option will always cause the module to return changed. type: bool default: yes reset: description: - Loads the default configuration on the device. - If this option is specified, the default configuration will be loaded before any commands or other provided configuration is run. type: bool default: no merge_content: description: - Loads the specified configuration that you want to merge into the running configuration. This is equivalent to using the C(tmsh) command C(load sys config from-terminal merge). - If you need to read configuration from a file or template, use Ansible's C(file) or C(template) lookup plugins respectively. type: str verify: description: - Validates the specified configuration to see whether it is valid to replace the running configuration. - The running configuration will not be changed. - When this parameter is set to C(yes), no change will be reported by the module.
type: bool default: no extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Save the running configuration of the BIG-IP bigip_config: save: yes provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Reset the BIG-IP configuration, for example, to RMA the device bigip_config: reset: yes save: yes provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Load an SCF configuration bigip_config: merge_content: "{{ lookup('file', '/path/to/config.scf') }}" provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost ''' RETURN = r''' stdout: description: The set of responses from the options returned: always type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always type: list sample: [['...', '...'], ['...'], ['...']] ''' try: from StringIO import StringIO except ImportError: from io import StringIO import os import tempfile from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.icontrol import upload_file except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.icontrol import upload_file class Parameters(AnsibleF5Parameters): returnables = ['stdout', 'stdout_lines'] def to_return(self): result = {} for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = Parameters(params=self.module.params) self.changes = Parameters() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = Parameters(params=changed) def _to_lines(self, stdout): lines = list() for item in stdout: if isinstance(item, str): item = str(item).split('\n') lines.append(item) return lines def exec_module(self): result = {} changed = self.execute() result.update(**self.changes.to_return()) result.update(dict(changed=changed)) return result def execute(self): responses = [] if self.want.reset: response = self.reset() responses.append(response) if self.want.merge_content: if self.want.verify: response = self.merge(verify=True) responses.append(response) else: response = self.merge(verify=False) responses.append(response) if self.want.save: response = self.save() responses.append(response) self._detect_errors(responses) changes = { 'stdout': responses, 'stdout_lines': self._to_lines(responses) } self.changes = Parameters(params=changes) if self.want.verify: return False return True def _detect_errors(self, stdout): errors = [ 'Unexpected Error:' ] msg = [x for x in stdout for y in errors if y 
in x] if msg: # Error only contains the lines that include the error raise F5ModuleError(' '.join(msg)) def reset(self): if self.module.check_mode: return True return self.reset_device() def reset_device(self): command = 'tmsh load sys config default' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) def merge(self, verify=True): temp_name = next(tempfile._get_candidate_names()) remote_path = "/var/config/rest/downloads/{0}".format(temp_name) temp_path = '/tmp/' + temp_name if self.module.check_mode: return True self.upload_to_device(temp_name) self.move_on_device(remote_path) response = self.merge_on_device( remote_path=temp_path, verify=verify ) self.remove_temporary_file(remote_path=temp_path) return response def merge_on_device(self, remote_path, verify=True): command = 'tmsh load sys config file {0} merge'.format( remote_path ) if verify: command += ' verify' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) def remove_temporary_file(self, remote_path): uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs=remote_path ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def move_on_device(self, remote_path): uri = "https://{0}:{1}/mgmt/tm/util/unix-mv".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='{0} /tmp/{1}'.format( remote_path, os.path.basename(remote_path) ) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def upload_to_device(self, temp_name): template = StringIO(self.want.merge_content) url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( self.client.provider['server'], self.client.provider['server_port'] ) try: upload_file(self.client, url, template, temp_name) except F5ModuleError: raise F5ModuleError( "Failed to upload the file." 
) def save(self): if self.module.check_mode: return True return self.save_on_device() def save_on_device(self): command = 'tmsh save sys config' uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(command) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( reset=dict( type='bool', default=False ), merge_content=dict(), verify=dict( type='bool', default=False ), save=dict( type='bool', default='yes' ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main() """Class for post-handshake certificate checking.""" from utils.cryptomath import hashAndBase64 from X509 import X509 from X509CertChain import X509CertChain from errors import * class Checker: """This class is passed to a handshake function to check the other party's certificate chain. If a handshake function completes successfully, but the Checker judges the other party's certificate chain to be missing or inadequate, a subclass of L{tlslite.errors.TLSAuthenticationError} will be raised. Currently, the Checker can check either an X.509 or a cryptoID chain (for the latter, cryptoIDlib must be installed). """ def __init__(self, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, checkResumedSession=False): """Create a new Checker instance. You must pass in one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) @type cryptoID: str @param cryptoID: A cryptoID which the other party's certificate chain must match. The cryptoIDlib module must be installed. Mutually exclusive with all of the 'x509...' arguments. @type protocol: str @param protocol: A cryptoID protocol URI which the other party's certificate chain must match. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: A hex-encoded X.509 end-entity fingerprint which the other party's end-entity certificate must match. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. 
@type checkResumedSession: bool @param checkResumedSession: If resumed sessions should be checked. This defaults to False, on the theory that if the session was checked once, we don't need to bother re-checking it. """ if cryptoID and (x509Fingerprint or x509TrustList): raise ValueError() if x509Fingerprint and x509TrustList: raise ValueError() if x509CommonName and not x509TrustList: raise ValueError() if protocol and not cryptoID: raise ValueError() if cryptoID: import cryptoIDlib #So we raise an error here if x509TrustList: import cryptlib_py #So we raise an error here self.cryptoID = cryptoID self.protocol = protocol self.x509Fingerprint = x509Fingerprint self.x509TrustList = x509TrustList self.x509CommonName = x509CommonName self.checkResumedSession = checkResumedSession def __call__(self, connection): """Check a TLSConnection. When a Checker is passed to a handshake function, this will be called at the end of the function. @type connection: L{tlslite.TLSConnection.TLSConnection} @param connection: The TLSConnection to examine. @raise tlslite.errors.TLSAuthenticationError: If the other party's certificate chain is missing or bad. """ if not self.checkResumedSession and connection.resumed: return if self.cryptoID or self.x509Fingerprint or self.x509TrustList: if connection._client: chain = connection.session.serverCertChain else: chain = connection.session.clientCertChain if self.x509Fingerprint or self.x509TrustList: if isinstance(chain, X509CertChain): if self.x509Fingerprint: if chain.getFingerprint() != self.x509Fingerprint: raise TLSFingerprintError(\ "X.509 fingerprint mismatch: %s, %s" % \ (chain.getFingerprint(), self.x509Fingerprint)) else: #self.x509TrustList if not chain.validate(self.x509TrustList): raise TLSValidationError("X.509 validation failure") if self.x509CommonName and \ (chain.getCommonName() != self.x509CommonName): raise TLSAuthorizationError(\ "X.509 Common Name mismatch: %s, %s" % \ (chain.getCommonName(), self.x509CommonName)) elif chain: raise TLSAuthenticationTypeError() else: raise TLSNoAuthenticationError() elif self.cryptoID: import cryptoIDlib.CertChain if isinstance(chain, cryptoIDlib.CertChain.CertChain): if chain.cryptoID != self.cryptoID: raise TLSFingerprintError(\ "cryptoID mismatch: %s, %s" % \ (chain.cryptoID, self.cryptoID)) if self.protocol: if not chain.checkProtocol(self.protocol): raise TLSAuthorizationError(\ "cryptoID protocol mismatch") if not chain.validate(): raise TLSValidationError("cryptoID validation failure") elif chain: raise TLSAuthenticationTypeError() else: raise TLSNoAuthenticationError() # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
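# --- Added illustration, not part of the original sources: a minimal sketch of
# --- driving the Checker above from client code, assuming the classic tlslite
# --- API in which handshake functions accept a `checker` argument. The host
# --- name and the all-zero fingerprint below are placeholders.
def _example_pinned_handshake():
    import socket
    from tlslite.api import TLSConnection, Checker
    sock = socket.create_connection(('www.example.com', 443))
    connection = TLSConnection(sock)
    # Pin the server's end-entity certificate: if the presented chain's
    # fingerprint differs, the checker raises TLSFingerprintError at the
    # end of the handshake.
    checker = Checker(x509Fingerprint='0' * 40)
    connection.handshakeClientCert(checker=checker)
    return connection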
# ============================================================================== """Imperative mode for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow import * # pylint: disable=wildcard-import from tensorflow.contrib.imperative import imperative_mode class _InteractiveMode(object): """Imperative mode suitable for interactive execution. This module has a global _InteractiveMode object that enables writing code as follows: ```python import tensorflow.contrib.imperative as tf print(tf.constant(42)) ``` """ def __init__(self, target=None): if not target: target = train.Server.create_local_server().target self.target = target self.imperative_mode = imperative_mode.ImperativeMode(self.target) self.imperative_mode.__enter__() def new_step(self): return self.imperative_mode.new_step() _default_interactive_mode = _InteractiveMode() def new_step(): return _default_interactive_mode.new_step() #*************************************************************************** #* * #* Copyright (c) 2009, 2010 * #* Ken Cline * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* This program is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with this program; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #*************************************************************************** import FreeCAD, math, DraftVecUtils from FreeCAD import Vector __title__="FreeCAD Working Plane utility" __author__ = "Ken Cline" __url__ = "http://www.freecadweb.org" ''' This module provides a class called plane to assist in selecting and maintaining a working plane. ''' class plane: '''A WorkPlane object''' def __init__(self,u=Vector(1,0,0),v=Vector(0,1,0),w=Vector(0,0,1),pos=Vector(0,0,0)): # keep track of active document. Reset view when doc changes. self.doc = None # self.weak is true if the plane has been defined by self.setup or has been reset self.weak = True # u, v axes and position define plane, perpendicular axis is handy, though redundant. self.u = u self.v = v self.axis = w self.position = pos # a placeholder for a stored state self.stored = None def __repr__(self): return "Workplane x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis)) def offsetToPoint(self, p, direction=None): ''' Return the signed distance from p to the plane, such that p + offsetToPoint(p)*direction lies on the plane. direction defaults to -plane.axis ''' ''' A picture will help explain the computation: p //| / / | / / | / / | / / | -------------------- plane -----c-----x-----a-------- Here p is the specified point, c is a point (in this case plane.position) on the plane x is the intercept on the plane from p in the specified direction, and a is the perpendicular intercept on the plane (i.e. 
along plane.axis) Using vertical bars to denote the length operator, |ap| = |cp| * cos(apc) = |xp| * cos(apx) so |xp| = |cp| * cos(apc) / cos(apx) = (cp . axis) / (direction . axis) ''' if direction is None: direction = self.axis return direction.dot(self.position.sub(p)) def projectPoint(self, p, direction=None): '''project point onto plane, default direction is orthogonal''' if not direction: direction = self.axis lp = self.getLocalCoords(p) gp = self.getGlobalCoords(Vector(lp.x,lp.y,0)) a = direction.getAngle(gp.sub(p)) if a > math.pi/2: direction = direction.negative() a = math.pi - a ld = self.getLocalRot(direction) gd = self.getGlobalRot(Vector(ld.x,ld.y,0)) hyp = abs(math.tan(a) * lp.z) return gp.add(DraftVecUtils.scaleTo(gd,hyp)) def projectPointOld(self, p, direction=None): '''project point onto plane, default direction is orthogonal. Obsolete''' if not direction: direction = self.axis t = Vector(direction) #t.normalize() a = round(t.getAngle(self.axis),DraftVecUtils.precision()) pp = round((math.pi)/2,DraftVecUtils.precision()) if a == pp: return p t.multiply(self.offsetToPoint(p, direction)) return p.add(t) def alignToPointAndAxis(self, point, axis, offset=0, upvec=None): self.doc = FreeCAD.ActiveDocument self.axis = axis; self.axis.normalize() if (DraftVecUtils.equals(axis, Vector(1,0,0))): self.u = Vector(0,1,0) self.v = Vector(0,0,1) elif (DraftVecUtils.equals(axis, Vector(-1,0,0))): self.u = Vector(0,-1,0) self.v = Vector(0,0,1) elif upvec: self.v = upvec self.v.normalize() self.u = self.v.cross(self.axis) else: self.v = axis.cross(Vector(1,0,0)) self.v.normalize() self.u = DraftVecUtils.rotate(self.v, -math.pi/2, self.axis) offsetVector = Vector(axis); offsetVector.multiply(offset) self.position = point.add(offsetVector) self.weak = False # FreeCAD.Console.PrintMessage("(position = " + str(self.position) + ")\n") # FreeCAD.Console.PrintMessage("Current workplane: x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis))+"\n") def alignToPointAndAxis_SVG(self, point, axis, offset): # based on cases table self.doc = FreeCAD.ActiveDocument self.axis = axis; self.axis.normalize() ref_vec = Vector(0.0, 1.0, 0.0) if ((abs(axis.x) > abs(axis.y)) and (abs(axis.y) > abs(axis.z))): ref_vec = Vector(0.0, 0., 1.0) self.u = axis.negative().cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "Case new" elif ((abs(axis.y) > abs(axis.z)) and (abs(axis.z) >= abs(axis.x))): ref_vec = Vector(1.0, 0.0, 0.0) self.u = axis.negative().cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "Y>Z, View Y" elif ((abs(axis.y) >= abs(axis.x)) and (abs(axis.x) > abs(axis.z))): ref_vec = Vector(0.0, 0., 1.0) self.u = axis.cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "formerly
XY, Case XY" elif ((abs(axis.x) > abs(axis.z)) and (abs(axis.z) >= abs(axis.y))): self.u = axis.cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "X>Z, View X" elif ((abs(axis.z) >= abs(axis.y)) and (abs(axis.y) > abs(axis.x))): ref_vec = Vector(1.0, 0., 0.0) self.u = axis.cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "Y>X, Case YZ" else: self.u = axis.negative().cross(ref_vec) self.u.normalize() self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis) #projcase = "else" #spat_vec = self.u.cross(self.v) #spat_res = spat_vec.dot(axis) #FreeCAD.Console.PrintMessage(projcase + " spat Prod = " + str(spat_res) + "\n") offsetVector = Vector(axis); offsetVector.multiply(offset) self.position = point.add(offsetVector) self.weak = False # FreeCAD.Console.PrintMessage("(position = " + str(self.position) + ")\n") # FreeCAD.Console.PrintMessage("Current workplane: x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis))+"\n") def alignToCurve(self, shape, offset): if shape.ShapeType == 'Edge': #??? TODO: process curve here. look at shape.edges[0].Curve return False elif shape.ShapeType == 'Wire': #??? TODO: determine if edges define a plane return False else: return False def alignToEdges(self,edges): # use a list of edges to find a plane position if len(edges) > 2: return False # for axes systems, we suppose the 2 first edges are parallel # ??? TODO: exclude other cases first v1 = edges[0].Vertexes[-1].Point.sub(edges[0].Vertexes[0].Point) v2 = edges[1].Vertexes[0].Point.sub(edges[0].Vertexes[0].Point) v3 = v1.cross(v2) v1.normalize() v2.normalize() v3.normalize() #print v1,v2,v3 self.u = v1 self.v = v2 self.axis = v3 def alignToFace(self, shape, offset=0): # Set face to the unique selected face, if found if shape.ShapeType == 'Face': self.alignToPointAndAxis(shape.Faces[0].CenterOfMass, shape.Faces[0].normalAt(0,0), offset) import DraftGeomUtils q = DraftGeomUtils.getQuad(shape) if q: self.u = q[1] self.v = q[2] if not DraftVecUtils.equals(self.u.cross(self.v),self.axis): self.u = q[2] self.v = q[1] if DraftVecUtils.equals(self.u,Vector(0,0,1)): # the X axis is vertical: rotate 90 degrees self.u,self.v = self.v.negative(),self.u elif DraftVecUtils.equals(self.u,Vector(0,0,-1)): self.u,self.v = self.v,self.u.negative() self.weak = False return True else: return False def alignToSelection(self, offset): '''If selection uniquely defines a plane, align working plane to it. Return success (bool)''' import FreeCADGui sex = FreeCADGui.Selection.getSelectionEx(FreeCAD.ActiveDocument.Name) if len(sex) == 0: return False elif len(sex) == 1: if not sex[0].Object.isDerivedFrom("Part::Shape"): return False return self.alignToCurve(sex[0].Object.Shape, offset) \ or self.alignToFace(sex[0].Object.Shape, offset) \ or (len(sex[0].SubObjects) == 1 and self.alignToFace(sex[0].SubObjects[0], offset)) else: # len(sex) > 2, look for point and line, three points, etc. 
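# Added note, not in the original source: a typical standalone use of this
# class is to build a plane from a point and a normal, then map between
# local and global coordinates (getLocalCoords/getGlobalCoords are defined
# further down in this class). A sketch:
#
#   wp = plane()
#   wp.alignToPointAndAxis(Vector(0, 0, 10), Vector(0, 0, 1))  # XY plane at z=10
#   wp.getLocalCoords(Vector(5, 5, 12))   # -> Vector(5, 5, 2)
#   wp.getGlobalCoords(Vector(1, 2, 0))   # -> Vector(1, 2, 10)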
return False def setup(self, direction=None, point=None, upvec=None): '''If working plane is undefined, define it!''' if self.weak: if direction and point: self.alignToPointAndAxis(point, direction, 0, upvec) else: try: import FreeCADGui from pivy import coin rot = FreeCADGui.ActiveDocument.ActiveView.getCameraNode().getField("orientation").getValue() upvec = Vector(rot.multVec(coin.SbVec3f(0,1,0)).getValue()) vdir = FreeCADGui.ActiveDocument.ActiveView.getViewDirection() self.alignToPointAndAxis(Vector(0,0,0), vdir.negative(), 0, upvec) except: pass self.weak = True def reset(self): self.doc = None self.weak = True def getRotation(self): "returns a placement describing the working plane orientation ONLY" m = DraftVecUtils.getPlaneRotation(self.u,self.v,self.axis) return FreeCAD.Placement(m) def getPlacement(self,rotated=False):